comment stringlengths 1 45k | method_body stringlengths 23 281k | target_code stringlengths 0 5.16k | method_body_after stringlengths 12 281k | context_before stringlengths 8 543k | context_after stringlengths 8 543k |
|---|---|---|---|---|---|
Hmm, not sure, after all. | private void stallWhilePendingAbove(long pending) throws InterruptedException {
while (pendingBytes() > pending) {
Thread.sleep(1);
}
} | Thread.sleep(1); | private void stallWhilePendingAbove(long pending) throws InterruptedException {
while (pendingBytes() > pending) {
Thread.sleep(1);
}
} | class TrackCompletition implements CompletionHandler {
final long written;
TrackCompletition(long written) {
this.written = written;
sent.addAndGet(written);
}
@Override
public void completed() {
acked.addAndGet(written);
}
@Override
public void failed(Throwable t) {
acked.addAndGet(written);
}
} | class TrackCompletition implements CompletionHandler {
private final long written;
private final AtomicBoolean replied = new AtomicBoolean(false);
TrackCompletition(long written) {
this.written = written;
sent.addAndGet(written);
}
@Override
public void completed() {
if (!replied.getAndSet(true)) {
acked.addAndGet(written);
}
}
@Override
public void failed(Throwable t) {
if (!replied.getAndSet(true)) {
acked.addAndGet(written);
}
}
} |
```suggestion } catch (InterruptedException ignored) { throw new IOException("Interrupted waiting for IO"); } ``` | public void send(ByteBuffer src) throws IOException {
try {
stallWhilePendingAbove(maxPending);
} catch (InterruptedException e) {}
send(src, new TrackCompletition(src.remaining()));
} | } catch (InterruptedException e) {} | public void send(ByteBuffer src) throws IOException {
try {
stallWhilePendingAbove(maxPending);
} catch (InterruptedException ignored) {
throw new IOException("Interrupted waiting for IO");
}
CompletionHandler pendingTracker = new TrackCompletition(src.remaining());
try {
send(src, pendingTracker);
} catch (Throwable throwable) {
pendingTracker.failed(throwable);
throw throwable;
}
} | class TrackCompletition implements CompletionHandler {
final long written;
TrackCompletition(long written) {
this.written = written;
sent.addAndGet(written);
}
@Override
public void completed() {
acked.addAndGet(written);
}
@Override
public void failed(Throwable t) {
acked.addAndGet(written);
}
} | class TrackCompletition implements CompletionHandler {
private final long written;
private final AtomicBoolean replied = new AtomicBoolean(false);
TrackCompletition(long written) {
this.written = written;
sent.addAndGet(written);
}
@Override
public void completed() {
if (!replied.getAndSet(true)) {
acked.addAndGet(written);
}
}
@Override
public void failed(Throwable t) {
if (!replied.getAndSet(true)) {
acked.addAndGet(written);
}
}
} |
```suggestion throw new IOException("Interrupted waiting for IO"); ``` | public void flush() throws IOException {
super.flush();
try {
stallWhilePendingAbove(0);
}
catch (InterruptedException e) {
throw new RuntimeException("Interrupted waiting for underlying IO to complete", e);
}
} | throw new RuntimeException("Interrupted waiting for underlying IO to complete", e); | public void flush() throws IOException {
super.flush();
try {
stallWhilePendingAbove(0);
}
catch (InterruptedException e) {
throw new IOException("Interrupted waiting for IO");
}
} | class TrackCompletition implements CompletionHandler {
final long written;
TrackCompletition(long written) {
this.written = written;
sent.addAndGet(written);
}
@Override
public void completed() {
acked.addAndGet(written);
}
@Override
public void failed(Throwable t) {
acked.addAndGet(written);
}
} | class TrackCompletition implements CompletionHandler {
private final long written;
private final AtomicBoolean replied = new AtomicBoolean(false);
TrackCompletition(long written) {
this.written = written;
sent.addAndGet(written);
}
@Override
public void completed() {
if (!replied.getAndSet(true)) {
acked.addAndGet(written);
}
}
@Override
public void failed(Throwable t) {
if (!replied.getAndSet(true)) {
acked.addAndGet(written);
}
}
} |
If you use signalling you increase the cost as that is around 3x more costly than an atomic add operation. We do not need the immediate wakeup as the inflight amount is significantly larger than a 1ms window. In these cases I find it preferable to use sleep. | private void stallWhilePendingAbove(long pending) throws InterruptedException {
while (pendingBytes() > pending) {
Thread.sleep(1);
}
} | Thread.sleep(1); | private void stallWhilePendingAbove(long pending) throws InterruptedException {
while (pendingBytes() > pending) {
Thread.sleep(1);
}
} | class TrackCompletition implements CompletionHandler {
final long written;
TrackCompletition(long written) {
this.written = written;
sent.addAndGet(written);
}
@Override
public void completed() {
acked.addAndGet(written);
}
@Override
public void failed(Throwable t) {
acked.addAndGet(written);
}
} | class TrackCompletition implements CompletionHandler {
private final long written;
private final AtomicBoolean replied = new AtomicBoolean(false);
TrackCompletition(long written) {
this.written = written;
sent.addAndGet(written);
}
@Override
public void completed() {
if (!replied.getAndSet(true)) {
acked.addAndGet(written);
}
}
@Override
public void failed(Throwable t) {
if (!replied.getAndSet(true)) {
acked.addAndGet(written);
}
}
} |
Yup. Thanks for the numbers. | private void stallWhilePendingAbove(long pending) throws InterruptedException {
while (pendingBytes() > pending) {
Thread.sleep(1);
}
} | Thread.sleep(1); | private void stallWhilePendingAbove(long pending) throws InterruptedException {
while (pendingBytes() > pending) {
Thread.sleep(1);
}
} | class TrackCompletition implements CompletionHandler {
final long written;
TrackCompletition(long written) {
this.written = written;
sent.addAndGet(written);
}
@Override
public void completed() {
acked.addAndGet(written);
}
@Override
public void failed(Throwable t) {
acked.addAndGet(written);
}
} | class TrackCompletition implements CompletionHandler {
private final long written;
private final AtomicBoolean replied = new AtomicBoolean(false);
TrackCompletition(long written) {
this.written = written;
sent.addAndGet(written);
}
@Override
public void completed() {
if (!replied.getAndSet(true)) {
acked.addAndGet(written);
}
}
@Override
public void failed(Throwable t) {
if (!replied.getAndSet(true)) {
acked.addAndGet(written);
}
}
} |
Consider replacing with a single atomic that is decremented for `completed()`/`failed()` | private long pendingBytes() {
return sent.get() - acked.get();
} | return sent.get() - acked.get(); | private long pendingBytes() {
return sent.get() - acked.get();
} | class MaxPendingContentChannelOutputStream extends ContentChannelOutputStream {
private final ContentChannel channel;
private final long maxPending;
private AtomicLong sent = new AtomicLong(0);
private AtomicLong acked = new AtomicLong(0);
public MaxPendingContentChannelOutputStream(ContentChannel endpoint, long maxPending) {
super(endpoint);
this.channel = endpoint;
this.maxPending = maxPending;
}
private class TrackCompletition implements CompletionHandler {
final long written;
TrackCompletition(long written) {
this.written = written;
sent.addAndGet(written);
}
@Override
public void completed() {
acked.addAndGet(written);
}
@Override
public void failed(Throwable t) {
acked.addAndGet(written);
}
}
@Override
public void send(ByteBuffer src) throws IOException {
try {
stallWhilePendingAbove(maxPending);
} catch (InterruptedException e) {}
send(src, new TrackCompletition(src.remaining()));
}
private void stallWhilePendingAbove(long pending) throws InterruptedException {
while (pendingBytes() > pending) {
Thread.sleep(1);
}
}
@Override
public void flush() throws IOException {
super.flush();
try {
stallWhilePendingAbove(0);
}
catch (InterruptedException e) {
throw new RuntimeException("Interrupted waiting for underlying IO to complete", e);
}
}
} | class MaxPendingContentChannelOutputStream extends ContentChannelOutputStream {
private final long maxPending;
private final AtomicLong sent = new AtomicLong(0);
private final AtomicLong acked = new AtomicLong(0);
public MaxPendingContentChannelOutputStream(ContentChannel endpoint, long maxPending) {
super(endpoint);
this.maxPending = maxPending;
}
private class TrackCompletition implements CompletionHandler {
private final long written;
private final AtomicBoolean replied = new AtomicBoolean(false);
TrackCompletition(long written) {
this.written = written;
sent.addAndGet(written);
}
@Override
public void completed() {
if (!replied.getAndSet(true)) {
acked.addAndGet(written);
}
}
@Override
public void failed(Throwable t) {
if (!replied.getAndSet(true)) {
acked.addAndGet(written);
}
}
}
@Override
public void send(ByteBuffer src) throws IOException {
try {
stallWhilePendingAbove(maxPending);
} catch (InterruptedException ignored) {
throw new IOException("Interrupted waiting for IO");
}
CompletionHandler pendingTracker = new TrackCompletition(src.remaining());
try {
send(src, pendingTracker);
} catch (Throwable throwable) {
pendingTracker.failed(throwable);
throw throwable;
}
}
private void stallWhilePendingAbove(long pending) throws InterruptedException {
while (pendingBytes() > pending) {
Thread.sleep(1);
}
}
@Override
public void flush() throws IOException {
super.flush();
try {
stallWhilePendingAbove(0);
}
catch (InterruptedException e) {
throw new IOException("Interrupted waiting for IO");
}
}
} |
I have concluded that most of the time it is better to have separate counters both counting up. 1 - There will be less contention. The cost is always on the write. Reading the counters is cheap as it requires no serialization. 2 - You preserve more information both for debugging and if you want monitoring. | private long pendingBytes() {
return sent.get() - acked.get();
} | return sent.get() - acked.get(); | private long pendingBytes() {
return sent.get() - acked.get();
} | class MaxPendingContentChannelOutputStream extends ContentChannelOutputStream {
private final ContentChannel channel;
private final long maxPending;
private AtomicLong sent = new AtomicLong(0);
private AtomicLong acked = new AtomicLong(0);
public MaxPendingContentChannelOutputStream(ContentChannel endpoint, long maxPending) {
super(endpoint);
this.channel = endpoint;
this.maxPending = maxPending;
}
private class TrackCompletition implements CompletionHandler {
final long written;
TrackCompletition(long written) {
this.written = written;
sent.addAndGet(written);
}
@Override
public void completed() {
acked.addAndGet(written);
}
@Override
public void failed(Throwable t) {
acked.addAndGet(written);
}
}
@Override
public void send(ByteBuffer src) throws IOException {
try {
stallWhilePendingAbove(maxPending);
} catch (InterruptedException e) {}
send(src, new TrackCompletition(src.remaining()));
}
private void stallWhilePendingAbove(long pending) throws InterruptedException {
while (pendingBytes() > pending) {
Thread.sleep(1);
}
}
@Override
public void flush() throws IOException {
super.flush();
try {
stallWhilePendingAbove(0);
}
catch (InterruptedException e) {
throw new RuntimeException("Interrupted waiting for underlying IO to complete", e);
}
}
} | class MaxPendingContentChannelOutputStream extends ContentChannelOutputStream {
private final long maxPending;
private final AtomicLong sent = new AtomicLong(0);
private final AtomicLong acked = new AtomicLong(0);
public MaxPendingContentChannelOutputStream(ContentChannel endpoint, long maxPending) {
super(endpoint);
this.maxPending = maxPending;
}
private class TrackCompletition implements CompletionHandler {
private final long written;
private final AtomicBoolean replied = new AtomicBoolean(false);
TrackCompletition(long written) {
this.written = written;
sent.addAndGet(written);
}
@Override
public void completed() {
if (!replied.getAndSet(true)) {
acked.addAndGet(written);
}
}
@Override
public void failed(Throwable t) {
if (!replied.getAndSet(true)) {
acked.addAndGet(written);
}
}
}
@Override
public void send(ByteBuffer src) throws IOException {
try {
stallWhilePendingAbove(maxPending);
} catch (InterruptedException ignored) {
throw new IOException("Interrupted waiting for IO");
}
CompletionHandler pendingTracker = new TrackCompletition(src.remaining());
try {
send(src, pendingTracker);
} catch (Throwable throwable) {
pendingTracker.failed(throwable);
throw throwable;
}
}
private void stallWhilePendingAbove(long pending) throws InterruptedException {
while (pendingBytes() > pending) {
Thread.sleep(1);
}
}
@Override
public void flush() throws IOException {
super.flush();
try {
stallWhilePendingAbove(0);
}
catch (InterruptedException e) {
throw new IOException("Interrupted waiting for IO");
}
}
} |
How about just `archiveUri`? | private void toSlime(Node node, boolean allFields, Cursor object) {
object.setString("url", nodeParentUrl + node.hostname());
if ( ! allFields) return;
object.setString("id", node.hostname());
object.setString("state", NodeSerializer.toString(node.state()));
object.setString("type", NodeSerializer.toString(node.type()));
object.setString("hostname", node.hostname());
if (node.parentHostname().isPresent()) {
object.setString("parentHostname", node.parentHostname().get());
}
object.setString("openStackId", node.id());
object.setString("flavor", node.flavor().name());
node.reservedTo().ifPresent(reservedTo -> object.setString("reservedTo", reservedTo.value()));
node.exclusiveTo().ifPresent(exclusiveTo -> object.setString("exclusiveTo", exclusiveTo.serializedForm()));
if (node.flavor().isConfigured())
object.setDouble("cpuCores", node.flavor().resources().vcpu());
NodeResourcesSerializer.toSlime(node.flavor().resources(), object.setObject("resources"));
if (node.flavor().cost() > 0)
object.setLong("cost", node.flavor().cost());
object.setString("environment", node.flavor().getType().name());
node.allocation().ifPresent(allocation -> {
toSlime(allocation.owner(), object.setObject("owner"));
toSlime(allocation.membership(), object.setObject("membership"));
object.setLong("restartGeneration", allocation.restartGeneration().wanted());
object.setLong("currentRestartGeneration", allocation.restartGeneration().current());
object.setString("wantedDockerImage", allocation.membership().cluster().dockerImage()
.orElseGet(() -> nodeRepository.containerImages().imageFor(node.type()).withTag(allocation.membership().cluster().vespaVersion()).asString()));
object.setString("wantedVespaVersion", allocation.membership().cluster().vespaVersion().toFullString());
NodeResourcesSerializer.toSlime(allocation.requestedResources(), object.setObject("requestedResources"));
allocation.networkPorts().ifPresent(ports -> NetworkPortsSerializer.toSlime(ports, object.setArray("networkPorts")));
orchestrator.apply(new HostName(node.hostname()))
.ifPresent(info -> {
object.setBool("allowedToBeDown", info.status().isSuspended());
if (info.status() != HostStatus.NO_REMARKS) {
object.setString("orchestratorStatus", info.status().asString());
}
info.suspendedSince().ifPresent(since -> object.setLong("suspendedSinceMillis", since.toEpochMilli()));
});
});
object.setLong("rebootGeneration", node.status().reboot().wanted());
object.setLong("currentRebootGeneration", node.status().reboot().current());
node.status().osVersion().current().ifPresent(version -> object.setString("currentOsVersion", version.toFullString()));
node.status().osVersion().wanted().ifPresent(version -> object.setString("wantedOsVersion", version.toFullString()));
node.status().firmwareVerifiedAt().ifPresent(instant -> object.setLong("currentFirmwareCheck", instant.toEpochMilli()));
if (node.type().isHost())
nodeRepository.firmwareChecks().requiredAfter().ifPresent(after -> object.setLong("wantedFirmwareCheck", after.toEpochMilli()));
node.status().vespaVersion().ifPresent(version -> object.setString("vespaVersion", version.toFullString()));
currentDockerImage(node).ifPresent(dockerImage -> object.setString("currentDockerImage", dockerImage.asString()));
object.setLong("failCount", node.status().failCount());
object.setBool("wantToRetire", node.status().wantToRetire());
object.setBool("wantToDeprovision", node.status().wantToDeprovision());
toSlime(node.history(), object.setArray("history"));
ipAddressesToSlime(node.ipConfig().primary(), object.setArray("ipAddresses"));
ipAddressesToSlime(node.ipConfig().pool().getIpSet(), object.setArray("additionalIpAddresses"));
addressesToSlime(node.ipConfig().pool().getAddressList(), object);
node.reports().toSlime(object, "reports");
node.modelName().ifPresent(modelName -> object.setString("modelName", modelName));
node.switchHostname().ifPresent(switchHostname -> object.setString("switchHostname", switchHostname));
nodeArchiveUri(nodeRepository.flagSource(), node).ifPresent(url -> object.setString("nodeArchiveUri", url));
} | nodeArchiveUri(nodeRepository.flagSource(), node).ifPresent(url -> object.setString("nodeArchiveUri", url)); | private void toSlime(Node node, boolean allFields, Cursor object) {
object.setString("url", nodeParentUrl + node.hostname());
if ( ! allFields) return;
object.setString("id", node.hostname());
object.setString("state", NodeSerializer.toString(node.state()));
object.setString("type", NodeSerializer.toString(node.type()));
object.setString("hostname", node.hostname());
if (node.parentHostname().isPresent()) {
object.setString("parentHostname", node.parentHostname().get());
}
object.setString("openStackId", node.id());
object.setString("flavor", node.flavor().name());
node.reservedTo().ifPresent(reservedTo -> object.setString("reservedTo", reservedTo.value()));
node.exclusiveTo().ifPresent(exclusiveTo -> object.setString("exclusiveTo", exclusiveTo.serializedForm()));
if (node.flavor().isConfigured())
object.setDouble("cpuCores", node.flavor().resources().vcpu());
NodeResourcesSerializer.toSlime(node.flavor().resources(), object.setObject("resources"));
if (node.flavor().cost() > 0)
object.setLong("cost", node.flavor().cost());
object.setString("environment", node.flavor().getType().name());
node.allocation().ifPresent(allocation -> {
toSlime(allocation.owner(), object.setObject("owner"));
toSlime(allocation.membership(), object.setObject("membership"));
object.setLong("restartGeneration", allocation.restartGeneration().wanted());
object.setLong("currentRestartGeneration", allocation.restartGeneration().current());
object.setString("wantedDockerImage", allocation.membership().cluster().dockerImage()
.orElseGet(() -> nodeRepository.containerImages().imageFor(node.type()).withTag(allocation.membership().cluster().vespaVersion()).asString()));
object.setString("wantedVespaVersion", allocation.membership().cluster().vespaVersion().toFullString());
NodeResourcesSerializer.toSlime(allocation.requestedResources(), object.setObject("requestedResources"));
allocation.networkPorts().ifPresent(ports -> NetworkPortsSerializer.toSlime(ports, object.setArray("networkPorts")));
orchestrator.apply(new HostName(node.hostname()))
.ifPresent(info -> {
object.setBool("allowedToBeDown", info.status().isSuspended());
if (info.status() != HostStatus.NO_REMARKS) {
object.setString("orchestratorStatus", info.status().asString());
}
info.suspendedSince().ifPresent(since -> object.setLong("suspendedSinceMillis", since.toEpochMilli()));
});
});
object.setLong("rebootGeneration", node.status().reboot().wanted());
object.setLong("currentRebootGeneration", node.status().reboot().current());
node.status().osVersion().current().ifPresent(version -> object.setString("currentOsVersion", version.toFullString()));
node.status().osVersion().wanted().ifPresent(version -> object.setString("wantedOsVersion", version.toFullString()));
node.status().firmwareVerifiedAt().ifPresent(instant -> object.setLong("currentFirmwareCheck", instant.toEpochMilli()));
if (node.type().isHost())
nodeRepository.firmwareChecks().requiredAfter().ifPresent(after -> object.setLong("wantedFirmwareCheck", after.toEpochMilli()));
node.status().vespaVersion().ifPresent(version -> object.setString("vespaVersion", version.toFullString()));
currentDockerImage(node).ifPresent(dockerImage -> object.setString("currentDockerImage", dockerImage.asString()));
object.setLong("failCount", node.status().failCount());
object.setBool("wantToRetire", node.status().wantToRetire());
object.setBool("wantToDeprovision", node.status().wantToDeprovision());
toSlime(node.history(), object.setArray("history"));
ipAddressesToSlime(node.ipConfig().primary(), object.setArray("ipAddresses"));
ipAddressesToSlime(node.ipConfig().pool().getIpSet(), object.setArray("additionalIpAddresses"));
addressesToSlime(node.ipConfig().pool().getAddressList(), object);
node.reports().toSlime(object, "reports");
node.modelName().ifPresent(modelName -> object.setString("modelName", modelName));
node.switchHostname().ifPresent(switchHostname -> object.setString("switchHostname", switchHostname));
archiveUri(nodeRepository.flagSource(), node).ifPresent(uri -> object.setString("archiveUri", uri));
} | class NodesResponse extends SlimeJsonResponse {
/** The responses this can create */
public enum ResponseType { nodeList, stateList, nodesInStateList, singleNode }
/** The request url minus parameters, with a trailing slash added if missing */
private final String parentUrl;
/** The parent url of nodes */
private final String nodeParentUrl;
private final NodeFilter filter;
private final boolean recursive;
private final Function<HostName, Optional<HostInfo>> orchestrator;
private final NodeRepository nodeRepository;
public NodesResponse(ResponseType responseType, HttpRequest request,
Orchestrator orchestrator, NodeRepository nodeRepository) {
this.parentUrl = toParentUrl(request);
this.nodeParentUrl = toNodeParentUrl(request);
this.filter = NodesV2ApiHandler.toNodeFilter(request);
this.recursive = request.getBooleanProperty("recursive");
this.orchestrator = orchestrator.getHostResolver();
this.nodeRepository = nodeRepository;
Cursor root = slime.setObject();
switch (responseType) {
case nodeList: nodesToSlime(root); break;
case stateList : statesToSlime(root); break;
case nodesInStateList: nodesToSlime(NodeSerializer.stateFrom(lastElement(parentUrl)), root); break;
case singleNode : nodeToSlime(lastElement(parentUrl), root); break;
default: throw new IllegalArgumentException();
}
}
private String toParentUrl(HttpRequest request) {
URI uri = request.getUri();
String parentUrl = uri.getScheme() + ":
if ( ! parentUrl.endsWith("/"))
parentUrl = parentUrl + "/";
return parentUrl;
}
private String toNodeParentUrl(HttpRequest request) {
URI uri = request.getUri();
return uri.getScheme() + ":
}
private void statesToSlime(Cursor root) {
Cursor states = root.setObject("states");
for (Node.State state : Node.State.values())
toSlime(state, states.setObject(NodeSerializer.toString(state)));
}
private void toSlime(Node.State state, Cursor object) {
object.setString("url", parentUrl + NodeSerializer.toString(state));
if (recursive)
nodesToSlime(state, object);
}
/** Outputs the nodes in the given state to a node array */
private void nodesToSlime(Node.State state, Cursor parentObject) {
Cursor nodeArray = parentObject.setArray("nodes");
for (NodeType type : NodeType.values())
toSlime(nodeRepository.nodes().list(state).nodeType(type).asList(), nodeArray);
}
/** Outputs all the nodes to a node array */
private void nodesToSlime(Cursor parentObject) {
Cursor nodeArray = parentObject.setArray("nodes");
toSlime(nodeRepository.nodes().list().asList(), nodeArray);
}
private void toSlime(List<Node> nodes, Cursor array) {
for (Node node : nodes) {
if ( ! filter.matches(node)) continue;
toSlime(node, recursive, array.addObject());
}
}
private void nodeToSlime(String hostname, Cursor object) {
Node node = nodeRepository.nodes().node(hostname).orElseThrow(() ->
new NotFoundException("No node with hostname '" + hostname + "'"));
toSlime(node, true, object);
}
@SuppressWarnings("deprecation")
private void toSlime(ApplicationId id, Cursor object) {
object.setString("tenant", id.tenant().value());
object.setString("application", id.application().value());
object.setString("instance", id.instance().value());
}
private void toSlime(ClusterMembership membership, Cursor object) {
object.setString("clustertype", membership.cluster().type().name());
object.setString("clusterid", membership.cluster().id().value());
object.setString("group", String.valueOf(membership.cluster().group().get().index()));
object.setLong("index", membership.index());
object.setBool("retired", membership.retired());
}
private void toSlime(History history, Cursor array) {
for (History.Event event : history.events()) {
Cursor object = array.addObject();
object.setString("event", event.type().name());
object.setLong("at", event.at().toEpochMilli());
object.setString("agent", event.agent().name());
}
}
private Optional<DockerImage> currentDockerImage(Node node) {
return node.status().containerImage()
.or(() -> Optional.of(node)
.filter(n -> n.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
.flatMap(n -> n.status().vespaVersion()
.map(version -> nodeRepository.containerImages().imageFor(n.type()).withTag(version))));
}
private void ipAddressesToSlime(Set<String> ipAddresses, Cursor array) {
ipAddresses.forEach(array::addString);
}
private void addressesToSlime(List<Address> addresses, Cursor object) {
if (addresses.isEmpty()) return;
Cursor addressesArray = object.setArray("additionalHostnames");
addresses.forEach(address -> addressesArray.addString(address.hostname()));
}
private String lastElement(String path) {
if (path.endsWith("/"))
path = path.substring(0, path.length()-1);
int lastSlash = path.lastIndexOf("/");
if (lastSlash < 0) return path;
return path.substring(lastSlash+1);
}
static Optional<String> nodeArchiveUri(FlagSource flagSource, Node node) {
String bucket = Flags.SYNC_HOST_LOGS_TO_S3_BUCKET.bindTo(flagSource)
.with(FetchVector.Dimension.NODE_TYPE, node.type().name())
.with(FetchVector.Dimension.APPLICATION_ID, node.allocation().map(alloc -> alloc.owner().serializedForm()).orElse(null))
.value();
if (bucket.isBlank()) return Optional.empty();
StringBuilder sb = new StringBuilder(100).append("s3:
if (node.type() == NodeType.tenant) {
if (node.allocation().isEmpty()) return Optional.empty();
ApplicationId app = node.allocation().get().owner();
sb.append(app.tenant().value()).append('/').append(app.application().value()).append('/').append(app.instance().value()).append('/');
} else {
sb.append("hosted-vespa/");
}
for (char c: node.hostname().toCharArray()) {
if (c == '.') break;
sb.append(c);
}
return Optional.of(sb.append('/').toString());
}
} | class NodesResponse extends SlimeJsonResponse {
/** The responses this can create */
public enum ResponseType { nodeList, stateList, nodesInStateList, singleNode }
/** The request url minus parameters, with a trailing slash added if missing */
private final String parentUrl;
/** The parent url of nodes */
private final String nodeParentUrl;
private final NodeFilter filter;
private final boolean recursive;
private final Function<HostName, Optional<HostInfo>> orchestrator;
private final NodeRepository nodeRepository;
public NodesResponse(ResponseType responseType, HttpRequest request,
Orchestrator orchestrator, NodeRepository nodeRepository) {
this.parentUrl = toParentUrl(request);
this.nodeParentUrl = toNodeParentUrl(request);
this.filter = NodesV2ApiHandler.toNodeFilter(request);
this.recursive = request.getBooleanProperty("recursive");
this.orchestrator = orchestrator.getHostResolver();
this.nodeRepository = nodeRepository;
Cursor root = slime.setObject();
switch (responseType) {
case nodeList: nodesToSlime(root); break;
case stateList : statesToSlime(root); break;
case nodesInStateList: nodesToSlime(NodeSerializer.stateFrom(lastElement(parentUrl)), root); break;
case singleNode : nodeToSlime(lastElement(parentUrl), root); break;
default: throw new IllegalArgumentException();
}
}
private String toParentUrl(HttpRequest request) {
URI uri = request.getUri();
String parentUrl = uri.getScheme() + ":
if ( ! parentUrl.endsWith("/"))
parentUrl = parentUrl + "/";
return parentUrl;
}
private String toNodeParentUrl(HttpRequest request) {
URI uri = request.getUri();
return uri.getScheme() + ":
}
private void statesToSlime(Cursor root) {
Cursor states = root.setObject("states");
for (Node.State state : Node.State.values())
toSlime(state, states.setObject(NodeSerializer.toString(state)));
}
private void toSlime(Node.State state, Cursor object) {
object.setString("url", parentUrl + NodeSerializer.toString(state));
if (recursive)
nodesToSlime(state, object);
}
/** Outputs the nodes in the given state to a node array */
private void nodesToSlime(Node.State state, Cursor parentObject) {
Cursor nodeArray = parentObject.setArray("nodes");
for (NodeType type : NodeType.values())
toSlime(nodeRepository.nodes().list(state).nodeType(type).asList(), nodeArray);
}
/** Outputs all the nodes to a node array */
private void nodesToSlime(Cursor parentObject) {
Cursor nodeArray = parentObject.setArray("nodes");
toSlime(nodeRepository.nodes().list().asList(), nodeArray);
}
private void toSlime(List<Node> nodes, Cursor array) {
for (Node node : nodes) {
if ( ! filter.matches(node)) continue;
toSlime(node, recursive, array.addObject());
}
}
private void nodeToSlime(String hostname, Cursor object) {
Node node = nodeRepository.nodes().node(hostname).orElseThrow(() ->
new NotFoundException("No node with hostname '" + hostname + "'"));
toSlime(node, true, object);
}
@SuppressWarnings("deprecation")
private void toSlime(ApplicationId id, Cursor object) {
object.setString("tenant", id.tenant().value());
object.setString("application", id.application().value());
object.setString("instance", id.instance().value());
}
private void toSlime(ClusterMembership membership, Cursor object) {
object.setString("clustertype", membership.cluster().type().name());
object.setString("clusterid", membership.cluster().id().value());
object.setString("group", String.valueOf(membership.cluster().group().get().index()));
object.setLong("index", membership.index());
object.setBool("retired", membership.retired());
}
private void toSlime(History history, Cursor array) {
for (History.Event event : history.events()) {
Cursor object = array.addObject();
object.setString("event", event.type().name());
object.setLong("at", event.at().toEpochMilli());
object.setString("agent", event.agent().name());
}
}
private Optional<DockerImage> currentDockerImage(Node node) {
return node.status().containerImage()
.or(() -> Optional.of(node)
.filter(n -> n.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
.flatMap(n -> n.status().vespaVersion()
.map(version -> nodeRepository.containerImages().imageFor(n.type()).withTag(version))));
}
private void ipAddressesToSlime(Set<String> ipAddresses, Cursor array) {
ipAddresses.forEach(array::addString);
}
private void addressesToSlime(List<Address> addresses, Cursor object) {
if (addresses.isEmpty()) return;
Cursor addressesArray = object.setArray("additionalHostnames");
addresses.forEach(address -> addressesArray.addString(address.hostname()));
}
private String lastElement(String path) {
if (path.endsWith("/"))
path = path.substring(0, path.length()-1);
int lastSlash = path.lastIndexOf("/");
if (lastSlash < 0) return path;
return path.substring(lastSlash+1);
}
static Optional<String> archiveUri(FlagSource flagSource, Node node) {
String bucket = Flags.SYNC_HOST_LOGS_TO_S3_BUCKET.bindTo(flagSource)
.with(FetchVector.Dimension.NODE_TYPE, node.type().name())
.with(FetchVector.Dimension.APPLICATION_ID, node.allocation().map(alloc -> alloc.owner().serializedForm()).orElse(null))
.value();
if (bucket.isBlank()) return Optional.empty();
StringBuilder sb = new StringBuilder(100).append("s3:
if (node.type() == NodeType.tenant) {
if (node.allocation().isEmpty()) return Optional.empty();
ApplicationId app = node.allocation().get().owner();
sb.append(app.tenant().value()).append('/').append(app.application().value()).append('/').append(app.instance().value()).append('/');
} else {
sb.append("hosted-vespa/");
}
for (char c: node.hostname().toCharArray()) {
if (c == '.') break;
sb.append(c);
}
return Optional.of(sb.append('/').toString());
}
} |
Side note: as far as I can recall, distributors are not set to `retired` as they do not themselves have any data, so that particular check is not likely to ever trigger. No harm in having it around, though. | private Result checkAllNodesAreUp(ClusterState clusterState) {
for (NodeInfo storageNodeInfo : clusterInfo.getStorageNodeInfo()) {
State wantedState = storageNodeInfo.getUserWantedState().getState();
if (wantedState != State.UP && wantedState != State.RETIRED) {
return Result.createDisallowed("Another storage node wants state " +
wantedState.toString().toUpperCase() + ": " + storageNodeInfo.getNodeIndex());
}
State state = clusterState.getNodeState(storageNodeInfo.getNode()).getState();
if (state != State.UP && state != State.RETIRED) {
return Result.createDisallowed("Another storage node has state " + state.toString().toUpperCase() +
": " + storageNodeInfo.getNodeIndex());
}
}
for (NodeInfo distributorNodeInfo : clusterInfo.getDistributorNodeInfo()) {
State wantedState = distributorNodeInfo.getUserWantedState().getState();
if (wantedState != State.UP && wantedState != State.RETIRED) {
return Result.createDisallowed("Another distributor wants state " + wantedState.toString().toUpperCase() +
": " + distributorNodeInfo.getNodeIndex());
}
State state = clusterState.getNodeState(distributorNodeInfo.getNode()).getState();
if (state != State.UP && state != State.RETIRED) {
return Result.createDisallowed("Another distributor has state " + state.toString().toUpperCase() +
": " + distributorNodeInfo.getNodeIndex());
}
}
return Result.allowSettingOfWantedState();
} | if (wantedState != State.UP && wantedState != State.RETIRED) { | private Result checkAllNodesAreUp(ClusterState clusterState) {
for (NodeInfo storageNodeInfo : clusterInfo.getStorageNodeInfo()) {
State wantedState = storageNodeInfo.getUserWantedState().getState();
if (wantedState != State.UP && wantedState != State.RETIRED) {
return Result.createDisallowed("Another storage node wants state " +
wantedState.toString().toUpperCase() + ": " + storageNodeInfo.getNodeIndex());
}
State state = clusterState.getNodeState(storageNodeInfo.getNode()).getState();
if (state != State.UP && state != State.RETIRED) {
return Result.createDisallowed("Another storage node has state " + state.toString().toUpperCase() +
": " + storageNodeInfo.getNodeIndex());
}
}
for (NodeInfo distributorNodeInfo : clusterInfo.getDistributorNodeInfo()) {
State wantedState = distributorNodeInfo.getUserWantedState().getState();
if (wantedState != State.UP && wantedState != State.RETIRED) {
return Result.createDisallowed("Another distributor wants state " + wantedState.toString().toUpperCase() +
": " + distributorNodeInfo.getNodeIndex());
}
State state = clusterState.getNodeState(distributorNodeInfo.getNode()).getState();
if (state != State.UP && state != State.RETIRED) {
return Result.createDisallowed("Another distributor has state " + state.toString().toUpperCase() +
": " + distributorNodeInfo.getNodeIndex());
}
}
return Result.allowSettingOfWantedState();
} | class Result {
public enum Action {
MUST_SET_WANTED_STATE,
ALREADY_SET,
DISALLOWED
}
private final Action action;
private final String reason;
private Result(Action action, String reason) {
this.action = action;
this.reason = reason;
}
public static Result createDisallowed(String reason) {
return new Result(Action.DISALLOWED, reason);
}
public static Result allowSettingOfWantedState() {
return new Result(Action.MUST_SET_WANTED_STATE, "Preconditions fulfilled and new state different");
}
public static Result createAlreadySet() {
return new Result(Action.ALREADY_SET, "Basic preconditions fulfilled and new state is already effective");
}
public boolean settingWantedStateIsAllowed() {
return action == Action.MUST_SET_WANTED_STATE;
}
public boolean wantedStateAlreadySet() {
return action == Action.ALREADY_SET;
}
public String getReason() {
return reason;
}
public String toString() {
return "action " + action + ": " + reason;
}
} | class Result {
public enum Action {
MUST_SET_WANTED_STATE,
ALREADY_SET,
DISALLOWED
}
private final Action action;
private final String reason;
private Result(Action action, String reason) {
this.action = action;
this.reason = reason;
}
public static Result createDisallowed(String reason) {
return new Result(Action.DISALLOWED, reason);
}
public static Result allowSettingOfWantedState() {
return new Result(Action.MUST_SET_WANTED_STATE, "Preconditions fulfilled and new state different");
}
public static Result createAlreadySet() {
return new Result(Action.ALREADY_SET, "Basic preconditions fulfilled and new state is already effective");
}
public boolean settingWantedStateIsAllowed() {
return action == Action.MUST_SET_WANTED_STATE;
}
public boolean wantedStateAlreadySet() {
return action == Action.ALREADY_SET;
}
public String getReason() {
return reason;
}
public String toString() {
return "action " + action + ": " + reason;
}
} |
Is it possible for this to cause upgrade "starvation" by nodes tagged as `permanently down`? Or are such nodes kept around for such a brief period of time that it does not matter in practice? | private Result checkAllNodesAreUp(ClusterState clusterState) {
for (NodeInfo storageNodeInfo : clusterInfo.getStorageNodeInfo()) {
State wantedState = storageNodeInfo.getUserWantedState().getState();
if (wantedState != State.UP && wantedState != State.RETIRED) {
return Result.createDisallowed("Another storage node wants state " +
wantedState.toString().toUpperCase() + ": " + storageNodeInfo.getNodeIndex());
}
State state = clusterState.getNodeState(storageNodeInfo.getNode()).getState();
if (state != State.UP && state != State.RETIRED) {
return Result.createDisallowed("Another storage node has state " + state.toString().toUpperCase() +
": " + storageNodeInfo.getNodeIndex());
}
}
for (NodeInfo distributorNodeInfo : clusterInfo.getDistributorNodeInfo()) {
State wantedState = distributorNodeInfo.getUserWantedState().getState();
if (wantedState != State.UP && wantedState != State.RETIRED) {
return Result.createDisallowed("Another distributor wants state " + wantedState.toString().toUpperCase() +
": " + distributorNodeInfo.getNodeIndex());
}
State state = clusterState.getNodeState(distributorNodeInfo.getNode()).getState();
if (state != State.UP && state != State.RETIRED) {
return Result.createDisallowed("Another distributor has state " + state.toString().toUpperCase() +
": " + distributorNodeInfo.getNodeIndex());
}
}
return Result.allowSettingOfWantedState();
} | wantedState.toString().toUpperCase() + ": " + storageNodeInfo.getNodeIndex()); | private Result checkAllNodesAreUp(ClusterState clusterState) {
for (NodeInfo storageNodeInfo : clusterInfo.getStorageNodeInfo()) {
State wantedState = storageNodeInfo.getUserWantedState().getState();
if (wantedState != State.UP && wantedState != State.RETIRED) {
return Result.createDisallowed("Another storage node wants state " +
wantedState.toString().toUpperCase() + ": " + storageNodeInfo.getNodeIndex());
}
State state = clusterState.getNodeState(storageNodeInfo.getNode()).getState();
if (state != State.UP && state != State.RETIRED) {
return Result.createDisallowed("Another storage node has state " + state.toString().toUpperCase() +
": " + storageNodeInfo.getNodeIndex());
}
}
for (NodeInfo distributorNodeInfo : clusterInfo.getDistributorNodeInfo()) {
State wantedState = distributorNodeInfo.getUserWantedState().getState();
if (wantedState != State.UP && wantedState != State.RETIRED) {
return Result.createDisallowed("Another distributor wants state " + wantedState.toString().toUpperCase() +
": " + distributorNodeInfo.getNodeIndex());
}
State state = clusterState.getNodeState(distributorNodeInfo.getNode()).getState();
if (state != State.UP && state != State.RETIRED) {
return Result.createDisallowed("Another distributor has state " + state.toString().toUpperCase() +
": " + distributorNodeInfo.getNodeIndex());
}
}
return Result.allowSettingOfWantedState();
} | class Result {
public enum Action {
MUST_SET_WANTED_STATE,
ALREADY_SET,
DISALLOWED
}
private final Action action;
private final String reason;
private Result(Action action, String reason) {
this.action = action;
this.reason = reason;
}
public static Result createDisallowed(String reason) {
return new Result(Action.DISALLOWED, reason);
}
public static Result allowSettingOfWantedState() {
return new Result(Action.MUST_SET_WANTED_STATE, "Preconditions fulfilled and new state different");
}
public static Result createAlreadySet() {
return new Result(Action.ALREADY_SET, "Basic preconditions fulfilled and new state is already effective");
}
public boolean settingWantedStateIsAllowed() {
return action == Action.MUST_SET_WANTED_STATE;
}
public boolean wantedStateAlreadySet() {
return action == Action.ALREADY_SET;
}
public String getReason() {
return reason;
}
public String toString() {
return "action " + action + ": " + reason;
}
} | class Result {
public enum Action {
MUST_SET_WANTED_STATE,
ALREADY_SET,
DISALLOWED
}
private final Action action;
private final String reason;
private Result(Action action, String reason) {
this.action = action;
this.reason = reason;
}
public static Result createDisallowed(String reason) {
return new Result(Action.DISALLOWED, reason);
}
public static Result allowSettingOfWantedState() {
return new Result(Action.MUST_SET_WANTED_STATE, "Preconditions fulfilled and new state different");
}
public static Result createAlreadySet() {
return new Result(Action.ALREADY_SET, "Basic preconditions fulfilled and new state is already effective");
}
public boolean settingWantedStateIsAllowed() {
return action == Action.MUST_SET_WANTED_STATE;
}
public boolean wantedStateAlreadySet() {
return action == Action.ALREADY_SET;
}
public String getReason() {
return reason;
}
public String toString() {
return "action " + action + ": " + reason;
}
} |
When a node is permanently set down, the application is redeployed immediately to remove the node. If such a redeployment fails, e.g. a redeployment requires a new node but there is no spare capacity, then the permanently down node will linger and starve upgrades++. I haven't witnessed this yet. Come to think of it -- requiring all other nodes are up when setting a node permanently down seems too stringent. We verify the node is on the latest cluster state version, and manages no buckets, so that should be sufficient? Let me remove the check on all nodes being up. | private Result checkAllNodesAreUp(ClusterState clusterState) {
for (NodeInfo storageNodeInfo : clusterInfo.getStorageNodeInfo()) {
State wantedState = storageNodeInfo.getUserWantedState().getState();
if (wantedState != State.UP && wantedState != State.RETIRED) {
return Result.createDisallowed("Another storage node wants state " +
wantedState.toString().toUpperCase() + ": " + storageNodeInfo.getNodeIndex());
}
State state = clusterState.getNodeState(storageNodeInfo.getNode()).getState();
if (state != State.UP && state != State.RETIRED) {
return Result.createDisallowed("Another storage node has state " + state.toString().toUpperCase() +
": " + storageNodeInfo.getNodeIndex());
}
}
for (NodeInfo distributorNodeInfo : clusterInfo.getDistributorNodeInfo()) {
State wantedState = distributorNodeInfo.getUserWantedState().getState();
if (wantedState != State.UP && wantedState != State.RETIRED) {
return Result.createDisallowed("Another distributor wants state " + wantedState.toString().toUpperCase() +
": " + distributorNodeInfo.getNodeIndex());
}
State state = clusterState.getNodeState(distributorNodeInfo.getNode()).getState();
if (state != State.UP && state != State.RETIRED) {
return Result.createDisallowed("Another distributor has state " + state.toString().toUpperCase() +
": " + distributorNodeInfo.getNodeIndex());
}
}
return Result.allowSettingOfWantedState();
} | wantedState.toString().toUpperCase() + ": " + storageNodeInfo.getNodeIndex()); | private Result checkAllNodesAreUp(ClusterState clusterState) {
for (NodeInfo storageNodeInfo : clusterInfo.getStorageNodeInfo()) {
State wantedState = storageNodeInfo.getUserWantedState().getState();
if (wantedState != State.UP && wantedState != State.RETIRED) {
return Result.createDisallowed("Another storage node wants state " +
wantedState.toString().toUpperCase() + ": " + storageNodeInfo.getNodeIndex());
}
State state = clusterState.getNodeState(storageNodeInfo.getNode()).getState();
if (state != State.UP && state != State.RETIRED) {
return Result.createDisallowed("Another storage node has state " + state.toString().toUpperCase() +
": " + storageNodeInfo.getNodeIndex());
}
}
for (NodeInfo distributorNodeInfo : clusterInfo.getDistributorNodeInfo()) {
State wantedState = distributorNodeInfo.getUserWantedState().getState();
if (wantedState != State.UP && wantedState != State.RETIRED) {
return Result.createDisallowed("Another distributor wants state " + wantedState.toString().toUpperCase() +
": " + distributorNodeInfo.getNodeIndex());
}
State state = clusterState.getNodeState(distributorNodeInfo.getNode()).getState();
if (state != State.UP && state != State.RETIRED) {
return Result.createDisallowed("Another distributor has state " + state.toString().toUpperCase() +
": " + distributorNodeInfo.getNodeIndex());
}
}
return Result.allowSettingOfWantedState();
} | class Result {
public enum Action {
MUST_SET_WANTED_STATE,
ALREADY_SET,
DISALLOWED
}
private final Action action;
private final String reason;
private Result(Action action, String reason) {
this.action = action;
this.reason = reason;
}
public static Result createDisallowed(String reason) {
return new Result(Action.DISALLOWED, reason);
}
public static Result allowSettingOfWantedState() {
return new Result(Action.MUST_SET_WANTED_STATE, "Preconditions fulfilled and new state different");
}
public static Result createAlreadySet() {
return new Result(Action.ALREADY_SET, "Basic preconditions fulfilled and new state is already effective");
}
public boolean settingWantedStateIsAllowed() {
return action == Action.MUST_SET_WANTED_STATE;
}
public boolean wantedStateAlreadySet() {
return action == Action.ALREADY_SET;
}
public String getReason() {
return reason;
}
public String toString() {
return "action " + action + ": " + reason;
}
} | class Result {
public enum Action {
MUST_SET_WANTED_STATE,
ALREADY_SET,
DISALLOWED
}
private final Action action;
private final String reason;
private Result(Action action, String reason) {
this.action = action;
this.reason = reason;
}
public static Result createDisallowed(String reason) {
return new Result(Action.DISALLOWED, reason);
}
public static Result allowSettingOfWantedState() {
return new Result(Action.MUST_SET_WANTED_STATE, "Preconditions fulfilled and new state different");
}
public static Result createAlreadySet() {
return new Result(Action.ALREADY_SET, "Basic preconditions fulfilled and new state is already effective");
}
public boolean settingWantedStateIsAllowed() {
return action == Action.MUST_SET_WANTED_STATE;
}
public boolean wantedStateAlreadySet() {
return action == Action.ALREADY_SET;
}
public String getReason() {
return reason;
}
public String toString() {
return "action " + action + ": " + reason;
}
} |
We should also warn when we failed to join. `Thread.isAlive()` or some such should allow us to check that. But can do that in a separate PR. Let's also consider shutting down the correct peer. | public void shutdown() {
if (quorumPeer != null) {
log.log(Level.INFO, "Shutting down ZooKeeper server");
try {
quorumPeer.shutdown();
quorumPeer.join(timeToWaitForShutdown.toMillis());
} catch (RuntimeException|InterruptedException e) {
Process.logAndDie("Failed to shut down ZooKeeper properly, forcing shutdown", e);
}
}
} | quorumPeer.join(timeToWaitForShutdown.toMillis()); | public void shutdown() {
if (quorumPeer != null) {
log.log(Level.INFO, "Shutting down ZooKeeper server");
try {
quorumPeer.shutdown();
quorumPeer.join(timeToWaitForShutdown.toMillis());
} catch (RuntimeException|InterruptedException e) {
Process.logAndDie("Failed to shut down ZooKeeper properly, forcing shutdown", e);
}
}
} | class VespaQuorumPeer extends QuorumPeerMain {
private static final Logger log = java.util.logging.Logger.getLogger(Reconfigurer.class.getName());
private static final Duration timeToWaitForShutdown = Duration.ofSeconds(60);
public void start(Path path) {
initializeAndRun(new String[]{ path.toFile().getAbsolutePath()});
}
@Override
protected void initializeAndRun(String[] args) {
try {
super.initializeAndRun(args);
} catch (QuorumPeerConfig.ConfigException | IOException | AdminServer.AdminServerException e) {
throw new RuntimeException("Exception when initializing or running ZooKeeper server", e);
}
}
} | class VespaQuorumPeer extends QuorumPeerMain {
private static final Logger log = java.util.logging.Logger.getLogger(Reconfigurer.class.getName());
private static final Duration timeToWaitForShutdown = Duration.ofSeconds(60);
public void start(Path path) {
initializeAndRun(new String[]{ path.toFile().getAbsolutePath()});
}
@Override
protected void initializeAndRun(String[] args) {
try {
super.initializeAndRun(args);
} catch (QuorumPeerConfig.ConfigException | IOException | AdminServer.AdminServerException e) {
throw new RuntimeException("Exception when initializing or running ZooKeeper server", e);
}
}
} |
Extend or overload createContextForSingleAppOp to take a `boolean probe` argument, to avoid creating 2 nested contexts. | public boolean isQuiescent(ApplicationId id) {
try {
ApplicationInstance application = serviceMonitor.getApplication(OrchestratorUtil.toApplicationInstanceReference(id, serviceMonitor))
.orElseThrow(ApplicationIdNotFoundException::new);
List<ServiceCluster> contentClusters = application.serviceClusters().stream()
.filter(VespaModelUtil::isContent)
.collect(toList());
OrchestratorContext context = OrchestratorContext.createContextForSingleAppOp(clock)
.createSubcontextForSingleAppOp(true);
for (ServiceCluster cluster : contentClusters) {
List<HostName> clusterControllers = VespaModelUtil.getClusterControllerInstancesInOrder(application, cluster.clusterId());
ClusterControllerClient client = clusterControllerClientFactory.createClient(clusterControllers, cluster.clusterId().s());
for (ServiceInstance service : cluster.serviceInstances()) {
try {
ClusterControllerStateResponse response = client.setNodeState(context,
VespaModelUtil.getStorageNodeIndex(service.configId()),
MAINTENANCE);
if ( ! response.wasModified)
return false;
}
catch (Exception e) {
log.log(Level.INFO, "Failed probing for permission to set " + service + " in MAINTENANCE: " + Exceptions.toMessageString(e));
return false;
}
}
}
return true;
}
catch (ApplicationIdNotFoundException ignored) {
return false;
}
} | OrchestratorContext context = OrchestratorContext.createContextForSingleAppOp(clock) | public boolean isQuiescent(ApplicationId id) {
try {
ApplicationInstance application = serviceMonitor.getApplication(OrchestratorUtil.toApplicationInstanceReference(id, serviceMonitor))
.orElseThrow(ApplicationIdNotFoundException::new);
List<ServiceCluster> contentClusters = application.serviceClusters().stream()
.filter(VespaModelUtil::isContent)
.collect(toList());
OrchestratorContext context = OrchestratorContext.createContextForBatchProbe(clock);
for (ServiceCluster cluster : contentClusters) {
List<HostName> clusterControllers = VespaModelUtil.getClusterControllerInstancesInOrder(application, cluster.clusterId());
ClusterControllerClient client = clusterControllerClientFactory.createClient(clusterControllers, cluster.clusterId().s());
for (ServiceInstance service : cluster.serviceInstances()) {
try {
ClusterControllerStateResponse response = client.setNodeState(context,
VespaModelUtil.getStorageNodeIndex(service.configId()),
MAINTENANCE);
if ( ! response.wasModified)
return false;
}
catch (Exception e) {
log.log(Level.INFO, "Failed probing for permission to set " + service + " in MAINTENANCE: " + Exceptions.toMessageString(e));
return false;
}
}
}
return true;
}
catch (ApplicationIdNotFoundException ignored) {
return false;
}
} | class OrchestratorImpl implements Orchestrator {
private static final Logger log = Logger.getLogger(OrchestratorImpl.class.getName());
private final Policy policy;
private final StatusService statusService;
private final ServiceMonitor serviceMonitor;
private final int serviceMonitorConvergenceLatencySeconds;
private final ClusterControllerClientFactory clusterControllerClientFactory;
private final Clock clock;
private final ApplicationApiFactory applicationApiFactory;
@Inject
public OrchestratorImpl(ClusterControllerClientFactory clusterControllerClientFactory,
StatusService statusService,
OrchestratorConfig orchestratorConfig,
ServiceMonitor serviceMonitor,
ConfigserverConfig configServerConfig,
FlagSource flagSource)
{
this(new HostedVespaPolicy(new HostedVespaClusterPolicy(flagSource),
clusterControllerClientFactory,
new ApplicationApiFactory(configServerConfig.zookeeperserver().size(), Clock.systemUTC())),
clusterControllerClientFactory,
statusService,
serviceMonitor,
orchestratorConfig.serviceMonitorConvergenceLatencySeconds(),
Clock.systemUTC(),
new ApplicationApiFactory(configServerConfig.zookeeperserver().size(), Clock.systemUTC()),
flagSource);
}
public OrchestratorImpl(Policy policy,
ClusterControllerClientFactory clusterControllerClientFactory,
StatusService statusService,
ServiceMonitor serviceMonitor,
int serviceMonitorConvergenceLatencySeconds,
Clock clock,
ApplicationApiFactory applicationApiFactory,
FlagSource flagSource)
{
this.policy = policy;
this.clusterControllerClientFactory = clusterControllerClientFactory;
this.statusService = statusService;
this.serviceMonitorConvergenceLatencySeconds = serviceMonitorConvergenceLatencySeconds;
this.serviceMonitor = serviceMonitor;
this.clock = clock;
this.applicationApiFactory = applicationApiFactory;
serviceMonitor.registerListener(statusService);
}
@Override
public Host getHost(HostName hostName) throws HostNameNotFoundException {
ApplicationInstance applicationInstance = serviceMonitor
.getApplicationNarrowedTo(hostName)
.orElseThrow(() -> new HostNameNotFoundException(hostName));
List<ServiceInstance> serviceInstances = applicationInstance
.serviceClusters().stream()
.flatMap(cluster -> cluster.serviceInstances().stream())
.filter(serviceInstance -> hostName.equals(serviceInstance.hostName()))
.collect(toList());
HostInfo hostInfo = statusService.getHostInfo(applicationInstance.reference(), hostName);
return new Host(hostName, hostInfo, applicationInstance.reference(), serviceInstances);
}
@Override
public HostStatus getNodeStatus(HostName hostName) throws HostNameNotFoundException {
ApplicationInstanceReference reference = getApplicationInstanceReference(hostName);
return statusService.getHostInfo(reference, hostName).status();
}
@Override
public HostInfo getHostInfo(ApplicationInstanceReference reference, HostName hostname) {
return statusService.getHostInfo(reference, hostname);
}
@Override
public Function<HostName, Optional<HostInfo>> getHostResolver() {
return hostName -> serviceMonitor
.getApplicationInstanceReference(hostName)
.map(reference -> statusService.getHostInfo(reference, hostName));
}
@Override
public void setNodeStatus(HostName hostName, HostStatus status) throws OrchestrationException {
ApplicationInstanceReference reference = getApplicationInstanceReference(hostName);
OrchestratorContext context = OrchestratorContext.createContextForSingleAppOp(clock);
try (ApplicationLock lock = statusService.lockApplication(context, reference)) {
lock.setHostState(hostName, status);
}
}
@Override
public void resume(HostName hostName) throws HostStateChangeDeniedException, HostNameNotFoundException {
/*
* When making a state transition to this state, we have to consider that if the host has been in
* ALLOWED_TO_BE_DOWN state, services on the host may recently have been stopped (and, presumably, started).
* Service monitoring may not have had enough time to detect that services were stopped,
* and may therefore mistakenly report services as up, even if they still haven't initialized and
* are not yet ready for serving. Erroneously reporting both host and services as up causes a race
* where services on other hosts may be stopped prematurely. A delay here ensures that service
* monitoring will have had time to catch up. Since we don't want do the delay with the lock held,
* and the host status service's locking functionality does not support something like condition
* variables or Object.wait(), we break out here, releasing the lock before delaying.
*
* 2020-02-07: We should utilize suspendedSince timestamp on the HostInfo: The above
* is equivalent to guaranteeing a minimum time after suspendedSince, before checking
* the health with service monitor. This should for all practical purposes remove
* the amount of time in this sleep.
* Caveat: Cannot be implemented before lingering HostInfo has been fixed (VESPA-17546).
*/
sleep(serviceMonitorConvergenceLatencySeconds, TimeUnit.SECONDS);
ApplicationInstance appInstance = getApplicationInstance(hostName);
OrchestratorContext context = OrchestratorContext.createContextForSingleAppOp(clock);
try (ApplicationLock lock = statusService.lockApplication(context, appInstance.reference())) {
HostStatus currentHostState = lock.getHostInfos().getOrNoRemarks(hostName).status();
if (currentHostState == HostStatus.NO_REMARKS) {
return;
}
if (currentHostState == HostStatus.PERMANENTLY_DOWN ||
lock.getApplicationInstanceStatus() == ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN) {
return;
}
policy.releaseSuspensionGrant(context.createSubcontextWithinLock(), appInstance, hostName, lock);
}
}
@Override
public void suspend(HostName hostName) throws HostStateChangeDeniedException, HostNameNotFoundException {
ApplicationInstance appInstance = getApplicationInstance(hostName);
NodeGroup nodeGroup = new NodeGroup(appInstance, hostName);
suspendGroup(OrchestratorContext.createContextForSingleAppOp(clock), nodeGroup);
}
@Override
public void acquirePermissionToRemove(HostName hostName) throws OrchestrationException {
ApplicationInstance appInstance = getApplicationInstance(hostName);
NodeGroup nodeGroup = new NodeGroup(appInstance, hostName);
OrchestratorContext context = OrchestratorContext.createContextForSingleAppOp(clock);
try (ApplicationLock lock = statusService.lockApplication(context, appInstance.reference())) {
ApplicationApi applicationApi = applicationApiFactory.create(nodeGroup, lock, clusterControllerClientFactory);
policy.acquirePermissionToRemove(context.createSubcontextWithinLock(), applicationApi);
}
}
/**
* Suspend normal operations for a group of nodes in the same application.
*
* @param nodeGroup The group of nodes in an application.
* @throws HostStateChangeDeniedException if the request cannot be met due to policy constraints.
*/
void suspendGroup(OrchestratorContext context, NodeGroup nodeGroup) throws HostStateChangeDeniedException {
ApplicationInstanceReference applicationReference = nodeGroup.getApplicationReference();
final SuspensionReasons suspensionReasons;
try (ApplicationLock lock = statusService.lockApplication(context, applicationReference)) {
ApplicationInstanceStatus appStatus = lock.getApplicationInstanceStatus();
if (appStatus == ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN) {
return;
}
ApplicationApi applicationApi = applicationApiFactory.create(
nodeGroup, lock, clusterControllerClientFactory);
suspensionReasons = policy.grantSuspensionRequest(context.createSubcontextWithinLock(), applicationApi);
}
suspensionReasons.makeLogMessage().ifPresent(log::info);
}
@Override
public ApplicationInstanceStatus getApplicationInstanceStatus(ApplicationId appId) throws ApplicationIdNotFoundException {
ApplicationInstanceReference reference = OrchestratorUtil.toApplicationInstanceReference(appId, serviceMonitor);
return statusService.getApplicationInstanceStatus(reference);
}
@Override
public Set<ApplicationId> getAllSuspendedApplications() {
Set<ApplicationInstanceReference> refSet = statusService.getAllSuspendedApplications();
return refSet.stream().map(OrchestratorUtil::toApplicationId).collect(toSet());
}
@Override
public void resume(ApplicationId appId) throws ApplicationIdNotFoundException, ApplicationStateChangeDeniedException {
setApplicationStatus(appId, ApplicationInstanceStatus.NO_REMARKS);
}
@Override
public void suspend(ApplicationId appId) throws ApplicationIdNotFoundException, ApplicationStateChangeDeniedException {
setApplicationStatus(appId, ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN);
}
@Override
public void suspendAll(HostName parentHostname, List<HostName> hostNames)
throws BatchHostStateChangeDeniedException, BatchHostNameNotFoundException, BatchInternalErrorException {
try (OrchestratorContext context = OrchestratorContext.createContextForMultiAppOp(clock)) {
List<NodeGroup> nodeGroupsOrderedByApplication;
try {
nodeGroupsOrderedByApplication = nodeGroupsOrderedForSuspend(hostNames);
} catch (HostNameNotFoundException e) {
throw new BatchHostNameNotFoundException(parentHostname, hostNames, e);
}
suspendAllNodeGroups(context, parentHostname, nodeGroupsOrderedByApplication, true);
suspendAllNodeGroups(context, parentHostname, nodeGroupsOrderedByApplication, false);
}
}
private void suspendAllNodeGroups(OrchestratorContext context,
HostName parentHostname,
List<NodeGroup> nodeGroupsOrderedByApplication,
boolean probe)
throws BatchHostStateChangeDeniedException, BatchInternalErrorException {
for (NodeGroup nodeGroup : nodeGroupsOrderedByApplication) {
try {
suspendGroup(context.createSubcontextForSingleAppOp(probe), nodeGroup);
} catch (HostStateChangeDeniedException e) {
throw new BatchHostStateChangeDeniedException(parentHostname, nodeGroup, e);
} catch (UncheckedTimeoutException e) {
throw e;
} catch (RuntimeException e) {
throw new BatchInternalErrorException(parentHostname, nodeGroup, e);
}
}
}
/**
* PROBLEM
* Take the example of 2 Docker hosts:
* - Docker host 1 has two nodes A1 and B1, belonging to the application with
* a globally unique ID A and B, respectively.
* - Similarly, Docker host 2 has two nodes running content nodes A2 and B2,
* and we assume both A1 and A2 (and B1 and B2) have services within the same service cluster.
*
* Suppose both Docker hosts wanting to reboot, and
* - Docker host 1 asks to suspend A1 and B1, while
* - Docker host 2 asks to suspend B2 and A2.
*
* The Orchestrator may allow suspend of A1 and B2, before requesting the suspension of B1 and A2.
* None of these can be suspended (assuming max 1 suspended content node per content cluster),
* and so both requests for suspension will fail.
*
* Note that it's not a deadlock - both client will fail immediately and resume both A1 and B2 before
* responding to the client, and if host 1 asks later w/o host 2 asking at the same time,
* it will be given permission to suspend. However if both hosts were to request in lock-step,
* there would be starvation. And in general, it would fail requests for suspension more
* than necessary.
*
* SOLUTION
* The solution we're using is to order the hostnames by the globally unique application instance ID,
* e.g. hosted-vespa:routing:dev:some-region:default. In the example above, it would guarantee
* Docker host 2 would ensure ask to suspend B2 before A2. We take care of that ordering here.
*
* NodeGroups complicate the above picture a little: Each A1, A2, B1, and B2 is a NodeGroup that may
* contain several nodes (on the same Docker host). But the argument still applies.
*/
/**
 * Groups the given hosts by their owning application instance and returns the groups in the
 * deterministic order used for suspension (see the PROBLEM/SOLUTION comment above for why the
 * ordering matters).
 *
 * @param hostNames the hosts to group; several hosts may belong to the same application
 * @return one NodeGroup per application, sorted by application instance reference
 * @throws HostNameNotFoundException if any host is unknown to the service monitor
 */
private List<NodeGroup> nodeGroupsOrderedForSuspend(List<HostName> hostNames) throws HostNameNotFoundException {
    Map<ApplicationInstanceReference, NodeGroup> nodeGroupMap = new HashMap<>(hostNames.size());
    for (HostName hostName : hostNames) {
        ApplicationInstance application = getApplicationInstance(hostName);
        // One NodeGroup per application: create it lazily on the first host seen for that application.
        NodeGroup nodeGroup = nodeGroupMap.computeIfAbsent(application.reference(),
                                                           reference -> new NodeGroup(application));
        nodeGroup.addNode(hostName);
    }
    return nodeGroupMap.values().stream()
            .sorted(OrchestratorImpl::compareNodeGroupsForSuspend)
            .collect(toList());
}
/** Orders node groups lexicographically by their globally unique application instance ID. */
private static int compareNodeGroupsForSuspend(NodeGroup leftNodeGroup, NodeGroup rightNodeGroup) {
    String left = leftNodeGroup.getApplicationReference().asString();
    String right = rightNodeGroup.getApplicationReference().asString();
    return left.compareTo(right);
}
/**
 * Sets the orchestration status of an entire application instance, under the application lock.
 * When suspending, every not-yet-suspended host is first marked ALLOWED_TO_BE_DOWN and all
 * content clusters are set to MAINTENANCE in their cluster controllers.
 *
 * @throws ApplicationIdNotFoundException if the application is unknown to the service monitor
 * @throws ApplicationStateChangeDeniedException if a cluster controller rejects the state change
 */
private void setApplicationStatus(ApplicationId appId, ApplicationInstanceStatus status)
        throws ApplicationStateChangeDeniedException, ApplicationIdNotFoundException{
    OrchestratorContext context = OrchestratorContext.createContextForSingleAppOp(clock);
    ApplicationInstanceReference reference = OrchestratorUtil.toApplicationInstanceReference(appId, serviceMonitor);
    ApplicationInstance application = serviceMonitor.getApplication(reference)
            .orElseThrow(ApplicationIdNotFoundException::new);
    try (ApplicationLock lock = statusService.lockApplication(context, reference)) {
        // No-op if the application is already in the wanted status.
        if (status == lock.getApplicationInstanceStatus()) return;
        if (status == ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN) {
            // Mark each host that is not already suspended as allowed to be down ...
            HostInfos hostInfosSnapshot = lock.getHostInfos();
            OrchestratorUtil.getHostsUsedByApplicationInstance(application)
                    .stream()
                    .filter(hostname -> !hostInfosSnapshot.getOrNoRemarks(hostname).status().isSuspended())
                    .forEach(hostname -> lock.setHostState(hostname, HostStatus.ALLOWED_TO_BE_DOWN));
            // ... then tell the cluster controllers to put all content clusters in MAINTENANCE.
            setClusterStateInController(context.createSubcontextWithinLock(), application, MAINTENANCE);
        }
        lock.setApplicationInstanceStatus(status);
    }
}
/**
 * Sets the wanted cluster state of every content cluster of the application in its
 * cluster controllers.
 *
 * Note: the stray {@code @Override} annotation was removed — this is a private method and
 * cannot override anything; the annotation is a compile error on private methods.
 *
 * @throws ApplicationStateChangeDeniedException if a cluster controller rejects, fails,
 *         or times out while applying the state change
 */
private void setClusterStateInController(OrchestratorContext context,
                                         ApplicationInstance application,
                                         ClusterControllerNodeState state)
        throws ApplicationStateChangeDeniedException, ApplicationIdNotFoundException {
    // Collect all content clusters of the application.
    Set<ClusterId> contentClusterIds = application.serviceClusters().stream()
            .filter(VespaModelUtil::isContent)
            .map(ServiceCluster::clusterId)
            .collect(toSet());
    for (ClusterId clusterId : contentClusterIds) {
        List<HostName> clusterControllers = VespaModelUtil.getClusterControllerInstancesInOrder(application, clusterId);
        ClusterControllerClient client = clusterControllerClientFactory.createClient(
                clusterControllers,
                clusterId.s());
        try {
            ClusterControllerStateResponse response = client.setApplicationState(context, state);
            if (!response.wasModified) {
                String msg = String.format("Fail to set application %s, cluster name %s to cluster state %s due to: %s",
                        application.applicationInstanceId(), clusterId, state, response.reason);
                throw new ApplicationStateChangeDeniedException(msg);
            }
        } catch (IOException e) {
            // NOTE(review): only the message is kept, the cause is dropped — consider a
            // constructor overload taking the cause, if one exists.
            throw new ApplicationStateChangeDeniedException(e.getMessage());
        } catch (UncheckedTimeoutException e) {
            throw new ApplicationStateChangeDeniedException(
                    "Timed out while waiting for cluster controllers " + clusterControllers +
                            " with cluster ID " + clusterId.s() + ": " + e.getMessage());
        }
    }
}
/** Resolves the application instance reference a host belongs to, or fails if the host is unknown. */
private ApplicationInstanceReference getApplicationInstanceReference(HostName hostname) throws HostNameNotFoundException {
    Optional<ApplicationInstanceReference> reference = serviceMonitor.getApplicationInstanceReference(hostname);
    if (reference.isPresent()) return reference.get();
    throw new HostNameNotFoundException(hostname);
}
/** Resolves the application instance a host belongs to, or fails if the host is unknown. */
private ApplicationInstance getApplicationInstance(HostName hostName) throws HostNameNotFoundException{
    Optional<ApplicationInstance> application = serviceMonitor.getApplication(hostName);
    if (application.isPresent()) return application.get();
    throw new HostNameNotFoundException(hostName);
}
/**
 * Sleeps for the given duration.
 *
 * @throws RuntimeException wrapping the InterruptedException if the thread is interrupted;
 *         the interrupt flag is restored before throwing so callers can still observe it.
 */
private static void sleep(long time, TimeUnit timeUnit) {
    try {
        Thread.sleep(timeUnit.toMillis(time));
    } catch (InterruptedException e) {
        // Restore the interrupt status: translating to unchecked must not swallow the flag.
        Thread.currentThread().interrupt();
        throw new RuntimeException("Unexpectedly interrupted", e);
    }
}
} | class OrchestratorImpl implements Orchestrator {
private static final Logger log = Logger.getLogger(OrchestratorImpl.class.getName());
// Decides whether suspension/removal requests may be granted.
private final Policy policy;
// Tracks and locks per-application status (host-level and application-level).
private final StatusService statusService;
// Source of the service model: which applications and services run on which hosts.
private final ServiceMonitor serviceMonitor;
// Seconds to wait in resume(HostName) so service monitoring can catch up (see comment there).
private final int serviceMonitorConvergenceLatencySeconds;
private final ClusterControllerClientFactory clusterControllerClientFactory;
private final Clock clock;
private final ApplicationApiFactory applicationApiFactory;
/**
 * Injection constructor: wires up the production {@link HostedVespaPolicy} and delegates to
 * the full constructor below, using the UTC system clock.
 */
@Inject
public OrchestratorImpl(ClusterControllerClientFactory clusterControllerClientFactory,
                        StatusService statusService,
                        OrchestratorConfig orchestratorConfig,
                        ServiceMonitor serviceMonitor,
                        ConfigserverConfig configServerConfig,
                        FlagSource flagSource)
{
    this(new HostedVespaPolicy(new HostedVespaClusterPolicy(flagSource),
                               clusterControllerClientFactory,
                               new ApplicationApiFactory(configServerConfig.zookeeperserver().size(), Clock.systemUTC())),
         clusterControllerClientFactory,
         statusService,
         serviceMonitor,
         orchestratorConfig.serviceMonitorConvergenceLatencySeconds(),
         Clock.systemUTC(),
         new ApplicationApiFactory(configServerConfig.zookeeperserver().size(), Clock.systemUTC()),
         flagSource);
}
/**
 * Full constructor (also used by the injection constructor above). Registers the status
 * service as a listener on the service monitor.
 */
public OrchestratorImpl(Policy policy,
                        ClusterControllerClientFactory clusterControllerClientFactory,
                        StatusService statusService,
                        ServiceMonitor serviceMonitor,
                        int serviceMonitorConvergenceLatencySeconds,
                        Clock clock,
                        ApplicationApiFactory applicationApiFactory,
                        FlagSource flagSource)
{
    this.policy = policy;
    this.clusterControllerClientFactory = clusterControllerClientFactory;
    this.statusService = statusService;
    this.serviceMonitorConvergenceLatencySeconds = serviceMonitorConvergenceLatencySeconds;
    this.serviceMonitor = serviceMonitor;
    this.clock = clock;
    this.applicationApiFactory = applicationApiFactory;
    // Keep the status service informed of service-model changes.
    serviceMonitor.registerListener(statusService);
}
/** Returns the host with its orchestration status and the service instances running on it. */
@Override
public Host getHost(HostName hostName) throws HostNameNotFoundException {
    // Narrow the application model to this host; unknown hosts fail with HostNameNotFoundException.
    ApplicationInstance application = serviceMonitor
            .getApplicationNarrowedTo(hostName)
            .orElseThrow(() -> new HostNameNotFoundException(hostName));
    ApplicationInstanceReference reference = application.reference();
    HostInfo hostInfo = statusService.getHostInfo(reference, hostName);
    // All service instances of the application that run on this particular host.
    List<ServiceInstance> servicesOnHost = application.serviceClusters().stream()
            .flatMap(cluster -> cluster.serviceInstances().stream())
            .filter(service -> hostName.equals(service.hostName()))
            .collect(toList());
    return new Host(hostName, hostInfo, reference, servicesOnHost);
}
/** Returns the orchestration status of the given host. */
@Override
public HostStatus getNodeStatus(HostName hostName) throws HostNameNotFoundException {
    // Resolve the owning application, then read the host's status from the status service.
    return statusService
            .getHostInfo(getApplicationInstanceReference(hostName), hostName)
            .status();
}
/** Returns the orchestration info of a host known to belong to the given application. */
@Override
public HostInfo getHostInfo(ApplicationInstanceReference reference, HostName hostname) {
    return statusService.getHostInfo(reference, hostname);
}
/** Returns a resolver mapping a host to its info, or empty if the host is unknown. */
@Override
public Function<HostName, Optional<HostInfo>> getHostResolver() {
    return hostName -> serviceMonitor
            .getApplicationInstanceReference(hostName)
            .map(reference -> statusService.getHostInfo(reference, hostName));
}
/**
 * Sets the orchestration status of a single host directly (no policy check), under the
 * application lock.
 *
 * @throws HostNameNotFoundException (an OrchestrationException) if the host is unknown
 */
@Override
public void setNodeStatus(HostName hostName, HostStatus status) throws OrchestrationException {
    ApplicationInstanceReference reference = getApplicationInstanceReference(hostName);
    OrchestratorContext context = OrchestratorContext.createContextForSingleAppOp(clock);
    try (ApplicationLock lock = statusService.lockApplication(context, reference)) {
        lock.setHostState(hostName, status);
    }
}
/**
 * Resumes the given host: releases its suspension grant so it is again considered up.
 * Sleeps first (outside the lock) so service monitoring can converge — see the comment below.
 */
@Override
public void resume(HostName hostName) throws HostStateChangeDeniedException, HostNameNotFoundException {
    /*
     * When making a state transition to this state, we have to consider that if the host has been in
     * ALLOWED_TO_BE_DOWN state, services on the host may recently have been stopped (and, presumably, started).
     * Service monitoring may not have had enough time to detect that services were stopped,
     * and may therefore mistakenly report services as up, even if they still haven't initialized and
     * are not yet ready for serving. Erroneously reporting both host and services as up causes a race
     * where services on other hosts may be stopped prematurely. A delay here ensures that service
     * monitoring will have had time to catch up. Since we don't want to do the delay with the lock held,
     * and the host status service's locking functionality does not support something like condition
     * variables or Object.wait(), we break out here, releasing the lock before delaying.
     *
     * 2020-02-07: We should utilize suspendedSince timestamp on the HostInfo: The above
     * is equivalent to guaranteeing a minimum time after suspendedSince, before checking
     * the health with service monitor. This should for all practical purposes remove
     * the amount of time in this sleep.
     * Caveat: Cannot be implemented before lingering HostInfo has been fixed (VESPA-17546).
     */
    sleep(serviceMonitorConvergenceLatencySeconds, TimeUnit.SECONDS);
    ApplicationInstance appInstance = getApplicationInstance(hostName);
    OrchestratorContext context = OrchestratorContext.createContextForSingleAppOp(clock);
    try (ApplicationLock lock = statusService.lockApplication(context, appInstance.reference())) {
        HostStatus currentHostState = lock.getHostInfos().getOrNoRemarks(hostName).status();
        // Host is already considered up: nothing to release.
        if (currentHostState == HostStatus.NO_REMARKS) {
            return;
        }
        // Do not resume a permanently-down host, nor a host of an application that is suspended as a whole.
        if (currentHostState == HostStatus.PERMANENTLY_DOWN ||
            lock.getApplicationInstanceStatus() == ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN) {
            return;
        }
        policy.releaseSuspensionGrant(context.createSubcontextWithinLock(), appInstance, hostName, lock);
    }
}
/** Suspends a single host — equivalent to suspending a one-host node group. */
@Override
public void suspend(HostName hostName) throws HostStateChangeDeniedException, HostNameNotFoundException {
    ApplicationInstance application = getApplicationInstance(hostName);
    OrchestratorContext context = OrchestratorContext.createContextForSingleAppOp(clock);
    suspendGroup(context, new NodeGroup(application, hostName));
}
/**
 * Asks the policy, under the application lock, for permission to remove the given host.
 * Throws (an OrchestrationException) if permission is denied.
 */
@Override
public void acquirePermissionToRemove(HostName hostName) throws OrchestrationException {
    ApplicationInstance appInstance = getApplicationInstance(hostName);
    NodeGroup nodeGroup = new NodeGroup(appInstance, hostName);
    OrchestratorContext context = OrchestratorContext.createContextForSingleAppOp(clock);
    try (ApplicationLock lock = statusService.lockApplication(context, appInstance.reference())) {
        ApplicationApi applicationApi = applicationApiFactory.create(nodeGroup, lock, clusterControllerClientFactory);
        policy.acquirePermissionToRemove(context.createSubcontextWithinLock(), applicationApi);
    }
}
/**
 * Suspend normal operations for a group of nodes in the same application.
 *
 * @param context the context for this single-application operation.
 * @param nodeGroup The group of nodes in an application.
 * @throws HostStateChangeDeniedException if the request cannot be met due to policy constraints.
 */
void suspendGroup(OrchestratorContext context, NodeGroup nodeGroup) throws HostStateChangeDeniedException {
    ApplicationInstanceReference applicationReference = nodeGroup.getApplicationReference();
    final SuspensionReasons suspensionReasons;
    try (ApplicationLock lock = statusService.lockApplication(context, applicationReference)) {
        ApplicationInstanceStatus appStatus = lock.getApplicationInstanceStatus();
        // If the whole application is already allowed to be down, the group is implicitly suspended.
        if (appStatus == ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN) {
            return;
        }
        ApplicationApi applicationApi = applicationApiFactory.create(
                nodeGroup, lock, clusterControllerClientFactory);
        suspensionReasons = policy.grantSuspensionRequest(context.createSubcontextWithinLock(), applicationApi);
    }
    // Log outside the lock.
    suspensionReasons.makeLogMessage().ifPresent(log::info);
}
/** Returns the orchestration status of the given application instance. */
@Override
public ApplicationInstanceStatus getApplicationInstanceStatus(ApplicationId appId) throws ApplicationIdNotFoundException {
    ApplicationInstanceReference reference = OrchestratorUtil.toApplicationInstanceReference(appId, serviceMonitor);
    return statusService.getApplicationInstanceStatus(reference);
}
/** Returns the IDs of all applications currently allowed to be down. */
@Override
public Set<ApplicationId> getAllSuspendedApplications() {
    Set<ApplicationInstanceReference> refSet = statusService.getAllSuspendedApplications();
    return refSet.stream().map(OrchestratorUtil::toApplicationId).collect(toSet());
}
/** Resumes the whole application: sets its status to NO_REMARKS. */
@Override
public void resume(ApplicationId appId) throws ApplicationIdNotFoundException, ApplicationStateChangeDeniedException {
    setApplicationStatus(appId, ApplicationInstanceStatus.NO_REMARKS);
}
/** Suspends the whole application: sets its status to ALLOWED_TO_BE_DOWN. */
@Override
public void suspend(ApplicationId appId) throws ApplicationIdNotFoundException, ApplicationStateChangeDeniedException {
    setApplicationStatus(appId, ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN);
}
/**
 * Suspends all the given hosts (typically all nodes on one parent host), grouped per
 * application and processed in the deterministic order of nodeGroupsOrderedForSuspend.
 * Runs a probe pass (probe=true) before the real pass — presumably to fail fast before any
 * real state changes; see createSubcontextForSingleAppOp.
 */
@Override
public void suspendAll(HostName parentHostname, List<HostName> hostNames)
        throws BatchHostStateChangeDeniedException, BatchHostNameNotFoundException, BatchInternalErrorException {
    try (OrchestratorContext context = OrchestratorContext.createContextForMultiAppOp(clock)) {
        List<NodeGroup> nodeGroupsOrderedByApplication;
        try {
            nodeGroupsOrderedByApplication = nodeGroupsOrderedForSuspend(hostNames);
        } catch (HostNameNotFoundException e) {
            throw new BatchHostNameNotFoundException(parentHostname, hostNames, e);
        }
        suspendAllNodeGroups(context, parentHostname, nodeGroupsOrderedByApplication, true);
        suspendAllNodeGroups(context, parentHostname, nodeGroupsOrderedByApplication, false);
    }
}
/**
 * Suspends each node group in order, translating failures to the batch exception types.
 * UncheckedTimeoutException is rethrown as-is; other runtime failures become internal errors.
 */
private void suspendAllNodeGroups(OrchestratorContext context,
                                  HostName parentHostname,
                                  List<NodeGroup> nodeGroupsOrderedByApplication,
                                  boolean probe)
        throws BatchHostStateChangeDeniedException, BatchInternalErrorException {
    for (NodeGroup nodeGroup : nodeGroupsOrderedByApplication) {
        try {
            suspendGroup(context.createSubcontextForSingleAppOp(probe), nodeGroup);
        } catch (HostStateChangeDeniedException e) {
            throw new BatchHostStateChangeDeniedException(parentHostname, nodeGroup, e);
        } catch (UncheckedTimeoutException e) {
            throw e;
        } catch (RuntimeException e) {
            throw new BatchInternalErrorException(parentHostname, nodeGroup, e);
        }
    }
}
/**
* PROBLEM
* Take the example of 2 Docker hosts:
* - Docker host 1 has two nodes A1 and B1, belonging to the application with
* a globally unique ID A and B, respectively.
* - Similarly, Docker host 2 has two nodes running content nodes A2 and B2,
* and we assume both A1 and A2 (and B1 and B2) have services within the same service cluster.
*
* Suppose both Docker hosts wanting to reboot, and
* - Docker host 1 asks to suspend A1 and B1, while
* - Docker host 2 asks to suspend B2 and A2.
*
* The Orchestrator may allow suspend of A1 and B2, before requesting the suspension of B1 and A2.
* None of these can be suspended (assuming max 1 suspended content node per content cluster),
* and so both requests for suspension will fail.
*
 * Note that it's not a deadlock - both clients will fail immediately and resume both A1 and B2 before
* responding to the client, and if host 1 asks later w/o host 2 asking at the same time,
* it will be given permission to suspend. However if both hosts were to request in lock-step,
* there would be starvation. And in general, it would fail requests for suspension more
* than necessary.
*
* SOLUTION
* The solution we're using is to order the hostnames by the globally unique application instance ID,
* e.g. hosted-vespa:routing:dev:some-region:default. In the example above, it would guarantee
 * Docker host 2 would make sure to ask to suspend B2 before A2. We take care of that ordering here.
*
* NodeGroups complicate the above picture a little: Each A1, A2, B1, and B2 is a NodeGroup that may
* contain several nodes (on the same Docker host). But the argument still applies.
*/
/**
 * Groups the given hosts by their owning application instance and returns the groups in the
 * deterministic order used for suspension (see the PROBLEM/SOLUTION comment above for why the
 * ordering matters).
 *
 * @param hostNames the hosts to group; several hosts may belong to the same application
 * @return one NodeGroup per application, sorted by application instance reference
 * @throws HostNameNotFoundException if any host is unknown to the service monitor
 */
private List<NodeGroup> nodeGroupsOrderedForSuspend(List<HostName> hostNames) throws HostNameNotFoundException {
    Map<ApplicationInstanceReference, NodeGroup> nodeGroupMap = new HashMap<>(hostNames.size());
    for (HostName hostName : hostNames) {
        ApplicationInstance application = getApplicationInstance(hostName);
        // One NodeGroup per application: create it lazily on the first host seen for that application.
        NodeGroup nodeGroup = nodeGroupMap.computeIfAbsent(application.reference(),
                                                           reference -> new NodeGroup(application));
        nodeGroup.addNode(hostName);
    }
    return nodeGroupMap.values().stream()
            .sorted(OrchestratorImpl::compareNodeGroupsForSuspend)
            .collect(toList());
}
/** Orders node groups lexicographically by their globally unique application instance ID. */
private static int compareNodeGroupsForSuspend(NodeGroup leftNodeGroup, NodeGroup rightNodeGroup) {
    String left = leftNodeGroup.getApplicationReference().asString();
    String right = rightNodeGroup.getApplicationReference().asString();
    return left.compareTo(right);
}
/**
 * Sets the orchestration status of an entire application instance, under the application lock.
 * When suspending, every not-yet-suspended host is first marked ALLOWED_TO_BE_DOWN and all
 * content clusters are set to MAINTENANCE in their cluster controllers.
 *
 * @throws ApplicationIdNotFoundException if the application is unknown to the service monitor
 * @throws ApplicationStateChangeDeniedException if a cluster controller rejects the state change
 */
private void setApplicationStatus(ApplicationId appId, ApplicationInstanceStatus status)
        throws ApplicationStateChangeDeniedException, ApplicationIdNotFoundException{
    OrchestratorContext context = OrchestratorContext.createContextForSingleAppOp(clock);
    ApplicationInstanceReference reference = OrchestratorUtil.toApplicationInstanceReference(appId, serviceMonitor);
    ApplicationInstance application = serviceMonitor.getApplication(reference)
            .orElseThrow(ApplicationIdNotFoundException::new);
    try (ApplicationLock lock = statusService.lockApplication(context, reference)) {
        // No-op if the application is already in the wanted status.
        if (status == lock.getApplicationInstanceStatus()) return;
        if (status == ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN) {
            // Mark each host that is not already suspended as allowed to be down ...
            HostInfos hostInfosSnapshot = lock.getHostInfos();
            OrchestratorUtil.getHostsUsedByApplicationInstance(application)
                    .stream()
                    .filter(hostname -> !hostInfosSnapshot.getOrNoRemarks(hostname).status().isSuspended())
                    .forEach(hostname -> lock.setHostState(hostname, HostStatus.ALLOWED_TO_BE_DOWN));
            // ... then tell the cluster controllers to put all content clusters in MAINTENANCE.
            setClusterStateInController(context.createSubcontextWithinLock(), application, MAINTENANCE);
        }
        lock.setApplicationInstanceStatus(status);
    }
}
/**
 * Sets the wanted cluster state of every content cluster of the application in its
 * cluster controllers.
 *
 * Note: the stray {@code @Override} annotation was removed — this is a private method and
 * cannot override anything; the annotation is a compile error on private methods.
 *
 * @throws ApplicationStateChangeDeniedException if a cluster controller rejects, fails,
 *         or times out while applying the state change
 */
private void setClusterStateInController(OrchestratorContext context,
                                         ApplicationInstance application,
                                         ClusterControllerNodeState state)
        throws ApplicationStateChangeDeniedException, ApplicationIdNotFoundException {
    // Collect all content clusters of the application.
    Set<ClusterId> contentClusterIds = application.serviceClusters().stream()
            .filter(VespaModelUtil::isContent)
            .map(ServiceCluster::clusterId)
            .collect(toSet());
    for (ClusterId clusterId : contentClusterIds) {
        List<HostName> clusterControllers = VespaModelUtil.getClusterControllerInstancesInOrder(application, clusterId);
        ClusterControllerClient client = clusterControllerClientFactory.createClient(
                clusterControllers,
                clusterId.s());
        try {
            ClusterControllerStateResponse response = client.setApplicationState(context, state);
            if (!response.wasModified) {
                String msg = String.format("Fail to set application %s, cluster name %s to cluster state %s due to: %s",
                        application.applicationInstanceId(), clusterId, state, response.reason);
                throw new ApplicationStateChangeDeniedException(msg);
            }
        } catch (IOException e) {
            // NOTE(review): only the message is kept, the cause is dropped — consider a
            // constructor overload taking the cause, if one exists.
            throw new ApplicationStateChangeDeniedException(e.getMessage());
        } catch (UncheckedTimeoutException e) {
            throw new ApplicationStateChangeDeniedException(
                    "Timed out while waiting for cluster controllers " + clusterControllers +
                            " with cluster ID " + clusterId.s() + ": " + e.getMessage());
        }
    }
}
/** Resolves the application instance reference a host belongs to, or fails if the host is unknown. */
private ApplicationInstanceReference getApplicationInstanceReference(HostName hostname) throws HostNameNotFoundException {
    Optional<ApplicationInstanceReference> reference = serviceMonitor.getApplicationInstanceReference(hostname);
    if (reference.isPresent()) return reference.get();
    throw new HostNameNotFoundException(hostname);
}
/** Resolves the application instance a host belongs to, or fails if the host is unknown. */
private ApplicationInstance getApplicationInstance(HostName hostName) throws HostNameNotFoundException{
    Optional<ApplicationInstance> application = serviceMonitor.getApplication(hostName);
    if (application.isPresent()) return application.get();
    throw new HostNameNotFoundException(hostName);
}
/**
 * Sleeps for the given duration.
 *
 * @throws RuntimeException wrapping the InterruptedException if the thread is interrupted;
 *         the interrupt flag is restored before throwing so callers can still observe it.
 */
private static void sleep(long time, TimeUnit timeUnit) {
    try {
        Thread.sleep(timeUnit.toMillis(time));
    } catch (InterruptedException e) {
        // Restore the interrupt status: translating to unchecked must not swallow the flag.
        Thread.currentThread().interrupt();
        throw new RuntimeException("Unexpectedly interrupted", e);
    }
}
} |
Or rather, create a new method for creating a special context for this type of application operation: it invokes the cluster controller once per node, so it probably needs more than the 10s time budget. | public boolean isQuiescent(ApplicationId id) {
try {
ApplicationInstance application = serviceMonitor.getApplication(OrchestratorUtil.toApplicationInstanceReference(id, serviceMonitor))
.orElseThrow(ApplicationIdNotFoundException::new);
List<ServiceCluster> contentClusters = application.serviceClusters().stream()
.filter(VespaModelUtil::isContent)
.collect(toList());
OrchestratorContext context = OrchestratorContext.createContextForSingleAppOp(clock)
.createSubcontextForSingleAppOp(true);
for (ServiceCluster cluster : contentClusters) {
List<HostName> clusterControllers = VespaModelUtil.getClusterControllerInstancesInOrder(application, cluster.clusterId());
ClusterControllerClient client = clusterControllerClientFactory.createClient(clusterControllers, cluster.clusterId().s());
for (ServiceInstance service : cluster.serviceInstances()) {
try {
ClusterControllerStateResponse response = client.setNodeState(context,
VespaModelUtil.getStorageNodeIndex(service.configId()),
MAINTENANCE);
if ( ! response.wasModified)
return false;
}
catch (Exception e) {
log.log(Level.INFO, "Failed probing for permission to set " + service + " in MAINTENANCE: " + Exceptions.toMessageString(e));
return false;
}
}
}
return true;
}
catch (ApplicationIdNotFoundException ignored) {
return false;
}
} | OrchestratorContext context = OrchestratorContext.createContextForSingleAppOp(clock) | public boolean isQuiescent(ApplicationId id) {
try {
ApplicationInstance application = serviceMonitor.getApplication(OrchestratorUtil.toApplicationInstanceReference(id, serviceMonitor))
.orElseThrow(ApplicationIdNotFoundException::new);
List<ServiceCluster> contentClusters = application.serviceClusters().stream()
.filter(VespaModelUtil::isContent)
.collect(toList());
OrchestratorContext context = OrchestratorContext.createContextForBatchProbe(clock);
for (ServiceCluster cluster : contentClusters) {
List<HostName> clusterControllers = VespaModelUtil.getClusterControllerInstancesInOrder(application, cluster.clusterId());
ClusterControllerClient client = clusterControllerClientFactory.createClient(clusterControllers, cluster.clusterId().s());
for (ServiceInstance service : cluster.serviceInstances()) {
try {
ClusterControllerStateResponse response = client.setNodeState(context,
VespaModelUtil.getStorageNodeIndex(service.configId()),
MAINTENANCE);
if ( ! response.wasModified)
return false;
}
catch (Exception e) {
log.log(Level.INFO, "Failed probing for permission to set " + service + " in MAINTENANCE: " + Exceptions.toMessageString(e));
return false;
}
}
}
return true;
}
catch (ApplicationIdNotFoundException ignored) {
return false;
}
} | class OrchestratorImpl implements Orchestrator {
private static final Logger log = Logger.getLogger(OrchestratorImpl.class.getName());
// Decides whether suspension/removal requests may be granted.
private final Policy policy;
// Tracks and locks per-application status (host-level and application-level).
private final StatusService statusService;
// Source of the service model: which applications and services run on which hosts.
private final ServiceMonitor serviceMonitor;
// Seconds to wait in resume(HostName) so service monitoring can catch up (see comment there).
private final int serviceMonitorConvergenceLatencySeconds;
private final ClusterControllerClientFactory clusterControllerClientFactory;
private final Clock clock;
private final ApplicationApiFactory applicationApiFactory;
/**
 * Injection constructor: wires up the production {@link HostedVespaPolicy} and delegates to
 * the full constructor below, using the UTC system clock.
 */
@Inject
public OrchestratorImpl(ClusterControllerClientFactory clusterControllerClientFactory,
                        StatusService statusService,
                        OrchestratorConfig orchestratorConfig,
                        ServiceMonitor serviceMonitor,
                        ConfigserverConfig configServerConfig,
                        FlagSource flagSource)
{
    this(new HostedVespaPolicy(new HostedVespaClusterPolicy(flagSource),
                               clusterControllerClientFactory,
                               new ApplicationApiFactory(configServerConfig.zookeeperserver().size(), Clock.systemUTC())),
         clusterControllerClientFactory,
         statusService,
         serviceMonitor,
         orchestratorConfig.serviceMonitorConvergenceLatencySeconds(),
         Clock.systemUTC(),
         new ApplicationApiFactory(configServerConfig.zookeeperserver().size(), Clock.systemUTC()),
         flagSource);
}
/**
 * Full constructor (also used by the injection constructor above). Registers the status
 * service as a listener on the service monitor.
 */
public OrchestratorImpl(Policy policy,
                        ClusterControllerClientFactory clusterControllerClientFactory,
                        StatusService statusService,
                        ServiceMonitor serviceMonitor,
                        int serviceMonitorConvergenceLatencySeconds,
                        Clock clock,
                        ApplicationApiFactory applicationApiFactory,
                        FlagSource flagSource)
{
    this.policy = policy;
    this.clusterControllerClientFactory = clusterControllerClientFactory;
    this.statusService = statusService;
    this.serviceMonitorConvergenceLatencySeconds = serviceMonitorConvergenceLatencySeconds;
    this.serviceMonitor = serviceMonitor;
    this.clock = clock;
    this.applicationApiFactory = applicationApiFactory;
    // Keep the status service informed of service-model changes.
    serviceMonitor.registerListener(statusService);
}
/** Returns the host with its orchestration status and the service instances running on it. */
@Override
public Host getHost(HostName hostName) throws HostNameNotFoundException {
    // Narrow the application model to this host; unknown hosts fail with HostNameNotFoundException.
    ApplicationInstance application = serviceMonitor
            .getApplicationNarrowedTo(hostName)
            .orElseThrow(() -> new HostNameNotFoundException(hostName));
    ApplicationInstanceReference reference = application.reference();
    HostInfo hostInfo = statusService.getHostInfo(reference, hostName);
    // All service instances of the application that run on this particular host.
    List<ServiceInstance> servicesOnHost = application.serviceClusters().stream()
            .flatMap(cluster -> cluster.serviceInstances().stream())
            .filter(service -> hostName.equals(service.hostName()))
            .collect(toList());
    return new Host(hostName, hostInfo, reference, servicesOnHost);
}
/** Returns the orchestration status of the given host. */
@Override
public HostStatus getNodeStatus(HostName hostName) throws HostNameNotFoundException {
    ApplicationInstanceReference reference = getApplicationInstanceReference(hostName);
    return statusService.getHostInfo(reference, hostName).status();
}
/** Returns the orchestration info of a host known to belong to the given application. */
@Override
public HostInfo getHostInfo(ApplicationInstanceReference reference, HostName hostname) {
    return statusService.getHostInfo(reference, hostname);
}
/** Returns a resolver mapping a host to its info, or empty if the host is unknown. */
@Override
public Function<HostName, Optional<HostInfo>> getHostResolver() {
    return hostName -> serviceMonitor
            .getApplicationInstanceReference(hostName)
            .map(reference -> statusService.getHostInfo(reference, hostName));
}
/**
 * Sets the orchestration status of a single host directly (no policy check), under the
 * application lock.
 *
 * @throws HostNameNotFoundException (an OrchestrationException) if the host is unknown
 */
@Override
public void setNodeStatus(HostName hostName, HostStatus status) throws OrchestrationException {
    ApplicationInstanceReference reference = getApplicationInstanceReference(hostName);
    OrchestratorContext context = OrchestratorContext.createContextForSingleAppOp(clock);
    try (ApplicationLock lock = statusService.lockApplication(context, reference)) {
        lock.setHostState(hostName, status);
    }
}
/**
 * Resumes the given host: releases its suspension grant so it is again considered up.
 * Sleeps first (outside the lock) so service monitoring can converge — see the comment below.
 */
@Override
public void resume(HostName hostName) throws HostStateChangeDeniedException, HostNameNotFoundException {
    /*
     * When making a state transition to this state, we have to consider that if the host has been in
     * ALLOWED_TO_BE_DOWN state, services on the host may recently have been stopped (and, presumably, started).
     * Service monitoring may not have had enough time to detect that services were stopped,
     * and may therefore mistakenly report services as up, even if they still haven't initialized and
     * are not yet ready for serving. Erroneously reporting both host and services as up causes a race
     * where services on other hosts may be stopped prematurely. A delay here ensures that service
     * monitoring will have had time to catch up. Since we don't want to do the delay with the lock held,
     * and the host status service's locking functionality does not support something like condition
     * variables or Object.wait(), we break out here, releasing the lock before delaying.
     *
     * 2020-02-07: We should utilize suspendedSince timestamp on the HostInfo: The above
     * is equivalent to guaranteeing a minimum time after suspendedSince, before checking
     * the health with service monitor. This should for all practical purposes remove
     * the amount of time in this sleep.
     * Caveat: Cannot be implemented before lingering HostInfo has been fixed (VESPA-17546).
     */
    sleep(serviceMonitorConvergenceLatencySeconds, TimeUnit.SECONDS);
    ApplicationInstance appInstance = getApplicationInstance(hostName);
    OrchestratorContext context = OrchestratorContext.createContextForSingleAppOp(clock);
    try (ApplicationLock lock = statusService.lockApplication(context, appInstance.reference())) {
        HostStatus currentHostState = lock.getHostInfos().getOrNoRemarks(hostName).status();
        // Host is already considered up: nothing to release.
        if (currentHostState == HostStatus.NO_REMARKS) {
            return;
        }
        // Do not resume a permanently-down host, nor a host of an application that is suspended as a whole.
        if (currentHostState == HostStatus.PERMANENTLY_DOWN ||
            lock.getApplicationInstanceStatus() == ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN) {
            return;
        }
        policy.releaseSuspensionGrant(context.createSubcontextWithinLock(), appInstance, hostName, lock);
    }
}
/** Suspends a single host — equivalent to suspending a one-host node group. */
@Override
public void suspend(HostName hostName) throws HostStateChangeDeniedException, HostNameNotFoundException {
    ApplicationInstance application = getApplicationInstance(hostName);
    OrchestratorContext context = OrchestratorContext.createContextForSingleAppOp(clock);
    suspendGroup(context, new NodeGroup(application, hostName));
}
/**
 * Asks the policy, under the application lock, for permission to remove the given host.
 * Throws (an OrchestrationException) if permission is denied.
 */
@Override
public void acquirePermissionToRemove(HostName hostName) throws OrchestrationException {
    ApplicationInstance appInstance = getApplicationInstance(hostName);
    NodeGroup nodeGroup = new NodeGroup(appInstance, hostName);
    OrchestratorContext context = OrchestratorContext.createContextForSingleAppOp(clock);
    try (ApplicationLock lock = statusService.lockApplication(context, appInstance.reference())) {
        ApplicationApi applicationApi = applicationApiFactory.create(nodeGroup, lock, clusterControllerClientFactory);
        policy.acquirePermissionToRemove(context.createSubcontextWithinLock(), applicationApi);
    }
}
/**
 * Suspend normal operations for a group of nodes in the same application.
 *
 * @param context the context for this single-application operation.
 * @param nodeGroup The group of nodes in an application.
 * @throws HostStateChangeDeniedException if the request cannot be met due to policy constraints.
 */
void suspendGroup(OrchestratorContext context, NodeGroup nodeGroup) throws HostStateChangeDeniedException {
    ApplicationInstanceReference applicationReference = nodeGroup.getApplicationReference();
    final SuspensionReasons suspensionReasons;
    try (ApplicationLock lock = statusService.lockApplication(context, applicationReference)) {
        ApplicationInstanceStatus appStatus = lock.getApplicationInstanceStatus();
        // If the whole application is already allowed to be down, the group is implicitly suspended.
        if (appStatus == ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN) {
            return;
        }
        ApplicationApi applicationApi = applicationApiFactory.create(
                nodeGroup, lock, clusterControllerClientFactory);
        suspensionReasons = policy.grantSuspensionRequest(context.createSubcontextWithinLock(), applicationApi);
    }
    // Log outside the lock.
    suspensionReasons.makeLogMessage().ifPresent(log::info);
}
/** Returns the orchestration status of the given application instance. */
@Override
public ApplicationInstanceStatus getApplicationInstanceStatus(ApplicationId appId) throws ApplicationIdNotFoundException {
    ApplicationInstanceReference reference = OrchestratorUtil.toApplicationInstanceReference(appId, serviceMonitor);
    return statusService.getApplicationInstanceStatus(reference);
}
/** Returns the IDs of all applications currently allowed to be down. */
@Override
public Set<ApplicationId> getAllSuspendedApplications() {
    Set<ApplicationInstanceReference> refSet = statusService.getAllSuspendedApplications();
    return refSet.stream().map(OrchestratorUtil::toApplicationId).collect(toSet());
}
/** Resumes the whole application: sets its status to NO_REMARKS. */
@Override
public void resume(ApplicationId appId) throws ApplicationIdNotFoundException, ApplicationStateChangeDeniedException {
    setApplicationStatus(appId, ApplicationInstanceStatus.NO_REMARKS);
}
/** Suspends the whole application: sets its status to ALLOWED_TO_BE_DOWN. */
@Override
public void suspend(ApplicationId appId) throws ApplicationIdNotFoundException, ApplicationStateChangeDeniedException {
    setApplicationStatus(appId, ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN);
}
/**
 * Suspends all the given hosts (typically all nodes on one parent host), grouped per
 * application and processed in the deterministic order of nodeGroupsOrderedForSuspend.
 * Runs a probe pass (probe=true) before the real pass — presumably to fail fast before any
 * real state changes; see createSubcontextForSingleAppOp.
 */
@Override
public void suspendAll(HostName parentHostname, List<HostName> hostNames)
        throws BatchHostStateChangeDeniedException, BatchHostNameNotFoundException, BatchInternalErrorException {
    try (OrchestratorContext context = OrchestratorContext.createContextForMultiAppOp(clock)) {
        List<NodeGroup> nodeGroupsOrderedByApplication;
        try {
            nodeGroupsOrderedByApplication = nodeGroupsOrderedForSuspend(hostNames);
        } catch (HostNameNotFoundException e) {
            throw new BatchHostNameNotFoundException(parentHostname, hostNames, e);
        }
        suspendAllNodeGroups(context, parentHostname, nodeGroupsOrderedByApplication, true);
        suspendAllNodeGroups(context, parentHostname, nodeGroupsOrderedByApplication, false);
    }
}
/**
 * Suspends each node group in order, translating failures to the batch exception types.
 * UncheckedTimeoutException is rethrown as-is; other runtime failures become internal errors.
 */
private void suspendAllNodeGroups(OrchestratorContext context,
                                  HostName parentHostname,
                                  List<NodeGroup> nodeGroupsOrderedByApplication,
                                  boolean probe)
        throws BatchHostStateChangeDeniedException, BatchInternalErrorException {
    for (NodeGroup nodeGroup : nodeGroupsOrderedByApplication) {
        try {
            suspendGroup(context.createSubcontextForSingleAppOp(probe), nodeGroup);
        } catch (HostStateChangeDeniedException e) {
            throw new BatchHostStateChangeDeniedException(parentHostname, nodeGroup, e);
        } catch (UncheckedTimeoutException e) {
            throw e;
        } catch (RuntimeException e) {
            throw new BatchInternalErrorException(parentHostname, nodeGroup, e);
        }
    }
}
/**
* PROBLEM
* Take the example of 2 Docker hosts:
* - Docker host 1 has two nodes A1 and B1, belonging to the application with
* a globally unique ID A and B, respectively.
* - Similarly, Docker host 2 has two nodes running content nodes A2 and B2,
* and we assume both A1 and A2 (and B1 and B2) have services within the same service cluster.
*
* Suppose both Docker hosts wanting to reboot, and
* - Docker host 1 asks to suspend A1 and B1, while
* - Docker host 2 asks to suspend B2 and A2.
*
* The Orchestrator may allow suspend of A1 and B2, before requesting the suspension of B1 and A2.
* None of these can be suspended (assuming max 1 suspended content node per content cluster),
* and so both requests for suspension will fail.
*
* Note that it's not a deadlock - both client will fail immediately and resume both A1 and B2 before
* responding to the client, and if host 1 asks later w/o host 2 asking at the same time,
* it will be given permission to suspend. However if both hosts were to request in lock-step,
* there would be starvation. And in general, it would fail requests for suspension more
* than necessary.
*
* SOLUTION
* The solution we're using is to order the hostnames by the globally unique application instance ID,
* e.g. hosted-vespa:routing:dev:some-region:default. In the example above, it would guarantee
* Docker host 2 would ensure ask to suspend B2 before A2. We take care of that ordering here.
*
* NodeGroups complicate the above picture a little: Each A1, A2, B1, and B2 is a NodeGroup that may
* contain several nodes (on the same Docker host). But the argument still applies.
*/
private List<NodeGroup> nodeGroupsOrderedForSuspend(List<HostName> hostNames) throws HostNameNotFoundException {
Map<ApplicationInstanceReference, NodeGroup> nodeGroupMap = new HashMap<>(hostNames.size());
for (HostName hostName : hostNames) {
ApplicationInstance application = getApplicationInstance(hostName);
NodeGroup nodeGroup = nodeGroupMap.get(application.reference());
if (nodeGroup == null) {
nodeGroup = new NodeGroup(application);
nodeGroupMap.put(application.reference(), nodeGroup);
}
nodeGroup.addNode(hostName);
}
return nodeGroupMap.values().stream()
.sorted(OrchestratorImpl::compareNodeGroupsForSuspend)
.collect(toList());
}
private static int compareNodeGroupsForSuspend(NodeGroup leftNodeGroup, NodeGroup rightNodeGroup) {
ApplicationInstanceReference leftApplicationReference = leftNodeGroup.getApplicationReference();
ApplicationInstanceReference rightApplicationReference = rightNodeGroup.getApplicationReference();
return leftApplicationReference.asString().compareTo(rightApplicationReference.asString());
}
private void setApplicationStatus(ApplicationId appId, ApplicationInstanceStatus status)
throws ApplicationStateChangeDeniedException, ApplicationIdNotFoundException{
OrchestratorContext context = OrchestratorContext.createContextForSingleAppOp(clock);
ApplicationInstanceReference reference = OrchestratorUtil.toApplicationInstanceReference(appId, serviceMonitor);
ApplicationInstance application = serviceMonitor.getApplication(reference)
.orElseThrow(ApplicationIdNotFoundException::new);
try (ApplicationLock lock = statusService.lockApplication(context, reference)) {
if (status == lock.getApplicationInstanceStatus()) return;
if (status == ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN) {
HostInfos hostInfosSnapshot = lock.getHostInfos();
OrchestratorUtil.getHostsUsedByApplicationInstance(application)
.stream()
.filter(hostname -> !hostInfosSnapshot.getOrNoRemarks(hostname).status().isSuspended())
.forEach(hostname -> lock.setHostState(hostname, HostStatus.ALLOWED_TO_BE_DOWN));
setClusterStateInController(context.createSubcontextWithinLock(), application, MAINTENANCE);
}
lock.setApplicationInstanceStatus(status);
}
}
@Override
private void setClusterStateInController(OrchestratorContext context,
ApplicationInstance application,
ClusterControllerNodeState state)
throws ApplicationStateChangeDeniedException, ApplicationIdNotFoundException {
Set<ClusterId> contentClusterIds = application.serviceClusters().stream()
.filter(VespaModelUtil::isContent)
.map(ServiceCluster::clusterId)
.collect(toSet());
for (ClusterId clusterId : contentClusterIds) {
List<HostName> clusterControllers = VespaModelUtil.getClusterControllerInstancesInOrder(application, clusterId);
ClusterControllerClient client = clusterControllerClientFactory.createClient(
clusterControllers,
clusterId.s());
try {
ClusterControllerStateResponse response = client.setApplicationState(context, state);
if (!response.wasModified) {
String msg = String.format("Fail to set application %s, cluster name %s to cluster state %s due to: %s",
application.applicationInstanceId(), clusterId, state, response.reason);
throw new ApplicationStateChangeDeniedException(msg);
}
} catch (IOException e) {
throw new ApplicationStateChangeDeniedException(e.getMessage());
} catch (UncheckedTimeoutException e) {
throw new ApplicationStateChangeDeniedException(
"Timed out while waiting for cluster controllers " + clusterControllers +
" with cluster ID " + clusterId.s() + ": " + e.getMessage());
}
}
}
private ApplicationInstanceReference getApplicationInstanceReference(HostName hostname) throws HostNameNotFoundException {
return serviceMonitor.getApplicationInstanceReference(hostname)
.orElseThrow(() -> new HostNameNotFoundException(hostname));
}
private ApplicationInstance getApplicationInstance(HostName hostName) throws HostNameNotFoundException{
return serviceMonitor.getApplication(hostName)
.orElseThrow(() -> new HostNameNotFoundException(hostName));
}
private static void sleep(long time, TimeUnit timeUnit) {
try {
Thread.sleep(timeUnit.toMillis(time));
} catch (InterruptedException e) {
throw new RuntimeException("Unexpectedly interrupted", e);
}
}
} | class OrchestratorImpl implements Orchestrator {
private static final Logger log = Logger.getLogger(OrchestratorImpl.class.getName());
private final Policy policy;
private final StatusService statusService;
private final ServiceMonitor serviceMonitor;
private final int serviceMonitorConvergenceLatencySeconds;
private final ClusterControllerClientFactory clusterControllerClientFactory;
private final Clock clock;
private final ApplicationApiFactory applicationApiFactory;
@Inject
public OrchestratorImpl(ClusterControllerClientFactory clusterControllerClientFactory,
StatusService statusService,
OrchestratorConfig orchestratorConfig,
ServiceMonitor serviceMonitor,
ConfigserverConfig configServerConfig,
FlagSource flagSource)
{
this(new HostedVespaPolicy(new HostedVespaClusterPolicy(flagSource),
clusterControllerClientFactory,
new ApplicationApiFactory(configServerConfig.zookeeperserver().size(), Clock.systemUTC())),
clusterControllerClientFactory,
statusService,
serviceMonitor,
orchestratorConfig.serviceMonitorConvergenceLatencySeconds(),
Clock.systemUTC(),
new ApplicationApiFactory(configServerConfig.zookeeperserver().size(), Clock.systemUTC()),
flagSource);
}
public OrchestratorImpl(Policy policy,
ClusterControllerClientFactory clusterControllerClientFactory,
StatusService statusService,
ServiceMonitor serviceMonitor,
int serviceMonitorConvergenceLatencySeconds,
Clock clock,
ApplicationApiFactory applicationApiFactory,
FlagSource flagSource)
{
this.policy = policy;
this.clusterControllerClientFactory = clusterControllerClientFactory;
this.statusService = statusService;
this.serviceMonitorConvergenceLatencySeconds = serviceMonitorConvergenceLatencySeconds;
this.serviceMonitor = serviceMonitor;
this.clock = clock;
this.applicationApiFactory = applicationApiFactory;
serviceMonitor.registerListener(statusService);
}
@Override
public Host getHost(HostName hostName) throws HostNameNotFoundException {
ApplicationInstance applicationInstance = serviceMonitor
.getApplicationNarrowedTo(hostName)
.orElseThrow(() -> new HostNameNotFoundException(hostName));
List<ServiceInstance> serviceInstances = applicationInstance
.serviceClusters().stream()
.flatMap(cluster -> cluster.serviceInstances().stream())
.filter(serviceInstance -> hostName.equals(serviceInstance.hostName()))
.collect(toList());
HostInfo hostInfo = statusService.getHostInfo(applicationInstance.reference(), hostName);
return new Host(hostName, hostInfo, applicationInstance.reference(), serviceInstances);
}
@Override
public HostStatus getNodeStatus(HostName hostName) throws HostNameNotFoundException {
ApplicationInstanceReference reference = getApplicationInstanceReference(hostName);
return statusService.getHostInfo(reference, hostName).status();
}
@Override
public HostInfo getHostInfo(ApplicationInstanceReference reference, HostName hostname) {
return statusService.getHostInfo(reference, hostname);
}
@Override
public Function<HostName, Optional<HostInfo>> getHostResolver() {
return hostName -> serviceMonitor
.getApplicationInstanceReference(hostName)
.map(reference -> statusService.getHostInfo(reference, hostName));
}
@Override
public void setNodeStatus(HostName hostName, HostStatus status) throws OrchestrationException {
ApplicationInstanceReference reference = getApplicationInstanceReference(hostName);
OrchestratorContext context = OrchestratorContext.createContextForSingleAppOp(clock);
try (ApplicationLock lock = statusService.lockApplication(context, reference)) {
lock.setHostState(hostName, status);
}
}
@Override
public void resume(HostName hostName) throws HostStateChangeDeniedException, HostNameNotFoundException {
/*
* When making a state transition to this state, we have to consider that if the host has been in
* ALLOWED_TO_BE_DOWN state, services on the host may recently have been stopped (and, presumably, started).
* Service monitoring may not have had enough time to detect that services were stopped,
* and may therefore mistakenly report services as up, even if they still haven't initialized and
* are not yet ready for serving. Erroneously reporting both host and services as up causes a race
* where services on other hosts may be stopped prematurely. A delay here ensures that service
* monitoring will have had time to catch up. Since we don't want do the delay with the lock held,
* and the host status service's locking functionality does not support something like condition
* variables or Object.wait(), we break out here, releasing the lock before delaying.
*
* 2020-02-07: We should utilize suspendedSince timestamp on the HostInfo: The above
* is equivalent to guaranteeing a minimum time after suspendedSince, before checking
* the health with service monitor. This should for all practical purposes remove
* the amount of time in this sleep.
* Caveat: Cannot be implemented before lingering HostInfo has been fixed (VESPA-17546).
*/
sleep(serviceMonitorConvergenceLatencySeconds, TimeUnit.SECONDS);
ApplicationInstance appInstance = getApplicationInstance(hostName);
OrchestratorContext context = OrchestratorContext.createContextForSingleAppOp(clock);
try (ApplicationLock lock = statusService.lockApplication(context, appInstance.reference())) {
HostStatus currentHostState = lock.getHostInfos().getOrNoRemarks(hostName).status();
if (currentHostState == HostStatus.NO_REMARKS) {
return;
}
if (currentHostState == HostStatus.PERMANENTLY_DOWN ||
lock.getApplicationInstanceStatus() == ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN) {
return;
}
policy.releaseSuspensionGrant(context.createSubcontextWithinLock(), appInstance, hostName, lock);
}
}
@Override
public void suspend(HostName hostName) throws HostStateChangeDeniedException, HostNameNotFoundException {
ApplicationInstance appInstance = getApplicationInstance(hostName);
NodeGroup nodeGroup = new NodeGroup(appInstance, hostName);
suspendGroup(OrchestratorContext.createContextForSingleAppOp(clock), nodeGroup);
}
@Override
public void acquirePermissionToRemove(HostName hostName) throws OrchestrationException {
ApplicationInstance appInstance = getApplicationInstance(hostName);
NodeGroup nodeGroup = new NodeGroup(appInstance, hostName);
OrchestratorContext context = OrchestratorContext.createContextForSingleAppOp(clock);
try (ApplicationLock lock = statusService.lockApplication(context, appInstance.reference())) {
ApplicationApi applicationApi = applicationApiFactory.create(nodeGroup, lock, clusterControllerClientFactory);
policy.acquirePermissionToRemove(context.createSubcontextWithinLock(), applicationApi);
}
}
/**
* Suspend normal operations for a group of nodes in the same application.
*
* @param nodeGroup The group of nodes in an application.
* @throws HostStateChangeDeniedException if the request cannot be met due to policy constraints.
*/
void suspendGroup(OrchestratorContext context, NodeGroup nodeGroup) throws HostStateChangeDeniedException {
ApplicationInstanceReference applicationReference = nodeGroup.getApplicationReference();
final SuspensionReasons suspensionReasons;
try (ApplicationLock lock = statusService.lockApplication(context, applicationReference)) {
ApplicationInstanceStatus appStatus = lock.getApplicationInstanceStatus();
if (appStatus == ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN) {
return;
}
ApplicationApi applicationApi = applicationApiFactory.create(
nodeGroup, lock, clusterControllerClientFactory);
suspensionReasons = policy.grantSuspensionRequest(context.createSubcontextWithinLock(), applicationApi);
}
suspensionReasons.makeLogMessage().ifPresent(log::info);
}
@Override
public ApplicationInstanceStatus getApplicationInstanceStatus(ApplicationId appId) throws ApplicationIdNotFoundException {
ApplicationInstanceReference reference = OrchestratorUtil.toApplicationInstanceReference(appId, serviceMonitor);
return statusService.getApplicationInstanceStatus(reference);
}
@Override
public Set<ApplicationId> getAllSuspendedApplications() {
Set<ApplicationInstanceReference> refSet = statusService.getAllSuspendedApplications();
return refSet.stream().map(OrchestratorUtil::toApplicationId).collect(toSet());
}
@Override
public void resume(ApplicationId appId) throws ApplicationIdNotFoundException, ApplicationStateChangeDeniedException {
setApplicationStatus(appId, ApplicationInstanceStatus.NO_REMARKS);
}
@Override
public void suspend(ApplicationId appId) throws ApplicationIdNotFoundException, ApplicationStateChangeDeniedException {
setApplicationStatus(appId, ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN);
}
@Override
public void suspendAll(HostName parentHostname, List<HostName> hostNames)
throws BatchHostStateChangeDeniedException, BatchHostNameNotFoundException, BatchInternalErrorException {
try (OrchestratorContext context = OrchestratorContext.createContextForMultiAppOp(clock)) {
List<NodeGroup> nodeGroupsOrderedByApplication;
try {
nodeGroupsOrderedByApplication = nodeGroupsOrderedForSuspend(hostNames);
} catch (HostNameNotFoundException e) {
throw new BatchHostNameNotFoundException(parentHostname, hostNames, e);
}
suspendAllNodeGroups(context, parentHostname, nodeGroupsOrderedByApplication, true);
suspendAllNodeGroups(context, parentHostname, nodeGroupsOrderedByApplication, false);
}
}
private void suspendAllNodeGroups(OrchestratorContext context,
HostName parentHostname,
List<NodeGroup> nodeGroupsOrderedByApplication,
boolean probe)
throws BatchHostStateChangeDeniedException, BatchInternalErrorException {
for (NodeGroup nodeGroup : nodeGroupsOrderedByApplication) {
try {
suspendGroup(context.createSubcontextForSingleAppOp(probe), nodeGroup);
} catch (HostStateChangeDeniedException e) {
throw new BatchHostStateChangeDeniedException(parentHostname, nodeGroup, e);
} catch (UncheckedTimeoutException e) {
throw e;
} catch (RuntimeException e) {
throw new BatchInternalErrorException(parentHostname, nodeGroup, e);
}
}
}
/**
* PROBLEM
* Take the example of 2 Docker hosts:
* - Docker host 1 has two nodes A1 and B1, belonging to the application with
* a globally unique ID A and B, respectively.
* - Similarly, Docker host 2 has two nodes running content nodes A2 and B2,
* and we assume both A1 and A2 (and B1 and B2) have services within the same service cluster.
*
* Suppose both Docker hosts wanting to reboot, and
* - Docker host 1 asks to suspend A1 and B1, while
* - Docker host 2 asks to suspend B2 and A2.
*
* The Orchestrator may allow suspend of A1 and B2, before requesting the suspension of B1 and A2.
* None of these can be suspended (assuming max 1 suspended content node per content cluster),
* and so both requests for suspension will fail.
*
* Note that it's not a deadlock - both client will fail immediately and resume both A1 and B2 before
* responding to the client, and if host 1 asks later w/o host 2 asking at the same time,
* it will be given permission to suspend. However if both hosts were to request in lock-step,
* there would be starvation. And in general, it would fail requests for suspension more
* than necessary.
*
* SOLUTION
* The solution we're using is to order the hostnames by the globally unique application instance ID,
* e.g. hosted-vespa:routing:dev:some-region:default. In the example above, it would guarantee
* Docker host 2 would ensure ask to suspend B2 before A2. We take care of that ordering here.
*
* NodeGroups complicate the above picture a little: Each A1, A2, B1, and B2 is a NodeGroup that may
* contain several nodes (on the same Docker host). But the argument still applies.
*/
private List<NodeGroup> nodeGroupsOrderedForSuspend(List<HostName> hostNames) throws HostNameNotFoundException {
Map<ApplicationInstanceReference, NodeGroup> nodeGroupMap = new HashMap<>(hostNames.size());
for (HostName hostName : hostNames) {
ApplicationInstance application = getApplicationInstance(hostName);
NodeGroup nodeGroup = nodeGroupMap.get(application.reference());
if (nodeGroup == null) {
nodeGroup = new NodeGroup(application);
nodeGroupMap.put(application.reference(), nodeGroup);
}
nodeGroup.addNode(hostName);
}
return nodeGroupMap.values().stream()
.sorted(OrchestratorImpl::compareNodeGroupsForSuspend)
.collect(toList());
}
private static int compareNodeGroupsForSuspend(NodeGroup leftNodeGroup, NodeGroup rightNodeGroup) {
ApplicationInstanceReference leftApplicationReference = leftNodeGroup.getApplicationReference();
ApplicationInstanceReference rightApplicationReference = rightNodeGroup.getApplicationReference();
return leftApplicationReference.asString().compareTo(rightApplicationReference.asString());
}
private void setApplicationStatus(ApplicationId appId, ApplicationInstanceStatus status)
throws ApplicationStateChangeDeniedException, ApplicationIdNotFoundException{
OrchestratorContext context = OrchestratorContext.createContextForSingleAppOp(clock);
ApplicationInstanceReference reference = OrchestratorUtil.toApplicationInstanceReference(appId, serviceMonitor);
ApplicationInstance application = serviceMonitor.getApplication(reference)
.orElseThrow(ApplicationIdNotFoundException::new);
try (ApplicationLock lock = statusService.lockApplication(context, reference)) {
if (status == lock.getApplicationInstanceStatus()) return;
if (status == ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN) {
HostInfos hostInfosSnapshot = lock.getHostInfos();
OrchestratorUtil.getHostsUsedByApplicationInstance(application)
.stream()
.filter(hostname -> !hostInfosSnapshot.getOrNoRemarks(hostname).status().isSuspended())
.forEach(hostname -> lock.setHostState(hostname, HostStatus.ALLOWED_TO_BE_DOWN));
setClusterStateInController(context.createSubcontextWithinLock(), application, MAINTENANCE);
}
lock.setApplicationInstanceStatus(status);
}
}
@Override
private void setClusterStateInController(OrchestratorContext context,
ApplicationInstance application,
ClusterControllerNodeState state)
throws ApplicationStateChangeDeniedException, ApplicationIdNotFoundException {
Set<ClusterId> contentClusterIds = application.serviceClusters().stream()
.filter(VespaModelUtil::isContent)
.map(ServiceCluster::clusterId)
.collect(toSet());
for (ClusterId clusterId : contentClusterIds) {
List<HostName> clusterControllers = VespaModelUtil.getClusterControllerInstancesInOrder(application, clusterId);
ClusterControllerClient client = clusterControllerClientFactory.createClient(
clusterControllers,
clusterId.s());
try {
ClusterControllerStateResponse response = client.setApplicationState(context, state);
if (!response.wasModified) {
String msg = String.format("Fail to set application %s, cluster name %s to cluster state %s due to: %s",
application.applicationInstanceId(), clusterId, state, response.reason);
throw new ApplicationStateChangeDeniedException(msg);
}
} catch (IOException e) {
throw new ApplicationStateChangeDeniedException(e.getMessage());
} catch (UncheckedTimeoutException e) {
throw new ApplicationStateChangeDeniedException(
"Timed out while waiting for cluster controllers " + clusterControllers +
" with cluster ID " + clusterId.s() + ": " + e.getMessage());
}
}
}
private ApplicationInstanceReference getApplicationInstanceReference(HostName hostname) throws HostNameNotFoundException {
return serviceMonitor.getApplicationInstanceReference(hostname)
.orElseThrow(() -> new HostNameNotFoundException(hostname));
}
private ApplicationInstance getApplicationInstance(HostName hostName) throws HostNameNotFoundException{
return serviceMonitor.getApplication(hostName)
.orElseThrow(() -> new HostNameNotFoundException(hostName));
}
private static void sleep(long time, TimeUnit timeUnit) {
try {
Thread.sleep(timeUnit.toMillis(time));
} catch (InterruptedException e) {
throw new RuntimeException("Unexpectedly interrupted", e);
}
}
} |
😆 | public void syncAggregatedStats() {
prevMayHaveMergesPending = new HashMap<>();
for (ContentNodeStats contentNodeStats : aggregatedStats.getStats()) {
int nodeIndex = contentNodeStats.getNodeIndex();
prevMayHaveMergesPending.put(nodeIndex, mayHaveMergesPendingInGlobalSpace(nodeIndex));
}
} | int nodeIndex = contentNodeStats.getNodeIndex(); | public void syncAggregatedStats() {
prevMayHaveMergesPending = new HashMap<>();
for (ContentNodeStats contentNodeStats : aggregatedStats.getStats()) {
int nodeIndex = contentNodeStats.getNodeIndex();
prevMayHaveMergesPending.put(nodeIndex, mayHaveMergesPendingInGlobalSpace(nodeIndex));
}
} | class ClusterStatsChangeTracker {
private AggregatedClusterStats aggregatedStats;
private AggregatedStatsMergePendingChecker checker;
private Map<Integer, Boolean> prevMayHaveMergesPending = null;
public ClusterStatsChangeTracker(AggregatedClusterStats aggregatedStats,
double minMergeCompletionRatio) {
setAggregatedStats(aggregatedStats, minMergeCompletionRatio);
}
private void setAggregatedStats(AggregatedClusterStats aggregatedStats,
double minMergeCompletionRatio) {
this.aggregatedStats = aggregatedStats;
checker = new AggregatedStatsMergePendingChecker(this.aggregatedStats, minMergeCompletionRatio);
}
public void updateAggregatedStats(AggregatedClusterStats newAggregatedStats,
double minMergeCompletionRatio) {
syncAggregatedStats();
setAggregatedStats(newAggregatedStats, minMergeCompletionRatio);
}
public boolean statsHaveChanged() {
if (!aggregatedStats.hasUpdatesFromAllDistributors()) {
return false;
}
for (ContentNodeStats contentNodeStats : aggregatedStats.getStats()) {
int nodeIndex = contentNodeStats.getNodeIndex();
boolean currValue = mayHaveMergesPendingInGlobalSpace(nodeIndex);
Boolean prevValue = prevMayHaveMergesPendingInGlobalSpace(nodeIndex);
if (prevValue != null) {
if (prevValue != currValue) {
return true;
}
} else {
return true;
}
}
return false;
}
private boolean mayHaveMergesPendingInGlobalSpace(int nodeIndex) {
return checker.mayHaveMergesPending(FixedBucketSpaces.globalSpace(), nodeIndex);
}
private Boolean prevMayHaveMergesPendingInGlobalSpace(int nodeIndex) {
if (prevMayHaveMergesPending != null) {
return prevMayHaveMergesPending.get(nodeIndex);
}
return null;
}
} | class ClusterStatsChangeTracker {
private AggregatedClusterStats aggregatedStats;
private AggregatedStatsMergePendingChecker checker;
private Map<Integer, Boolean> prevMayHaveMergesPending = null;
public ClusterStatsChangeTracker(AggregatedClusterStats aggregatedStats,
double minMergeCompletionRatio) {
setAggregatedStats(aggregatedStats, minMergeCompletionRatio);
}
private void setAggregatedStats(AggregatedClusterStats aggregatedStats,
double minMergeCompletionRatio) {
this.aggregatedStats = aggregatedStats;
checker = new AggregatedStatsMergePendingChecker(this.aggregatedStats, minMergeCompletionRatio);
}
public void updateAggregatedStats(AggregatedClusterStats newAggregatedStats,
double minMergeCompletionRatio) {
syncAggregatedStats();
setAggregatedStats(newAggregatedStats, minMergeCompletionRatio);
}
public boolean statsHaveChanged() {
if (!aggregatedStats.hasUpdatesFromAllDistributors()) {
return false;
}
for (ContentNodeStats contentNodeStats : aggregatedStats.getStats()) {
int nodeIndex = contentNodeStats.getNodeIndex();
boolean currValue = mayHaveMergesPendingInGlobalSpace(nodeIndex);
Boolean prevValue = prevMayHaveMergesPendingInGlobalSpace(nodeIndex);
if (prevValue != null) {
if (prevValue != currValue) {
return true;
}
} else {
return true;
}
}
return false;
}
private boolean mayHaveMergesPendingInGlobalSpace(int nodeIndex) {
return checker.mayHaveMergesPending(FixedBucketSpaces.globalSpace(), nodeIndex);
}
private Boolean prevMayHaveMergesPendingInGlobalSpace(int nodeIndex) {
if (prevMayHaveMergesPending != null) {
return prevMayHaveMergesPending.get(nodeIndex);
}
return null;
}
} |
Default is 5, which makes little sense with 4 services. | public void testSubsetService() {
PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
frame.setMessage(new PutDocumentMessage(new DocumentPut(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::"))))));
int numberOfServices = 4;
frame.setHop(new HopSpec("test", "docproc/cluster.default/[SubsetService:2]/chain.default"));
Set<String> lst = new HashSet<>();
for (int i = 1; i <= numberOfServices; ++i) {
frame.getNetwork().registerSession(i + "/chain.default");
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", i));
RoutingNode leaf = frame.select(1).get(0);
lst.add(leaf.getRoute().toString());
leaf.handleReply(new EmptyReply());
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
assertTrue(lst.size() > 1);
String prev = null;
for (int i = 1; i <= numberOfServices; ++i) {
RoutingNode leaf = frame.select(1).get(0);
String next = leaf.getRoute().toString();
if (prev == null) {
assertNotNull(next);
} else {
assertNotEquals(prev, next);
}
prev = next;
leaf.handleReply(new EmptyReply());
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
lst.clear();
for (int i = 1; i <= numberOfServices; ++i) {
RoutingNode leaf = frame.select(1).get(0);
String route = leaf.getRoute().toString();
lst.add(route);
frame.getNetwork().unregisterSession(route.substring(frame.getIdentity().length() + 1));
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", numberOfServices - i));
Reply reply = new EmptyReply();
reply.addError(new Error(ErrorCode.NO_ADDRESS_FOR_SERVICE, route));
leaf.handleReply(reply);
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
assertEquals(numberOfServices, lst.size());
frame.setHop(new HopSpec("test", "[SubsetService:2]"));
frame.assertMergeOneReply("*");
frame.destroy();
} | frame.setHop(new HopSpec("test", "[SubsetService:2]")); | public void testSubsetService() {
PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
frame.setMessage(new PutDocumentMessage(new DocumentPut(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::"))))));
int numberOfServices = 4;
frame.setHop(new HopSpec("test", "docproc/cluster.default/[SubsetService:2]/chain.default"));
Set<String> lst = new HashSet<>();
for (int i = 1; i <= numberOfServices; ++i) {
frame.getNetwork().registerSession(i + "/chain.default");
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", i));
RoutingNode leaf = frame.select(1).get(0);
lst.add(leaf.getRoute().toString());
leaf.handleReply(new EmptyReply());
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
assertTrue(lst.size() > 1);
String prev = null;
for (int i = 1; i <= numberOfServices; ++i) {
RoutingNode leaf = frame.select(1).get(0);
String next = leaf.getRoute().toString();
if (prev == null) {
assertNotNull(next);
} else {
assertNotEquals(prev, next);
}
prev = next;
leaf.handleReply(new EmptyReply());
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
lst.clear();
for (int i = 1; i <= numberOfServices; ++i) {
RoutingNode leaf = frame.select(1).get(0);
String route = leaf.getRoute().toString();
lst.add(route);
frame.getNetwork().unregisterSession(route.substring(frame.getIdentity().length() + 1));
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", numberOfServices - i));
Reply reply = new EmptyReply();
reply.addError(new Error(ErrorCode.NO_ADDRESS_FOR_SERVICE, route));
leaf.handleReply(reply);
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
assertEquals(numberOfServices, lst.size());
frame.setHop(new HopSpec("test", "[SubsetService:2]"));
frame.assertMergeOneReply("*");
frame.destroy();
} | class PolicyTestCase {
private static final int TIMEOUT = 300;
private static final TimeUnit TIMEOUT_UNIT = TimeUnit.SECONDS;
private static final long TIMEOUT_MILLIS = TIMEOUT_UNIT.toMillis(TIMEOUT);
private final DocumentTypeManager manager = new DocumentTypeManager();
@Before
public void setUp() {
DocumentTypeManagerConfigurer.configure(manager, "file:./test/cfg/testdoc.cfg");
}
@Test
public void testProtocol() {
DocumentProtocol protocol = new DocumentProtocol(manager);
RoutingPolicy policy = protocol.createPolicy("AND", null);
assertTrue(policy instanceof ANDPolicy);
policy = new DocumentProtocol(manager).createPolicy("DocumentRouteSelector", "raw:route[0]\n");
assertTrue(policy instanceof DocumentRouteSelectorPolicy);
policy = new DocumentProtocol(manager).createPolicy("Extern", "foo;bar/baz");
assertTrue(policy instanceof ExternPolicy);
policy = new DocumentProtocol(manager).createPolicy("LocalService", null);
assertTrue(policy instanceof LocalServicePolicy);
policy = new DocumentProtocol(manager).createPolicy("RoundRobin", null);
assertTrue(policy instanceof RoundRobinPolicy);
policy = new DocumentProtocol(manager).createPolicy("SubsetService", null);
assertTrue(policy instanceof SubsetServicePolicy);
policy = new DocumentProtocol(manager).createPolicy("LoadBalancer", "cluster=docproc/cluster.default;session=chain.default");
assertTrue(policy instanceof LoadBalancerPolicy);
}
@Test
public void testAND() {
PolicyTestFrame frame = new PolicyTestFrame(manager);
frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::")))));
frame.setHop(new HopSpec("test", "[AND]")
.addRecipient("foo")
.addRecipient("bar"));
frame.assertSelect(Arrays.asList("foo", "bar"));
frame.setHop(new HopSpec("test", "[AND:baz]")
.addRecipient("foo")
.addRecipient("bar"));
frame.assertSelect(Arrays.asList("baz"));
frame.setHop(new HopSpec("test", "[AND:foo]"));
frame.assertMergeOneReply("foo");
frame.setHop(new HopSpec("test", "[AND:foo bar]"));
frame.assertMergeTwoReplies("foo", "bar");
frame.destroy();
}
@Test
public void requireThatExternPolicyWithIllegalParamIsAnErrorPolicy() throws ListenFailedException {
// Malformed or incomplete [Extern] parameters (missing spec, missing
// pattern, unresolvable pattern) must yield an ErrorPolicy rather than
// throwing or producing a half-configured policy.
Slobrok slobrok = new Slobrok();
try {
String spec = "tcp/localhost:" + slobrok.port();
assertTrue(new DocumentProtocol(manager).createPolicy("Extern", null) instanceof ErrorPolicy);
assertTrue(new DocumentProtocol(manager).createPolicy("Extern", "") instanceof ErrorPolicy);
assertTrue(new DocumentProtocol(manager).createPolicy("Extern", spec) instanceof ErrorPolicy);
assertTrue(new DocumentProtocol(manager).createPolicy("Extern", spec + ";") instanceof ErrorPolicy);
assertTrue(new DocumentProtocol(manager).createPolicy("Extern", spec + ";bar") instanceof ErrorPolicy);
} finally {
slobrok.stop(); // the slobrok was leaked in the original test
}
}
@Test
public void requireThatExternPolicyWithUnknownPatternSelectsNone() throws Exception {
// An [Extern] pattern that matches no registered service selects nothing.
PolicyTestFrame frame = newPutDocumentFrame("id:ns:testdoc::");
Slobrok slobrok = new Slobrok();
setupExternPolicy(frame, slobrok, "foo/bar");
frame.assertSelect(null);
// Clean up the resources the original test leaked.
frame.destroy();
slobrok.stop();
}
@Test
public void requireThatExternPolicySelectsFromExternSlobrok() throws Exception {
// Ten services register in an *external* slobrok; repeated selects through
// an [Extern] hop must, over ten passes, cover all ten distinct services.
PolicyTestFrame frame = newPutDocumentFrame("id:ns:testdoc::");
Slobrok slobrok = new Slobrok();
List<TestServer> servers = new ArrayList<>();
for (int i = 0; i < 10; ++i) {
TestServer server = new TestServer("docproc/cluster.default/" + i, null, slobrok,
new DocumentProtocol(manager));
server.net.registerSession("chain.default");
servers.add(server);
}
setupExternPolicy(frame, slobrok, "docproc/cluster.default/*/chain.default", 10);
Set<String> lst = new HashSet<>();
for (int i = 0; i < 10; ++i) {
RoutingNode leaf = frame.select(1).get(0);
String recipient = leaf.getRoute().toString();
lst.add(recipient);
leaf.handleReply(new EmptyReply());
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
// All ten passes must have hit distinct recipients.
assertEquals(10, lst.size());
for (TestServer server : servers) {
server.destroy();
}
frame.destroy();
slobrok.stop(); // the extern slobrok was leaked in the original test
}
@Test
public void requireThatExternPolicyMergesOneReplyAsProtocol() throws Exception {
// With exactly one matching extern service, the policy must merge the
// single child reply the same way the protocol itself would.
PolicyTestFrame frame = newPutDocumentFrame("id:ns:testdoc::");
Slobrok slobrok = new Slobrok();
TestServer server = new TestServer("docproc/cluster.default/0", null, slobrok,
new DocumentProtocol(manager));
server.net.registerSession("chain.default");
setupExternPolicy(frame, slobrok, "docproc/cluster.default/*/chain.default", 1);
frame.assertMergeOneReply(server.net.getConnectionSpec() + "/chain.default");
server.destroy();
frame.destroy();
slobrok.stop(); // the extern slobrok was leaked in the original test
}
@Test
public void testExternSend() throws Exception {
// End-to-end send across slobrok boundaries: the source runs on a local
// slobrok, while the intermediate and destination register in a separate
// slobrok that is reached via an [Extern] directive in the route.
Slobrok local = new Slobrok();
TestServer src = new TestServer("src", null, local, new DocumentProtocol(manager));
SourceSession ss = src.mb.createSourceSession(new Receptor(), new SourceSessionParams().setTimeout(TIMEOUT));
Slobrok slobrok = new Slobrok();
TestServer itr = new TestServer("itr",
new RoutingTableSpec(DocumentProtocol.NAME)
.addRoute(new RouteSpec("default").addHop("dst"))
.addHop(new HopSpec("dst", "dst/session")),
slobrok, new DocumentProtocol(manager));
IntermediateSession is = itr.mb.createIntermediateSession("session", true, new Receptor(), new Receptor());
TestServer dst = new TestServer("dst", null, slobrok, new DocumentProtocol(manager));
DestinationSession ds = dst.mb.createDestinationSession("session", true, new Receptor());
Message msg = new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::"));
msg.getTrace().setLevel(9); // maximum trace level so the reply carries the full path
msg.setRoute(Route.parse("[Extern:tcp/localhost:" + slobrok.port() + ";itr/session] default"));
assertTrue(ss.send(msg).isAccepted());
// The message must surface at the intermediate first, then the destination.
assertNotNull(msg = ((Receptor)is.getMessageHandler()).getMessage(TIMEOUT));
is.forward(msg);
assertNotNull(msg = ((Receptor)ds.getMessageHandler()).getMessage(TIMEOUT));
ds.acknowledge(msg);
// The reply travels back through the intermediate to the source.
Reply reply = ((Receptor)is.getReplyHandler()).getReply(TIMEOUT);
assertNotNull(reply);
is.forward(reply);
assertNotNull(reply = ((Receptor)ss.getReplyHandler()).getReply(TIMEOUT));
System.out.println(reply.getTrace().toString());
src.destroy();
itr.destroy();
dst.destroy();
slobrok.stop();
local.stop();
}
@Test
public void testExternMultipleSlobroks() throws ListenFailedException {
// An [Extern] spec may list several comma-separated slobrok connection
// specs. After the first extern slobrok is stopped, a second spec appended
// to the list must still resolve the destination.
Slobrok local = new Slobrok();
TestServer srcServer = new TestServer("src", null, local, new DocumentProtocol(manager));
SourceSession srcSession =
srcServer.mb.createSourceSession(new Receptor(), new SourceSessionParams().setTimeout(TIMEOUT));
Slobrok extern = new Slobrok();
String spec = "tcp/localhost:" + extern.port();
TestServer dstServer = new TestServer("dst", null, extern, new DocumentProtocol(manager));
Receptor dstHandler = new Receptor();
DestinationSession dstSession = dstServer.mb.createDestinationSession("session", true, dstHandler);
// First round trip through the first extern slobrok.
Message msg = new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::"));
msg.setRoute(Route.parse("[Extern:" + spec + ";dst/session]"));
assertTrue(srcSession.send(msg).isAccepted());
assertNotNull(msg = dstHandler.getMessage(TIMEOUT));
dstSession.acknowledge(msg);
Reply reply = ((Receptor)srcSession.getReplyHandler()).getReply(TIMEOUT);
assertNotNull(reply);
// Tear down the first extern slobrok and destination completely.
extern.stop();
dstSession.destroy();
dstServer.destroy();
dstHandler.reset();
assertNull(dstHandler.getMessage(0));
// Bring up a second extern slobrok, append its spec to the list, and
// verify that routing now succeeds through the appended spec.
extern = new Slobrok();
spec += ",tcp/localhost:" + extern.port();
dstServer = new TestServer("dst", null, extern, new DocumentProtocol(manager));
dstHandler = new Receptor();
dstSession = dstServer.mb.createDestinationSession("session", true, dstHandler);
msg = new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::"));
msg.setRoute(Route.parse("[Extern:" + spec + ";dst/session]"));
assertTrue(srcSession.send(msg).isAccepted());
assertNotNull(msg = dstHandler.getMessage(TIMEOUT));
dstSession.acknowledge(msg);
reply = ((Receptor)srcSession.getReplyHandler()).getReply(TIMEOUT);
assertNotNull(reply);
extern.stop();
dstSession.destroy();
dstServer.destroy();
local.stop();
srcSession.destroy();
srcServer.destroy();
}
@Test
public void testLocalService() {
// [LocalService] with ten registered sessions must, over ten selects,
// cover all ten services; with an unknown parameter it must fall back to
// the wildcard recipient; bare "[LocalService]" resolves to "*".
PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::0")))));
for (int i = 0; i < 10; ++i) {
frame.getNetwork().registerSession(i + "/chain.default");
}
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 10));
frame.setHop(new HopSpec("test", "docproc/cluster.default/[LocalService]/chain.default"));
Set<String> lst = new HashSet<>();
for (int i = 0; i < 10; ++i) {
RoutingNode leaf = frame.select(1).get(0);
String recipient = leaf.getRoute().toString();
lst.add(recipient);
leaf.handleReply(new EmptyReply());
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
// Ten passes must have selected ten distinct recipients.
assertEquals(10, lst.size());
lst.clear();
// Unknown parameter: every pass resolves to the same wildcard recipient.
frame.setHop(new HopSpec("test", "docproc/cluster.default/[LocalService:broken]/chain.default"));
for (int i = 0; i < 10; ++i) {
RoutingNode leaf = frame.select(1).get(0);
String recipient = leaf.getRoute().toString();
assertEquals(recipient, "docproc/cluster.default/*/chain.default");
lst.add(recipient);
leaf.handleReply(new EmptyReply());
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
assertEquals(1, lst.size());
frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::")))));
frame.setHop(new HopSpec("test", "[LocalService]"));
frame.assertMergeOneReply("*");
frame.destroy();
}
@Test
public void testLocalServiceCache() {
// Two hops using [LocalService] against the same routing table must be
// resolved independently: the policy instance cached for the "foo" hop
// must not be reused for the "bar" hop.
PolicyTestFrame fooFrame = new PolicyTestFrame("docproc/cluster.default", manager);
HopSpec fooHop = new HopSpec("foo", "docproc/cluster.default/[LocalService]/chain.foo");
fooFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::foo")));
fooFrame.setHop(fooHop);
// barFrame is built from fooFrame — presumably sharing its network and
// slobrok; sessions registered below via fooFrame are visible to both.
PolicyTestFrame barFrame = new PolicyTestFrame(fooFrame);
HopSpec barHop = new HopSpec("bar", "docproc/cluster.default/[LocalService]/chain.bar");
barFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::bar")));
barFrame.setHop(barHop);
fooFrame.getMessageBus().setupRouting(
new RoutingSpec().addTable(new RoutingTableSpec(DocumentProtocol.NAME)
.addHop(fooHop)
.addHop(barHop)));
fooFrame.getNetwork().registerSession("0/chain.foo");
fooFrame.getNetwork().registerSession("0/chain.bar");
assertTrue(fooFrame.waitSlobrok("docproc/cluster.default/0/*", 2));
RoutingNode fooChild = fooFrame.select(1).get(0);
assertEquals("docproc/cluster.default/0/chain.foo", fooChild.getRoute().getHop(0).toString());
RoutingNode barChild = barFrame.select(1).get(0);
assertEquals("docproc/cluster.default/0/chain.bar", barChild.getRoute().getHop(0).toString());
barChild.handleReply(new EmptyReply());
fooChild.handleReply(new EmptyReply());
assertNotNull(barFrame.getReceptor().getReply(TIMEOUT));
assertNotNull(fooFrame.getReceptor().getReply(TIMEOUT));
// NOTE(review): neither frame is destroyed here, unlike most tests in this
// file — confirm whether the shared-frame teardown makes that intentional.
}
@Test
public void multipleGetRepliesAreMergedToFoundDocument() {
// When a get fans out to two routes, the merged reply must carry the
// document (and its lastModified) from the child that actually found it.
PolicyTestFrame frame = new PolicyTestFrame(manager);
frame.setHop(new HopSpec("test", getDocumentRouteSelectorRawConfig())
.addRecipient("foo").addRecipient("bar"));
frame.setMessage(new GetDocumentMessage(new DocumentId("id:ns:testdoc::yarn"), AllFields.NAME));
List<RoutingNode> selected = frame.select(2);
for (int i = 0, len = selected.size(); i < len; ++i) {
Document doc = null;
if (i == 0) {
// Only the first child "finds" the document.
doc = new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::yarn"));
doc.setLastModified(123456L);
}
GetDocumentReply reply = new GetDocumentReply(null);
reply.setDocument(doc);
selected.get(i).handleReply(reply);
}
Reply reply = frame.getReceptor().getReply(TIMEOUT);
assertNotNull(reply);
assertEquals(DocumentProtocol.REPLY_GETDOCUMENT, reply.getType());
assertEquals(123456L, ((GetDocumentReply)reply).getLastModified());
frame.destroy(); // release frame resources (leaked in the original test)
}
/** Inline "raw:" DocumentRouteSelector config with two routes ("foo", "bar"), both selecting "testdoc". */
private String getDocumentRouteSelectorRawConfig() {
return "[DocumentRouteSelector:raw:"
+ String.join("\n",
"route[2]",
"route[0].name \"foo\"",
"route[0].selector \"testdoc\"",
"route[0].feed \"myfeed\"",
"route[1].name \"bar\"",
"route[1].selector \"testdoc\"",
"route[1].feed \"myfeed\"")
+ "\n]";
}
@Test
public void remove_document_messages_are_sent_to_the_route_handling_the_given_document_type() {
// A remove is routed by the selector matching the id's document type.
PolicyTestFrame twoRouteFrame = createFrameWithTwoRoutes();
twoRouteFrame.setMessage(createRemove("id:ns:testdoc::1"));
twoRouteFrame.assertSelect(Arrays.asList("testdoc-route"));
twoRouteFrame.setMessage(createRemove("id:ns:other::1"));
twoRouteFrame.assertSelect(Arrays.asList("other-route"));
}
@Test
public void get_document_messages_are_sent_to_the_route_handling_the_given_document_type() {
// A get is routed by the selector matching the id's document type.
PolicyTestFrame twoRouteFrame = createFrameWithTwoRoutes();
twoRouteFrame.setMessage(createGet("id:ns:testdoc::1"));
twoRouteFrame.assertSelect(Arrays.asList("testdoc-route"));
twoRouteFrame.setMessage(createGet("id:ns:other::1"));
twoRouteFrame.assertSelect(Arrays.asList("other-route"));
}
/** Builds a frame whose test hop dispatches through a two-route DocumentRouteSelector. */
private PolicyTestFrame createFrameWithTwoRoutes() {
PolicyTestFrame frame = new PolicyTestFrame(manager);
HopSpec hop = new HopSpec("test", createDocumentRouteSelectorConfigWithTwoRoutes())
.addRecipient("testdoc-route")
.addRecipient("other-route");
frame.setHop(hop);
return frame;
}
/** Inline "raw:" config with one route per document type ("testdoc", "other"). */
private String createDocumentRouteSelectorConfigWithTwoRoutes() {
return "[DocumentRouteSelector:raw:"
+ String.join("\n",
"route[2]",
"route[0].name \"testdoc-route\"",
"route[0].selector \"testdoc and testdoc.stringfield != '0'\"",
"route[0].feed \"\"",
"route[1].name \"other-route\"",
"route[1].selector \"other and other.intfield != '0'\"",
"route[1].feed \"\"")
+ "\n]";
}
/** Creates a remove message for the given document id string. */
private RemoveDocumentMessage createRemove(String docId) {
return new RemoveDocumentMessage(new DocumentId(docId));
}
/** Creates a get message for the given document id string. */
private GetDocumentMessage createGet(String docId) {
return new GetDocumentMessage(new DocumentId(docId));
}
@Test
public void testSubsetServiceCache() {
// NOTE: the original carried a duplicated @Test annotation here; @Test is
// not a repeatable annotation, so the duplicate does not compile and has
// been removed.
// Two hops using [SubsetService:2] against the same routing table must be
// resolved independently: the policy cached for the "foo" hop must not be
// reused for the "bar" hop.
PolicyTestFrame fooFrame = new PolicyTestFrame("docproc/cluster.default", manager);
HopSpec fooHop = new HopSpec("foo", "docproc/cluster.default/[SubsetService:2]/chain.foo");
fooFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::foo")));
fooFrame.setHop(fooHop);
// barFrame is built from fooFrame — presumably sharing its network and
// slobrok; sessions registered below via fooFrame are visible to both.
PolicyTestFrame barFrame = new PolicyTestFrame(fooFrame);
HopSpec barHop = new HopSpec("bar", "docproc/cluster.default/[SubsetService:2]/chain.bar");
barFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::bar")));
barFrame.setHop(barHop);
fooFrame.getMessageBus().setupRouting(
new RoutingSpec().addTable(new RoutingTableSpec(DocumentProtocol.NAME)
.addHop(fooHop)
.addHop(barHop)));
fooFrame.getNetwork().registerSession("0/chain.foo");
fooFrame.getNetwork().registerSession("0/chain.bar");
assertTrue(fooFrame.waitSlobrok("docproc/cluster.default/0/*", 2));
RoutingNode fooChild = fooFrame.select(1).get(0);
assertEquals("docproc/cluster.default/0/chain.foo", fooChild.getRoute().getHop(0).toString());
RoutingNode barChild = barFrame.select(1).get(0);
assertEquals("docproc/cluster.default/0/chain.bar", barChild.getRoute().getHop(0).toString());
barChild.handleReply(new EmptyReply());
fooChild.handleReply(new EmptyReply());
assertNotNull(barFrame.getReceptor().getReply(TIMEOUT));
assertNotNull(fooFrame.getReceptor().getReply(TIMEOUT));
}
@Test
public void testDocumentRouteSelector() {
// A protocol constructed with a valid selector config yields working
// DocumentRouteSelector policies; a broken config ("foo bar" selector)
// yields ErrorPolicy. An explicit param overrides the protocol's config.
String okConfig = "raw:route[0]\n";
String errConfig = "raw:" +
"route[1]\n" +
"route[0].name \"foo\"\n" +
"route[0].selector \"foo bar\"\n" +
"route[0].feed \"baz\"\n";
DocumentProtocol protocol = new DocumentProtocol(manager, okConfig);
assertTrue(protocol.createPolicy("DocumentRouteSelector", null) instanceof DocumentRouteSelectorPolicy);
assertTrue(protocol.createPolicy("DocumentRouteSelector", "") instanceof DocumentRouteSelectorPolicy);
assertTrue(protocol.createPolicy("DocumentRouteSelector", errConfig) instanceof ErrorPolicy);
protocol = new DocumentProtocol(manager, errConfig);
assertTrue(protocol.createPolicy("DocumentRouteSelector", null) instanceof ErrorPolicy);
assertTrue(protocol.createPolicy("DocumentRouteSelector", "") instanceof ErrorPolicy);
assertTrue(protocol.createPolicy("DocumentRouteSelector", okConfig) instanceof DocumentRouteSelectorPolicy);
// Every message kind for a "testdoc" document must route to "foo", whose
// selector matches, and never to "bar" ("other").
PolicyTestFrame frame = new PolicyTestFrame(manager);
frame.setHop(new HopSpec("test", "[DocumentRouteSelector:raw:" +
"route[2]\n" +
"route[0].name \"foo\"\n" +
"route[0].selector \"testdoc\"\n" +
"route[0].feed \"myfeed\"\n" +
"route[1].name \"bar\"\n" +
"route[1].selector \"other\"\n" +
"route[1].feed \"myfeed\"\n]").addRecipient("foo").addRecipient("bar"));
frame.setMessage(new GetDocumentMessage(new DocumentId("id:ns:testdoc::"), "fieldSet"));
frame.assertSelect(Arrays.asList("foo"));
Message put = new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::"))));
frame.setMessage(put);
frame.assertSelect(Arrays.asList("foo"));
frame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::")));
frame.assertSelect(Arrays.asList("foo"));
frame.setMessage(new UpdateDocumentMessage(new DocumentUpdate(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::"))));
frame.assertSelect(Arrays.asList("foo"));
frame.setMessage(put);
frame.assertMergeOneReply("foo");
frame.destroy();
}
@Test
public void testDocumentSelectorDualCluster() {
// Selectors with field arithmetic ("intfield / 1000 > 0") must still route
// a testdoc put/get to the matching "foo" route only.
PolicyTestFrame frame = new PolicyTestFrame(manager);
frame.setHop(new HopSpec("test", "[DocumentRouteSelector:raw:" +
"route[2]\n" +
"route[0].name \"foo\"\n" +
"route[0].selector \"(testdoc AND (testdoc.intfield / 1000 > 0))\"\n" +
"route[0].feed \"myfeed\"\n" +
"route[1].name \"bar\"\n" +
"route[1].selector \"(other AND (other.intfield / 1000 > 0))\"\n" +
"route[1].feed \"myfeed\"\n]").addRecipient("foo").addRecipient("bar"));
frame.setMessage(new GetDocumentMessage(new DocumentId("id:ns:testdoc::"), "fieldSet"));
frame.assertSelect(Arrays.asList("foo"));
// 3000 / 1000 > 0, so the "foo" selector matches the put as well.
Document doc = new Document(manager.getDocumentType("testdoc"), new DocumentId("id:ns:testdoc::"));
doc.setFieldValue("intfield", 3000);
Message put = new PutDocumentMessage(new DocumentPut(doc));
frame.setMessage(put);
frame.assertSelect(Arrays.asList("foo"));
frame.setMessage(put);
frame.assertMergeOneReply("foo");
frame.destroy();
}
@Test
public void testDocumentRouteSelectorIgnore() {
// A put whose document matches no route must be answered directly with a
// DOCUMENTIGNORED reply (no children selected, no errors); an update is
// still forwarded to the configured recipient.
PolicyTestFrame frame = new PolicyTestFrame(manager);
frame.setHop(new HopSpec("test", "[DocumentRouteSelector:raw:" +
"route[1]\n" +
"route[0].name \"docproc/cluster.foo\"\n" +
"route[0].selector \"testdoc and testdoc.stringfield == 'foo'\"\n" +
"route[0].feed \"myfeed\"\n]").addRecipient("docproc/cluster.foo"));
frame.setMessage(new PutDocumentMessage(
new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:yarn:testdoc:n=1234:fluff")))));
frame.select(0);
Reply reply = frame.getReceptor().getReply(TIMEOUT);
assertNotNull(reply);
assertEquals(DocumentProtocol.REPLY_DOCUMENTIGNORED, reply.getType());
assertEquals(0, reply.getNumErrors());
frame.setMessage(new UpdateDocumentMessage(new DocumentUpdate(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::"))));
frame.assertSelect(Arrays.asList("docproc/cluster.foo"));
frame.destroy();
}
@Test
public void testLoadBalancer() {
// A [LoadBalancer] hop with exactly one registered service must select
// that service's session.
PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::")))));
frame.getNetwork().registerSession("0/chain.default");
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 1));
frame.setHop(new HopSpec("test", "[LoadBalancer:cluster=docproc/cluster.default;session=chain.default]"));
assertSelect(frame, 1, Arrays.asList(frame.getNetwork().getConnectionSpec() + "/chain.default"));
frame.destroy(); // release frame resources (leaked in the original test)
}
@Test
public void testRoundRobin() {
// [RoundRobin] must rotate over its recipients that are actually present
// in slobrok, shrinking the rotation as services are unregistered, and
// selecting nothing once none of its recipients remain.
PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::")))));
for (int i = 0; i < 10; ++i) {
frame.getNetwork().registerSession(i + "/chain.default");
}
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 10));
frame.setHop(new HopSpec("test", "[RoundRobin]")
.addRecipient("docproc/cluster.default/3/chain.default")
.addRecipient("docproc/cluster.default/6/chain.default")
.addRecipient("docproc/cluster.default/9/chain.default"));
assertSelect(frame, 32, Arrays.asList("docproc/cluster.default/3/chain.default",
"docproc/cluster.default/6/chain.default",
"docproc/cluster.default/9/chain.default"));
// Unregister recipients one at a time; the rotation must shrink to match.
frame.getNetwork().unregisterSession("6/chain.default");
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 9));
assertSelect(frame, 32, Arrays.asList("docproc/cluster.default/3/chain.default",
"docproc/cluster.default/9/chain.default"));
frame.getNetwork().unregisterSession("3/chain.default");
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 8));
assertSelect(frame, 32, Arrays.asList("docproc/cluster.default/9/chain.default"))
;
frame.getNetwork().unregisterSession("9/chain.default");
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 7));
assertSelect(frame, 32, new ArrayList<>());
frame.setHop(new HopSpec("test", "[RoundRobin]").addRecipient("docproc/cluster.default/0/chain.default"));
frame.assertMergeOneReply("docproc/cluster.default/0/chain.default");
frame.destroy();
}
@Test
public void testRoundRobinCache() {
// Two [RoundRobin] hops with different recipient lists must be resolved
// independently: the policy cached for the "foo" hop must not leak into
// the "bar" hop.
PolicyTestFrame fooFrame = new PolicyTestFrame("docproc/cluster.default", manager);
HopSpec fooHop = new HopSpec("foo", "[RoundRobin]").addRecipient("docproc/cluster.default/0/chain.foo");
fooFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::foo")))
;
fooFrame.setHop(fooHop);
// barFrame is built from fooFrame — presumably sharing its network and
// slobrok; sessions registered below via fooFrame are visible to both.
PolicyTestFrame barFrame = new PolicyTestFrame(fooFrame);
HopSpec barHop = new HopSpec("bar", "[RoundRobin]").addRecipient("docproc/cluster.default/0/chain.bar");
barFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::bar")));
barFrame.setHop(barHop);
fooFrame.getMessageBus().setupRouting(
new RoutingSpec().addTable(new RoutingTableSpec(DocumentProtocol.NAME)
.addHop(fooHop)
.addHop(barHop)));
fooFrame.getNetwork().registerSession("0/chain.foo");
fooFrame.getNetwork().registerSession("0/chain.bar");
assertTrue(fooFrame.waitSlobrok("docproc/cluster.default/0/*", 2));
RoutingNode fooChild = fooFrame.select(1).get(0);
assertEquals("docproc/cluster.default/0/chain.foo", fooChild.getRoute().toString());
RoutingNode barChild = barFrame.select(1).get(0);
assertEquals("docproc/cluster.default/0/chain.bar", barChild.getRoute().toString());
barChild.handleReply(new EmptyReply());
fooChild.handleReply(new EmptyReply());
assertNotNull(barFrame.getReceptor().getReply(TIMEOUT));
assertNotNull(fooFrame.getReceptor().getReply(TIMEOUT));
}
/**
 * Runs the given number of select passes on a frame and verifies that the
 * distinct recipients seen across all passes are exactly the expected list
 * (which must be given in sorted order, as results are collected in a TreeSet).
 *
 * @param frame      the frame to select on
 * @param numSelects how many select passes to perform
 * @param expected   the sorted list of expected recipients; empty means no
 *                   child should ever be selected
 */
private static void assertSelect(PolicyTestFrame frame, int numSelects, List<String> expected) {
Set<String> seen = new TreeSet<>();
for (int pass = 0; pass < numSelects; ++pass) {
if (expected.isEmpty()) {
frame.select(0);
} else {
RoutingNode child = frame.select(1).get(0);
seen.add(child.getRoute().toString());
child.handleReply(new EmptyReply());
}
// Each pass must produce a reply at the receptor, selected or not.
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
assertEquals(expected.size(), seen.size());
Iterator<String> actual = seen.iterator();
for (String recipient : expected) {
assertEquals(recipient, actual.next());
}
}
/** Polls every 10 ms until the slobrok mirror reports ready, or times out. */
private static void assertMirrorReady(Mirror slobrok)
throws InterruptedException, TimeoutException
{
long attempts = TIMEOUT_MILLIS / 10;
for (long attempt = 0; attempt < attempts; ++attempt) {
if (slobrok.ready()) {
return;
}
Thread.sleep(10);
}
throw new TimeoutException();
}
/** Polls every 10 ms until the pattern resolves to exactly numEntries services, or times out. */
private static void assertMirrorContains(IMirror slobrok, String pattern, int numEntries)
throws InterruptedException, TimeoutException
{
long attempts = TIMEOUT_MILLIS / 10;
for (long attempt = 0; attempt < attempts; ++attempt) {
if (slobrok.lookup(pattern).size() == numEntries) {
return;
}
Thread.sleep(10);
}
throw new TimeoutException();
}
/** Convenience overload that does not wait for a specific slobrok entry count. */
private void setupExternPolicy(PolicyTestFrame frame, Slobrok slobrok, String pattern)
throws InterruptedException, TimeoutException
{
setupExternPolicy(frame, slobrok, pattern, -1);
}
// Points the frame's "test" hop at [Extern:<slobrok>;<pattern>], resolves the
// resulting ExternPolicy from the routing table, and blocks until its mirror
// is ready — and, when numEntries >= 0, until the pattern matches exactly
// that many slobrok entries.
private void setupExternPolicy(PolicyTestFrame frame, Slobrok slobrok, String pattern, int numEntries)
throws InterruptedException, TimeoutException
{
String param = "tcp/localhost:" + slobrok.port() + ";" + pattern;
frame.setHop(new HopSpec("test", "[Extern:" + param + "]"));
// Dig the actual policy instance out of the routing table so we can wait
// on its mirror instead of sleeping blindly.
MessageBus mbus = frame.getMessageBus();
HopBlueprint hop = mbus.getRoutingTable(DocumentProtocol.NAME).getHop("test");
PolicyDirective dir = (PolicyDirective)hop.getDirective(0);
ExternPolicy policy = (ExternPolicy)mbus.getRoutingPolicy(DocumentProtocol.NAME, dir.getName(), dir.getParam());
assertMirrorReady(policy.getMirror());
if (numEntries >= 0) {
assertMirrorContains(policy.getMirror(), pattern, numEntries);
}
}
/** Creates a fresh test frame backed by this test's type manager. */
private PolicyTestFrame newFrame() {
return new PolicyTestFrame(manager);
}
/** Creates a fresh test frame with the given message preinstalled. */
private PolicyTestFrame newFrame(Message msg) {
PolicyTestFrame frame = newFrame();
frame.setMessage(msg);
return frame;
}
/** Creates a put message for an empty "testdoc" document with the given id. */
private PutDocumentMessage newPutDocument(String documentId) {
return new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId(documentId))));
}
/** Creates a frame preloaded with a put message for the given document id. */
private PolicyTestFrame newPutDocumentFrame(String documentId) {
return newFrame(newPutDocument(documentId));
}
}
class PolicyTestCase {
// Timeout, in TIMEOUT_UNIT units, used when waiting for replies and slobrok state.
private static final int TIMEOUT = 300;
private static final TimeUnit TIMEOUT_UNIT = TimeUnit.SECONDS;
// TIMEOUT converted to milliseconds, used by the polling helpers.
private static final long TIMEOUT_MILLIS = TIMEOUT_UNIT.toMillis(TIMEOUT);
// Holds the "testdoc" document type used by every test; populated in setUp().
private final DocumentTypeManager manager = new DocumentTypeManager();
@Before
public void setUp() {
// Load the document types (notably "testdoc") referenced by all tests.
DocumentTypeManagerConfigurer.configure(manager, "file:./test/cfg/testdoc.cfg");
}
@Test
public void testProtocol() {
// Each known policy name must resolve to its dedicated RoutingPolicy type.
// The original constructed a fresh DocumentProtocol (with identical
// arguments) for every call after the first; a single instance is reused
// here for consistency. NOTE(review): protocol-level config only matters
// for DocumentRouteSelector, and all instances here were configured
// identically, so behavior is unchanged.
DocumentProtocol protocol = new DocumentProtocol(manager);
RoutingPolicy policy = protocol.createPolicy("AND", null);
assertTrue(policy instanceof ANDPolicy);
policy = protocol.createPolicy("DocumentRouteSelector", "raw:route[0]\n");
assertTrue(policy instanceof DocumentRouteSelectorPolicy);
policy = protocol.createPolicy("Extern", "foo;bar/baz");
assertTrue(policy instanceof ExternPolicy);
policy = protocol.createPolicy("LocalService", null);
assertTrue(policy instanceof LocalServicePolicy);
policy = protocol.createPolicy("RoundRobin", null);
assertTrue(policy instanceof RoundRobinPolicy);
policy = protocol.createPolicy("SubsetService", null);
assertTrue(policy instanceof SubsetServicePolicy);
policy = protocol.createPolicy("LoadBalancer", "cluster=docproc/cluster.default;session=chain.default");
assertTrue(policy instanceof LoadBalancerPolicy);
}
@Test
public void testAND() {
// Exercise the [AND] policy: recipient fan-out, parameter override of the
// recipient list, and reply merging for one and two children.
PolicyTestFrame testFrame = new PolicyTestFrame(manager);
Document doc = new Document(manager.getDocumentType("testdoc"), new DocumentId("id:ns:testdoc::"));
testFrame.setMessage(new PutDocumentMessage(new DocumentPut(doc)));
// Without a parameter, every configured recipient is selected.
testFrame.setHop(new HopSpec("test", "[AND]").addRecipient("foo").addRecipient("bar"));
testFrame.assertSelect(Arrays.asList("foo", "bar"));
// With a parameter, the parameter replaces the recipient list.
testFrame.setHop(new HopSpec("test", "[AND:baz]").addRecipient("foo").addRecipient("bar"));
testFrame.assertSelect(Arrays.asList("baz"));
// Reply merging with a single child...
testFrame.setHop(new HopSpec("test", "[AND:foo]"));
testFrame.assertMergeOneReply("foo");
// ...and with two children.
testFrame.setHop(new HopSpec("test", "[AND:foo bar]"));
testFrame.assertMergeTwoReplies("foo", "bar");
testFrame.destroy();
}
@Test
public void requireThatExternPolicyWithIllegalParamIsAnErrorPolicy() throws ListenFailedException {
// Malformed or incomplete [Extern] parameters must yield an ErrorPolicy
// rather than throwing or producing a half-configured policy.
Slobrok slobrok = new Slobrok();
try {
String spec = "tcp/localhost:" + slobrok.port();
assertTrue(new DocumentProtocol(manager).createPolicy("Extern", null) instanceof ErrorPolicy);
assertTrue(new DocumentProtocol(manager).createPolicy("Extern", "") instanceof ErrorPolicy);
assertTrue(new DocumentProtocol(manager).createPolicy("Extern", spec) instanceof ErrorPolicy);
assertTrue(new DocumentProtocol(manager).createPolicy("Extern", spec + ";") instanceof ErrorPolicy);
assertTrue(new DocumentProtocol(manager).createPolicy("Extern", spec + ";bar") instanceof ErrorPolicy);
} finally {
slobrok.stop(); // the slobrok was leaked in the original test
}
}
@Test
public void requireThatExternPolicyWithUnknownPatternSelectsNone() throws Exception {
// An [Extern] pattern that matches no registered service selects nothing.
PolicyTestFrame frame = newPutDocumentFrame("id:ns:testdoc::");
Slobrok slobrok = new Slobrok();
setupExternPolicy(frame, slobrok, "foo/bar");
frame.assertSelect(null);
// Clean up the resources the original test leaked.
frame.destroy();
slobrok.stop();
}
@Test
public void requireThatExternPolicySelectsFromExternSlobrok() throws Exception {
// Ten services register in an *external* slobrok; repeated selects through
// an [Extern] hop must, over ten passes, cover all ten distinct services.
PolicyTestFrame frame = newPutDocumentFrame("id:ns:testdoc::");
Slobrok slobrok = new Slobrok();
List<TestServer> servers = new ArrayList<>();
for (int i = 0; i < 10; ++i) {
TestServer server = new TestServer("docproc/cluster.default/" + i, null, slobrok,
new DocumentProtocol(manager));
server.net.registerSession("chain.default");
servers.add(server);
}
setupExternPolicy(frame, slobrok, "docproc/cluster.default/*/chain.default", 10);
Set<String> lst = new HashSet<>();
for (int i = 0; i < 10; ++i) {
RoutingNode leaf = frame.select(1).get(0);
String recipient = leaf.getRoute().toString();
lst.add(recipient);
leaf.handleReply(new EmptyReply());
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
// All ten passes must have hit distinct recipients.
assertEquals(10, lst.size());
for (TestServer server : servers) {
server.destroy();
}
frame.destroy();
slobrok.stop(); // the extern slobrok was leaked in the original test
}
@Test
public void requireThatExternPolicyMergesOneReplyAsProtocol() throws Exception {
// With exactly one matching extern service, the policy must merge the
// single child reply the same way the protocol itself would.
PolicyTestFrame frame = newPutDocumentFrame("id:ns:testdoc::");
Slobrok slobrok = new Slobrok();
TestServer server = new TestServer("docproc/cluster.default/0", null, slobrok,
new DocumentProtocol(manager));
server.net.registerSession("chain.default");
setupExternPolicy(frame, slobrok, "docproc/cluster.default/*/chain.default", 1);
frame.assertMergeOneReply(server.net.getConnectionSpec() + "/chain.default");
server.destroy();
frame.destroy();
slobrok.stop(); // the extern slobrok was leaked in the original test
}
@Test
public void testExternSend() throws Exception {
// End-to-end send across slobrok boundaries: the source runs on a local
// slobrok, while the intermediate and destination register in a separate
// slobrok that is reached via an [Extern] directive in the route.
Slobrok local = new Slobrok();
TestServer src = new TestServer("src", null, local, new DocumentProtocol(manager));
SourceSession ss = src.mb.createSourceSession(new Receptor(), new SourceSessionParams().setTimeout(TIMEOUT));
Slobrok slobrok = new Slobrok();
TestServer itr = new TestServer("itr",
new RoutingTableSpec(DocumentProtocol.NAME)
.addRoute(new RouteSpec("default").addHop("dst"))
.addHop(new HopSpec("dst", "dst/session")),
slobrok, new DocumentProtocol(manager));
IntermediateSession is = itr.mb.createIntermediateSession("session", true, new Receptor(), new Receptor());
TestServer dst = new TestServer("dst", null, slobrok, new DocumentProtocol(manager));
DestinationSession ds = dst.mb.createDestinationSession("session", true, new Receptor());
Message msg = new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::"));
msg.getTrace().setLevel(9); // maximum trace level so the reply carries the full path
msg.setRoute(Route.parse("[Extern:tcp/localhost:" + slobrok.port() + ";itr/session] default"));
assertTrue(ss.send(msg).isAccepted());
// The message must surface at the intermediate first, then the destination.
assertNotNull(msg = ((Receptor)is.getMessageHandler()).getMessage(TIMEOUT));
is.forward(msg);
assertNotNull(msg = ((Receptor)ds.getMessageHandler()).getMessage(TIMEOUT));
ds.acknowledge(msg);
// The reply travels back through the intermediate to the source.
Reply reply = ((Receptor)is.getReplyHandler()).getReply(TIMEOUT);
assertNotNull(reply);
is.forward(reply);
assertNotNull(reply = ((Receptor)ss.getReplyHandler()).getReply(TIMEOUT));
System.out.println(reply.getTrace().toString());
src.destroy();
itr.destroy();
dst.destroy();
slobrok.stop();
local.stop();
}
@Test
public void testExternMultipleSlobroks() throws ListenFailedException {
// An [Extern] spec may list several comma-separated slobrok connection
// specs. After the first extern slobrok is stopped, a second spec appended
// to the list must still resolve the destination.
Slobrok local = new Slobrok();
TestServer srcServer = new TestServer("src", null, local, new DocumentProtocol(manager));
SourceSession srcSession =
srcServer.mb.createSourceSession(new Receptor(), new SourceSessionParams().setTimeout(TIMEOUT));
Slobrok extern = new Slobrok();
String spec = "tcp/localhost:" + extern.port();
TestServer dstServer = new TestServer("dst", null, extern, new DocumentProtocol(manager));
Receptor dstHandler = new Receptor();
DestinationSession dstSession = dstServer.mb.createDestinationSession("session", true, dstHandler);
// First round trip through the first extern slobrok.
Message msg = new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::"));
msg.setRoute(Route.parse("[Extern:" + spec + ";dst/session]"));
assertTrue(srcSession.send(msg).isAccepted());
assertNotNull(msg = dstHandler.getMessage(TIMEOUT));
dstSession.acknowledge(msg);
Reply reply = ((Receptor)srcSession.getReplyHandler()).getReply(TIMEOUT);
assertNotNull(reply);
// Tear down the first extern slobrok and destination completely.
extern.stop();
dstSession.destroy();
dstServer.destroy();
dstHandler.reset();
assertNull(dstHandler.getMessage(0));
// Bring up a second extern slobrok, append its spec to the list, and
// verify that routing now succeeds through the appended spec.
extern = new Slobrok();
spec += ",tcp/localhost:" + extern.port();
dstServer = new TestServer("dst", null, extern, new DocumentProtocol(manager));
dstHandler = new Receptor();
dstSession = dstServer.mb.createDestinationSession("session", true, dstHandler);
msg = new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::"));
msg.setRoute(Route.parse("[Extern:" + spec + ";dst/session]"));
assertTrue(srcSession.send(msg).isAccepted());
assertNotNull(msg = dstHandler.getMessage(TIMEOUT));
dstSession.acknowledge(msg);
reply = ((Receptor)srcSession.getReplyHandler()).getReply(TIMEOUT);
assertNotNull(reply);
extern.stop();
dstSession.destroy();
dstServer.destroy();
local.stop();
srcSession.destroy();
srcServer.destroy();
}
@Test
public void testLocalService() {
// [LocalService] with ten registered sessions must, over ten selects,
// cover all ten services; with an unknown parameter it must fall back to
// the wildcard recipient; bare "[LocalService]" resolves to "*".
PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::0")))));
for (int i = 0; i < 10; ++i) {
frame.getNetwork().registerSession(i + "/chain.default");
}
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 10));
frame.setHop(new HopSpec("test", "docproc/cluster.default/[LocalService]/chain.default"));
Set<String> lst = new HashSet<>();
for (int i = 0; i < 10; ++i) {
RoutingNode leaf = frame.select(1).get(0);
String recipient = leaf.getRoute().toString();
lst.add(recipient);
leaf.handleReply(new EmptyReply());
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
// Ten passes must have selected ten distinct recipients.
assertEquals(10, lst.size());
lst.clear();
// Unknown parameter: every pass resolves to the same wildcard recipient.
frame.setHop(new HopSpec("test", "docproc/cluster.default/[LocalService:broken]/chain.default"));
for (int i = 0; i < 10; ++i) {
RoutingNode leaf = frame.select(1).get(0);
String recipient = leaf.getRoute().toString();
assertEquals(recipient, "docproc/cluster.default/*/chain.default");
lst.add(recipient);
leaf.handleReply(new EmptyReply());
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
assertEquals(1, lst.size());
frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::")))));
frame.setHop(new HopSpec("test", "[LocalService]"));
frame.assertMergeOneReply("*");
frame.destroy();
}
// Verifies that two distinct [LocalService] hops (chain.foo vs chain.bar) are
// resolved independently, i.e. the per-hop routing-policy cache does not mix them up.
@Test
public void testLocalServiceCache() {
PolicyTestFrame fooFrame = new PolicyTestFrame("docproc/cluster.default", manager);
HopSpec fooHop = new HopSpec("foo", "docproc/cluster.default/[LocalService]/chain.foo");
fooFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::foo")));
fooFrame.setHop(fooHop);
// barFrame shares infrastructure with fooFrame (copy constructor).
PolicyTestFrame barFrame = new PolicyTestFrame(fooFrame);
HopSpec barHop = new HopSpec("bar", "docproc/cluster.default/[LocalService]/chain.bar");
barFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::bar")));
barFrame.setHop(barHop);
fooFrame.getMessageBus().setupRouting(
new RoutingSpec().addTable(new RoutingTableSpec(DocumentProtocol.NAME)
.addHop(fooHop)
.addHop(barHop)));
fooFrame.getNetwork().registerSession("0/chain.foo");
fooFrame.getNetwork().registerSession("0/chain.bar");
assertTrue(fooFrame.waitSlobrok("docproc/cluster.default/0/*", 2));
RoutingNode fooChild = fooFrame.select(1).get(0);
assertEquals("docproc/cluster.default/0/chain.foo", fooChild.getRoute().getHop(0).toString());
RoutingNode barChild = barFrame.select(1).get(0);
assertEquals("docproc/cluster.default/0/chain.bar", barChild.getRoute().getHop(0).toString());
barChild.handleReply(new EmptyReply());
fooChild.handleReply(new EmptyReply());
assertNotNull(barFrame.getReceptor().getReply(TIMEOUT));
assertNotNull(fooFrame.getReceptor().getReply(TIMEOUT));
}
// When a get fans out to several recipients and only one returns the document,
// the merged reply must carry that document's last-modified timestamp.
@Test
public void multipleGetRepliesAreMergedToFoundDocument() {
PolicyTestFrame frame = new PolicyTestFrame(manager);
frame.setHop(new HopSpec("test", getDocumentRouteSelectorRawConfig())
.addRecipient("foo").addRecipient("bar"));
frame.setMessage(new GetDocumentMessage(new DocumentId("id:ns:testdoc::yarn"), AllFields.NAME));
List<RoutingNode> selected = frame.select(2);
for (int i = 0, len = selected.size(); i < len; ++i) {
// Only the first recipient answers with the document; the rest reply empty.
Document doc = null;
if (i == 0) {
doc = new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::yarn"));
doc.setLastModified(123456L);
}
GetDocumentReply reply = new GetDocumentReply(null);
reply.setDocument(doc);
selected.get(i).handleReply(reply);
}
Reply reply = frame.getReceptor().getReply(TIMEOUT);
assertNotNull(reply);
assertEquals(DocumentProtocol.REPLY_GETDOCUMENT, reply.getType());
assertEquals(123456L, ((GetDocumentReply)reply).getLastModified());
}
// Raw inline DocumentRouteSelector policy config with two routes ("foo", "bar")
// that both select "testdoc" documents.
private String getDocumentRouteSelectorRawConfig() {
    StringBuilder cfg = new StringBuilder("[DocumentRouteSelector:raw:");
    cfg.append("route[2]\n");
    cfg.append("route[0].name \"foo\"\n");
    cfg.append("route[0].selector \"testdoc\"\n");
    cfg.append("route[0].feed \"myfeed\"\n");
    cfg.append("route[1].name \"bar\"\n");
    cfg.append("route[1].selector \"testdoc\"\n");
    cfg.append("route[1].feed \"myfeed\"\n]");
    return cfg.toString();
}
// Removes must be routed by document type: testdoc ids to testdoc-route,
// "other" ids to other-route.
@Test
public void remove_document_messages_are_sent_to_the_route_handling_the_given_document_type() {
PolicyTestFrame frame = createFrameWithTwoRoutes();
frame.setMessage(createRemove("id:ns:testdoc::1"));
frame.assertSelect(Arrays.asList("testdoc-route"));
frame.setMessage(createRemove("id:ns:other::1"));
frame.assertSelect(Arrays.asList("other-route"));
}
// Gets must be routed by document type, mirroring the remove-message case.
@Test
public void get_document_messages_are_sent_to_the_route_handling_the_given_document_type() {
PolicyTestFrame frame = createFrameWithTwoRoutes();
frame.setMessage(createGet("id:ns:testdoc::1"));
frame.assertSelect(Arrays.asList("testdoc-route"));
frame.setMessage(createGet("id:ns:other::1"));
frame.assertSelect(Arrays.asList("other-route"));
}
// Builds a test frame whose "test" hop uses a DocumentRouteSelector configured
// with a testdoc-guarded route and an "other"-guarded route.
private PolicyTestFrame createFrameWithTwoRoutes() {
    PolicyTestFrame frame = new PolicyTestFrame(manager);
    HopSpec hop = new HopSpec("test", createDocumentRouteSelectorConfigWithTwoRoutes());
    hop.addRecipient("testdoc-route");
    hop.addRecipient("other-route");
    frame.setHop(hop);
    return frame;
}
// Raw DocumentRouteSelector config declaring two selector-guarded routes with
// empty feed names.
private String createDocumentRouteSelectorConfigWithTwoRoutes() {
    StringBuilder cfg = new StringBuilder("[DocumentRouteSelector:raw:");
    cfg.append("route[2]\n");
    cfg.append("route[0].name \"testdoc-route\"\n");
    cfg.append("route[0].selector \"testdoc and testdoc.stringfield != '0'\"\n");
    cfg.append("route[0].feed \"\"\n");
    cfg.append("route[1].name \"other-route\"\n");
    cfg.append("route[1].selector \"other and other.intfield != '0'\"\n");
    cfg.append("route[1].feed \"\"\n]");
    return cfg.toString();
}
// Convenience factory for a remove message addressing the given document id.
private RemoveDocumentMessage createRemove(String docId) {
return new RemoveDocumentMessage(new DocumentId(docId));
}
// Convenience factory for a get message addressing the given document id.
private GetDocumentMessage createGet(String docId) {
return new GetDocumentMessage(new DocumentId(docId));
}
// Verifies that two distinct [SubsetService:2] hops resolve independently and
// are not conflated by the routing-policy cache.
// Fixed: the @Test annotation was duplicated, which does not compile because
// JUnit's @Test is not a repeatable annotation type.
@Test
public void testSubsetServiceCache() {
    PolicyTestFrame fooFrame = new PolicyTestFrame("docproc/cluster.default", manager);
    HopSpec fooHop = new HopSpec("foo", "docproc/cluster.default/[SubsetService:2]/chain.foo");
    fooFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::foo")));
    fooFrame.setHop(fooHop);
    // barFrame shares infrastructure with fooFrame (copy constructor).
    PolicyTestFrame barFrame = new PolicyTestFrame(fooFrame);
    HopSpec barHop = new HopSpec("bar", "docproc/cluster.default/[SubsetService:2]/chain.bar");
    barFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::bar")));
    barFrame.setHop(barHop);
    fooFrame.getMessageBus().setupRouting(
            new RoutingSpec().addTable(new RoutingTableSpec(DocumentProtocol.NAME)
                                               .addHop(fooHop)
                                               .addHop(barHop)));
    fooFrame.getNetwork().registerSession("0/chain.foo");
    fooFrame.getNetwork().registerSession("0/chain.bar");
    assertTrue(fooFrame.waitSlobrok("docproc/cluster.default/0/*", 2));
    RoutingNode fooChild = fooFrame.select(1).get(0);
    assertEquals("docproc/cluster.default/0/chain.foo", fooChild.getRoute().getHop(0).toString());
    RoutingNode barChild = barFrame.select(1).get(0);
    assertEquals("docproc/cluster.default/0/chain.bar", barChild.getRoute().getHop(0).toString());
    barChild.handleReply(new EmptyReply());
    fooChild.handleReply(new EmptyReply());
    assertNotNull(barFrame.getReceptor().getReply(TIMEOUT));
    assertNotNull(fooFrame.getReceptor().getReply(TIMEOUT));
}
// Covers DocumentRouteSelector policy creation (valid vs broken config must
// yield the real policy vs ErrorPolicy) and routing of all four basic message
// types to the route whose selector matches "testdoc".
@Test
public void testDocumentRouteSelector() {
String okConfig = "raw:route[0]\n";
// "foo bar" is not a parseable document selector, so this config is invalid.
String errConfig = "raw:" +
"route[1]\n" +
"route[0].name \"foo\"\n" +
"route[0].selector \"foo bar\"\n" +
"route[0].feed \"baz\"\n";
DocumentProtocol protocol = new DocumentProtocol(manager, okConfig);
assertTrue(protocol.createPolicy("DocumentRouteSelector", null) instanceof DocumentRouteSelectorPolicy);
assertTrue(protocol.createPolicy("DocumentRouteSelector", "") instanceof DocumentRouteSelectorPolicy);
assertTrue(protocol.createPolicy("DocumentRouteSelector", errConfig) instanceof ErrorPolicy);
protocol = new DocumentProtocol(manager, errConfig);
assertTrue(protocol.createPolicy("DocumentRouteSelector", null) instanceof ErrorPolicy);
assertTrue(protocol.createPolicy("DocumentRouteSelector", "") instanceof ErrorPolicy);
assertTrue(protocol.createPolicy("DocumentRouteSelector", okConfig) instanceof DocumentRouteSelectorPolicy);
PolicyTestFrame frame = new PolicyTestFrame(manager);
frame.setHop(new HopSpec("test", "[DocumentRouteSelector:raw:" +
"route[2]\n" +
"route[0].name \"foo\"\n" +
"route[0].selector \"testdoc\"\n" +
"route[0].feed \"myfeed\"\n" +
"route[1].name \"bar\"\n" +
"route[1].selector \"other\"\n" +
"route[1].feed \"myfeed\"\n]").addRecipient("foo").addRecipient("bar"));
frame.setMessage(new GetDocumentMessage(new DocumentId("id:ns:testdoc::"), "fieldSet"));
frame.assertSelect(Arrays.asList("foo"));
Message put = new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::"))));
frame.setMessage(put);
frame.assertSelect(Arrays.asList("foo"));
frame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::")));
frame.assertSelect(Arrays.asList("foo"));
frame.setMessage(new UpdateDocumentMessage(new DocumentUpdate(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::"))));
frame.assertSelect(Arrays.asList("foo"));
frame.setMessage(put);
frame.assertMergeOneReply("foo");
frame.destroy();
}
// Routes using selectors that reference document field values; the put with
// intfield=3000 must match the "foo" route's (intfield / 1000 > 0) selector.
@Test
public void testDocumentSelectorDualCluster() {
PolicyTestFrame frame = new PolicyTestFrame(manager);
frame.setHop(new HopSpec("test", "[DocumentRouteSelector:raw:" +
"route[2]\n" +
"route[0].name \"foo\"\n" +
"route[0].selector \"(testdoc AND (testdoc.intfield / 1000 > 0))\"\n" +
"route[0].feed \"myfeed\"\n" +
"route[1].name \"bar\"\n" +
"route[1].selector \"(other AND (other.intfield / 1000 > 0))\"\n" +
"route[1].feed \"myfeed\"\n]").addRecipient("foo").addRecipient("bar"));
frame.setMessage(new GetDocumentMessage(new DocumentId("id:ns:testdoc::"), "fieldSet"));
frame.assertSelect(Arrays.asList("foo"));
Document doc = new Document(manager.getDocumentType("testdoc"), new DocumentId("id:ns:testdoc::"));
doc.setFieldValue("intfield", 3000);
Message put = new PutDocumentMessage(new DocumentPut(doc));
frame.setMessage(put);
frame.assertSelect(Arrays.asList("foo"));
frame.setMessage(put);
frame.assertMergeOneReply("foo");
frame.destroy();
}
// A put whose document does not match any route selector must yield a
// DOCUMENTIGNORED reply with no errors, while updates are still forwarded
// (updates cannot be evaluated against field selectors up front).
@Test
public void testDocumentRouteSelectorIgnore() {
PolicyTestFrame frame = new PolicyTestFrame(manager);
frame.setHop(new HopSpec("test", "[DocumentRouteSelector:raw:" +
"route[1]\n" +
"route[0].name \"docproc/cluster.foo\"\n" +
"route[0].selector \"testdoc and testdoc.stringfield == 'foo'\"\n" +
"route[0].feed \"myfeed\"\n]").addRecipient("docproc/cluster.foo"));
frame.setMessage(new PutDocumentMessage(
new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:yarn:testdoc:n=1234:fluff")))));
// Nothing matches, so zero recipients are selected.
frame.select(0);
Reply reply = frame.getReceptor().getReply(TIMEOUT);
assertNotNull(reply);
assertEquals(DocumentProtocol.REPLY_DOCUMENTIGNORED, reply.getType());
assertEquals(0, reply.getNumErrors());
frame.setMessage(new UpdateDocumentMessage(new DocumentUpdate(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::"))));
frame.assertSelect(Arrays.asList("docproc/cluster.foo"));
frame.destroy();
}
// The [LoadBalancer] policy should resolve to the single registered session's
// connection spec.
@Test
public void testLoadBalancer() {
PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::")))));
frame.getNetwork().registerSession("0/chain.default");
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 1));
frame.setHop(new HopSpec("test", "[LoadBalancer:cluster=docproc/cluster.default;session=chain.default]"));
assertSelect(frame, 1, Arrays.asList(frame.getNetwork().getConnectionSpec() + "/chain.default"));
}
// The [RoundRobin] policy must cycle over its configured recipients, shrink
// its rotation as recipients become unavailable, and select nothing when all
// are gone.
@Test
public void testRoundRobin() {
PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::")))));
for (int i = 0; i < 10; ++i) {
frame.getNetwork().registerSession(i + "/chain.default");
}
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 10));
frame.setHop(new HopSpec("test", "[RoundRobin]")
.addRecipient("docproc/cluster.default/3/chain.default")
.addRecipient("docproc/cluster.default/6/chain.default")
.addRecipient("docproc/cluster.default/9/chain.default"));
assertSelect(frame, 32, Arrays.asList("docproc/cluster.default/3/chain.default",
"docproc/cluster.default/6/chain.default",
"docproc/cluster.default/9/chain.default"));
// Unregister recipients one by one; rotation must shrink accordingly.
frame.getNetwork().unregisterSession("6/chain.default");
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 9));
assertSelect(frame, 32, Arrays.asList("docproc/cluster.default/3/chain.default",
"docproc/cluster.default/9/chain.default"));
frame.getNetwork().unregisterSession("3/chain.default");
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 8));
assertSelect(frame, 32, Arrays.asList("docproc/cluster.default/9/chain.default"));
frame.getNetwork().unregisterSession("9/chain.default");
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 7));
assertSelect(frame, 32, new ArrayList<>());
frame.setHop(new HopSpec("test", "[RoundRobin]").addRecipient("docproc/cluster.default/0/chain.default"));
frame.assertMergeOneReply("docproc/cluster.default/0/chain.default");
frame.destroy();
}
// Verifies that two [RoundRobin] hops with different recipients are cached
// independently and do not leak into each other's rotation.
@Test
public void testRoundRobinCache() {
PolicyTestFrame fooFrame = new PolicyTestFrame("docproc/cluster.default", manager);
HopSpec fooHop = new HopSpec("foo", "[RoundRobin]").addRecipient("docproc/cluster.default/0/chain.foo");
fooFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::foo")));
fooFrame.setHop(fooHop);
// barFrame shares infrastructure with fooFrame (copy constructor).
PolicyTestFrame barFrame = new PolicyTestFrame(fooFrame);
HopSpec barHop = new HopSpec("bar", "[RoundRobin]").addRecipient("docproc/cluster.default/0/chain.bar");
barFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::bar")));
barFrame.setHop(barHop);
fooFrame.getMessageBus().setupRouting(
new RoutingSpec().addTable(new RoutingTableSpec(DocumentProtocol.NAME)
.addHop(fooHop)
.addHop(barHop)));
fooFrame.getNetwork().registerSession("0/chain.foo");
fooFrame.getNetwork().registerSession("0/chain.bar");
assertTrue(fooFrame.waitSlobrok("docproc/cluster.default/0/*", 2));
RoutingNode fooChild = fooFrame.select(1).get(0);
assertEquals("docproc/cluster.default/0/chain.foo", fooChild.getRoute().toString());
RoutingNode barChild = barFrame.select(1).get(0);
assertEquals("docproc/cluster.default/0/chain.bar", barChild.getRoute().toString());
barChild.handleReply(new EmptyReply());
fooChild.handleReply(new EmptyReply());
assertNotNull(barFrame.getReceptor().getReply(TIMEOUT));
assertNotNull(fooFrame.getReceptor().getReply(TIMEOUT));
}
/**
 * Verifies that repeated selection on the given frame yields exactly the expected
 * recipients. Each pass acknowledges the selected node so the frame can reply.
 *
 * @param frame      the frame to select on
 * @param numSelects the number of select passes to perform
 * @param expected   the expected recipients, in natural (sorted) order
 */
private static void assertSelect(PolicyTestFrame frame, int numSelects, List<String> expected) {
    Set<String> seen = new TreeSet<>();
    for (int pass = 0; pass < numSelects; ++pass) {
        if (expected.isEmpty()) {
            // Nothing routable; selecting zero recipients must still produce a reply.
            frame.select(0);
        } else {
            RoutingNode node = frame.select(1).get(0);
            seen.add(node.getRoute().toString());
            node.handleReply(new EmptyReply());
        }
        assertNotNull(frame.getReceptor().getReply(TIMEOUT));
    }
    assertEquals(expected.size(), seen.size());
    Iterator<String> actual = seen.iterator();
    for (String want : expected) {
        assertEquals(want, actual.next());
    }
}
// Polls the slobrok mirror at 10 ms intervals until it reports ready, throwing
// TimeoutException after TIMEOUT_MILLIS of waiting.
private static void assertMirrorReady(Mirror slobrok)
        throws InterruptedException, TimeoutException
{
    int attempts = (int)(TIMEOUT_MILLIS / 10);
    while (attempts-- > 0) {
        if (slobrok.ready()) {
            return;
        }
        Thread.sleep(10);
    }
    throw new TimeoutException();
}
// Polls the mirror at 10 ms intervals until the pattern resolves to exactly
// numEntries services, throwing TimeoutException after TIMEOUT_MILLIS.
private static void assertMirrorContains(IMirror slobrok, String pattern, int numEntries)
        throws InterruptedException, TimeoutException
{
    int attempts = (int)(TIMEOUT_MILLIS / 10);
    while (attempts-- > 0) {
        if (slobrok.lookup(pattern).size() == numEntries) {
            return;
        }
        Thread.sleep(10);
    }
    throw new TimeoutException();
}
// Convenience overload that does not verify the number of matching mirror entries.
private void setupExternPolicy(PolicyTestFrame frame, Slobrok slobrok, String pattern)
throws InterruptedException, TimeoutException
{
setupExternPolicy(frame, slobrok, pattern, -1);
}
// Points the frame's "test" hop at an [Extern] policy for the given slobrok and
// pattern, then blocks until the policy's mirror is ready and — when
// numEntries >= 0 — until the pattern resolves to that many services.
private void setupExternPolicy(PolicyTestFrame frame, Slobrok slobrok, String pattern, int numEntries)
throws InterruptedException, TimeoutException
{
String param = "tcp/localhost:" + slobrok.port() + ";" + pattern;
frame.setHop(new HopSpec("test", "[Extern:" + param + "]"));
MessageBus mbus = frame.getMessageBus();
HopBlueprint hop = mbus.getRoutingTable(DocumentProtocol.NAME).getHop("test");
PolicyDirective dir = (PolicyDirective)hop.getDirective(0);
// Fetch the actual policy instance so the test can wait on its mirror directly.
ExternPolicy policy = (ExternPolicy)mbus.getRoutingPolicy(DocumentProtocol.NAME, dir.getName(), dir.getParam());
assertMirrorReady(policy.getMirror());
if (numEntries >= 0) {
assertMirrorContains(policy.getMirror(), pattern, numEntries);
}
}
// Creates a fresh test frame backed by this fixture's document type manager.
private PolicyTestFrame newFrame() {
return new PolicyTestFrame(manager);
}
// Creates a fresh test frame preloaded with the given message.
private PolicyTestFrame newFrame(Message msg) {
PolicyTestFrame frame = newFrame();
frame.setMessage(msg);
return frame;
}
// Creates a put message for an empty "testdoc" document with the given id.
private PutDocumentMessage newPutDocument(String documentId) {
return new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId(documentId))));
}
// Creates a frame preloaded with a put message for the given document id.
private PolicyTestFrame newPutDocumentFrame(String documentId) {
return newFrame(newPutDocument(documentId));
}
} |
Not sure why 10 were used initially. | public void testSubsetService() {
PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
frame.setMessage(new PutDocumentMessage(new DocumentPut(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::"))))));
int numberOfServices = 4;
frame.setHop(new HopSpec("test", "docproc/cluster.default/[SubsetService:2]/chain.default"));
Set<String> lst = new HashSet<>();
for (int i = 1; i <= numberOfServices; ++i) {
frame.getNetwork().registerSession(i + "/chain.default");
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", i));
RoutingNode leaf = frame.select(1).get(0);
lst.add(leaf.getRoute().toString());
leaf.handleReply(new EmptyReply());
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
assertTrue(lst.size() > 1);
String prev = null;
for (int i = 1; i <= numberOfServices; ++i) {
RoutingNode leaf = frame.select(1).get(0);
String next = leaf.getRoute().toString();
if (prev == null) {
assertNotNull(next);
} else {
assertNotEquals(prev, next);
}
prev = next;
leaf.handleReply(new EmptyReply());
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
lst.clear();
for (int i = 1; i <= numberOfServices; ++i) {
RoutingNode leaf = frame.select(1).get(0);
String route = leaf.getRoute().toString();
lst.add(route);
frame.getNetwork().unregisterSession(route.substring(frame.getIdentity().length() + 1));
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", numberOfServices - i));
Reply reply = new EmptyReply();
reply.addError(new Error(ErrorCode.NO_ADDRESS_FOR_SERVICE, route));
leaf.handleReply(reply);
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
assertEquals(numberOfServices, lst.size());
frame.setHop(new HopSpec("test", "[SubsetService:2]"));
frame.assertMergeOneReply("*");
frame.destroy();
} | frame.setHop(new HopSpec("test", "[SubsetService:2]")); | public void testSubsetService() {
PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
frame.setMessage(new PutDocumentMessage(new DocumentPut(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::"))))));
int numberOfServices = 4;
frame.setHop(new HopSpec("test", "docproc/cluster.default/[SubsetService:2]/chain.default"));
Set<String> lst = new HashSet<>();
for (int i = 1; i <= numberOfServices; ++i) {
frame.getNetwork().registerSession(i + "/chain.default");
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", i));
RoutingNode leaf = frame.select(1).get(0);
lst.add(leaf.getRoute().toString());
leaf.handleReply(new EmptyReply());
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
assertTrue(lst.size() > 1);
String prev = null;
for (int i = 1; i <= numberOfServices; ++i) {
RoutingNode leaf = frame.select(1).get(0);
String next = leaf.getRoute().toString();
if (prev == null) {
assertNotNull(next);
} else {
assertNotEquals(prev, next);
}
prev = next;
leaf.handleReply(new EmptyReply());
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
lst.clear();
for (int i = 1; i <= numberOfServices; ++i) {
RoutingNode leaf = frame.select(1).get(0);
String route = leaf.getRoute().toString();
lst.add(route);
frame.getNetwork().unregisterSession(route.substring(frame.getIdentity().length() + 1));
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", numberOfServices - i));
Reply reply = new EmptyReply();
reply.addError(new Error(ErrorCode.NO_ADDRESS_FOR_SERVICE, route));
leaf.handleReply(reply);
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
assertEquals(numberOfServices, lst.size());
frame.setHop(new HopSpec("test", "[SubsetService:2]"));
frame.assertMergeOneReply("*");
frame.destroy();
} | class PolicyTestCase {
// Timeout applied to all reply waits, in TIMEOUT_UNIT units.
private static final int TIMEOUT = 300;
private static final TimeUnit TIMEOUT_UNIT = TimeUnit.SECONDS;
// Same timeout converted to milliseconds, used by the mirror-polling helpers.
private static final long TIMEOUT_MILLIS = TIMEOUT_UNIT.toMillis(TIMEOUT);
// Shared document type manager, configured from disk in setUp().
private final DocumentTypeManager manager = new DocumentTypeManager();
// Loads the "testdoc" (and "other") document type configuration before each test.
@Before
public void setUp() {
DocumentTypeManagerConfigurer.configure(manager, "file:./test/cfg/testdoc.cfg");
}
// Verifies that DocumentProtocol.createPolicy maps each policy name to the
// expected policy implementation class.
@Test
public void testProtocol() {
DocumentProtocol protocol = new DocumentProtocol(manager);
RoutingPolicy policy = protocol.createPolicy("AND", null);
assertTrue(policy instanceof ANDPolicy);
policy = new DocumentProtocol(manager).createPolicy("DocumentRouteSelector", "raw:route[0]\n");
assertTrue(policy instanceof DocumentRouteSelectorPolicy);
policy = new DocumentProtocol(manager).createPolicy("Extern", "foo;bar/baz");
assertTrue(policy instanceof ExternPolicy);
policy = new DocumentProtocol(manager).createPolicy("LocalService", null);
assertTrue(policy instanceof LocalServicePolicy);
policy = new DocumentProtocol(manager).createPolicy("RoundRobin", null);
assertTrue(policy instanceof RoundRobinPolicy);
policy = new DocumentProtocol(manager).createPolicy("SubsetService", null);
assertTrue(policy instanceof SubsetServicePolicy);
policy = new DocumentProtocol(manager).createPolicy("LoadBalancer", "cluster=docproc/cluster.default;session=chain.default");
assertTrue(policy instanceof LoadBalancerPolicy);
}
// The [AND] policy must select all recipients (or its parameter when given),
// and merge one or two replies into a single reply.
@Test
public void testAND() {
PolicyTestFrame frame = new PolicyTestFrame(manager);
frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::")))));
frame.setHop(new HopSpec("test", "[AND]")
.addRecipient("foo")
.addRecipient("bar"));
frame.assertSelect(Arrays.asList("foo", "bar"));
// With a parameter, the parameter overrides the configured recipients.
frame.setHop(new HopSpec("test", "[AND:baz]")
.addRecipient("foo")
.addRecipient("bar"));
frame.assertSelect(Arrays.asList("baz"));
frame.setHop(new HopSpec("test", "[AND:foo]"));
frame.assertMergeOneReply("foo");
frame.setHop(new HopSpec("test", "[AND:foo bar]"));
frame.assertMergeTwoReplies("foo", "bar");
frame.destroy();
}
// An [Extern] policy with a missing or malformed parameter (no pattern, no
// separator, or an unmatchable pattern part) must resolve to an ErrorPolicy
// rather than throwing.
@Test
public void requireThatExternPolicyWithIllegalParamIsAnErrorPolicy() throws ListenFailedException {
    Slobrok slobrok = new Slobrok();
    String spec = "tcp/localhost:" + slobrok.port();
    assertTrue(new DocumentProtocol(manager).createPolicy("Extern", null) instanceof ErrorPolicy);
    assertTrue(new DocumentProtocol(manager).createPolicy("Extern", "") instanceof ErrorPolicy);
    assertTrue(new DocumentProtocol(manager).createPolicy("Extern", spec) instanceof ErrorPolicy);
    assertTrue(new DocumentProtocol(manager).createPolicy("Extern", spec + ";") instanceof ErrorPolicy);
    assertTrue(new DocumentProtocol(manager).createPolicy("Extern", spec + ";bar") instanceof ErrorPolicy);
    // Fixed: the slobrok started for this test was never stopped (resource
    // leak); sibling tests consistently call stop() when done.
    slobrok.stop();
}
// An [Extern] policy whose pattern matches no registered service must select
// no recipients.
@Test
public void requireThatExternPolicyWithUnknownPatternSelectsNone() throws Exception {
PolicyTestFrame frame = newPutDocumentFrame("id:ns:testdoc::");
setupExternPolicy(frame, new Slobrok(), "foo/bar");
frame.assertSelect(null);
}
// An [Extern] policy must distribute selection over all ten services
// registered in the external slobrok.
@Test
public void requireThatExternPolicySelectsFromExternSlobrok() throws Exception {
PolicyTestFrame frame = newPutDocumentFrame("id:ns:testdoc::");
Slobrok slobrok = new Slobrok();
List<TestServer> servers = new ArrayList<>();
for (int i = 0; i < 10; ++i) {
TestServer server = new TestServer("docproc/cluster.default/" + i, null, slobrok,
new DocumentProtocol(manager));
server.net.registerSession("chain.default");
servers.add(server);
}
setupExternPolicy(frame, slobrok, "docproc/cluster.default/*/chain.default", 10);
Set<String> lst = new HashSet<>();
for (int i = 0; i < 10; ++i) {
RoutingNode leaf = frame.select(1).get(0);
String recipient = leaf.getRoute().toString();
lst.add(recipient);
leaf.handleReply(new EmptyReply());
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
// Every one of the ten services was selected exactly once.
assertEquals(10, lst.size());
for (TestServer server : servers) {
server.destroy();
}
frame.destroy();
}
// An [Extern] policy with a single matching service must merge its one reply
// using the protocol's merge logic.
@Test
public void requireThatExternPolicyMergesOneReplyAsProtocol() throws Exception {
PolicyTestFrame frame = newPutDocumentFrame("id:ns:testdoc::");
Slobrok slobrok = new Slobrok();
TestServer server = new TestServer("docproc/cluster.default/0", null, slobrok,
new DocumentProtocol(manager));
server.net.registerSession("chain.default");
setupExternPolicy(frame, slobrok, "docproc/cluster.default/*/chain.default", 1);
frame.assertMergeOneReply(server.net.getConnectionSpec() + "/chain.default");
server.destroy();
frame.destroy();
}
// End-to-end send over an [Extern] hop: source -> intermediate (in the extern
// slobrok) -> destination, with the reply forwarded back to the source.
@Test
public void testExternSend() throws Exception {
// Source runs against its own local slobrok; itr and dst share the extern one.
Slobrok local = new Slobrok();
TestServer src = new TestServer("src", null, local, new DocumentProtocol(manager));
SourceSession ss = src.mb.createSourceSession(new Receptor(), new SourceSessionParams().setTimeout(TIMEOUT));
Slobrok slobrok = new Slobrok();
TestServer itr = new TestServer("itr",
new RoutingTableSpec(DocumentProtocol.NAME)
.addRoute(new RouteSpec("default").addHop("dst"))
.addHop(new HopSpec("dst", "dst/session")),
slobrok, new DocumentProtocol(manager));
IntermediateSession is = itr.mb.createIntermediateSession("session", true, new Receptor(), new Receptor());
TestServer dst = new TestServer("dst", null, slobrok, new DocumentProtocol(manager));
DestinationSession ds = dst.mb.createDestinationSession("session", true, new Receptor());
Message msg = new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::"));
msg.getTrace().setLevel(9);
msg.setRoute(Route.parse("[Extern:tcp/localhost:" + slobrok.port() + ";itr/session] default"));
assertTrue(ss.send(msg).isAccepted());
assertNotNull(msg = ((Receptor)is.getMessageHandler()).getMessage(TIMEOUT));
is.forward(msg);
assertNotNull(msg = ((Receptor)ds.getMessageHandler()).getMessage(TIMEOUT));
ds.acknowledge(msg);
Reply reply = ((Receptor)is.getReplyHandler()).getReply(TIMEOUT);
assertNotNull(reply);
is.forward(reply);
assertNotNull(reply = ((Receptor)ss.getReplyHandler()).getReply(TIMEOUT));
System.out.println(reply.getTrace().toString());
src.destroy();
itr.destroy();
dst.destroy();
slobrok.stop();
local.stop();
}
// An [Extern] parameter may list several slobrok connection specs; the policy
// must keep working when the first slobrok goes away and a second one (added
// to the spec list) takes over.
@Test
public void testExternMultipleSlobroks() throws ListenFailedException {
Slobrok local = new Slobrok();
TestServer srcServer = new TestServer("src", null, local, new DocumentProtocol(manager));
SourceSession srcSession =
srcServer.mb.createSourceSession(new Receptor(), new SourceSessionParams().setTimeout(TIMEOUT));
Slobrok extern = new Slobrok();
String spec = "tcp/localhost:" + extern.port();
TestServer dstServer = new TestServer("dst", null, extern, new DocumentProtocol(manager));
Receptor dstHandler = new Receptor();
DestinationSession dstSession = dstServer.mb.createDestinationSession("session", true, dstHandler);
Message msg = new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::"));
msg.setRoute(Route.parse("[Extern:" + spec + ";dst/session]"));
assertTrue(srcSession.send(msg).isAccepted());
assertNotNull(msg = dstHandler.getMessage(TIMEOUT));
dstSession.acknowledge(msg);
Reply reply = ((Receptor)srcSession.getReplyHandler()).getReply(TIMEOUT);
assertNotNull(reply);
// Tear down the first extern slobrok and destination entirely.
extern.stop();
dstSession.destroy();
dstServer.destroy();
dstHandler.reset();
assertNull(dstHandler.getMessage(0));
// Start a replacement slobrok and append it to the spec list.
extern = new Slobrok();
spec += ",tcp/localhost:" + extern.port();
dstServer = new TestServer("dst", null, extern, new DocumentProtocol(manager));
dstHandler = new Receptor();
dstSession = dstServer.mb.createDestinationSession("session", true, dstHandler);
msg = new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::"));
msg.setRoute(Route.parse("[Extern:" + spec + ";dst/session]"));
assertTrue(srcSession.send(msg).isAccepted());
assertNotNull(msg = dstHandler.getMessage(TIMEOUT));
dstSession.acknowledge(msg);
reply = ((Receptor)srcSession.getReplyHandler()).getReply(TIMEOUT);
assertNotNull(reply);
extern.stop();
dstSession.destroy();
dstServer.destroy();
local.stop();
srcSession.destroy();
srcServer.destroy();
}
// Verifies the [LocalService] policy: ten registered sessions yield ten
// distinct selections; an unknown parameter falls back to the wildcard route.
@Test
public void testLocalService() {
PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::0")))));
for (int i = 0; i < 10; ++i) {
frame.getNetwork().registerSession(i + "/chain.default");
}
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 10));
frame.setHop(new HopSpec("test", "docproc/cluster.default/[LocalService]/chain.default"));
Set<String> lst = new HashSet<>();
for (int i = 0; i < 10; ++i) {
RoutingNode leaf = frame.select(1).get(0);
String recipient = leaf.getRoute().toString();
lst.add(recipient);
leaf.handleReply(new EmptyReply());
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
assertEquals(10, lst.size());
lst.clear();
frame.setHop(new HopSpec("test", "docproc/cluster.default/[LocalService:broken]/chain.default"));
for (int i = 0; i < 10; ++i) {
RoutingNode leaf = frame.select(1).get(0);
String recipient = leaf.getRoute().toString();
assertEquals(recipient, "docproc/cluster.default/*/chain.default");
lst.add(recipient);
leaf.handleReply(new EmptyReply());
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
assertEquals(1, lst.size());
frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::")))));
frame.setHop(new HopSpec("test", "[LocalService]"));
frame.assertMergeOneReply("*");
frame.destroy();
}
// Verifies that two distinct [LocalService] hops are cached independently.
@Test
public void testLocalServiceCache() {
PolicyTestFrame fooFrame = new PolicyTestFrame("docproc/cluster.default", manager);
HopSpec fooHop = new HopSpec("foo", "docproc/cluster.default/[LocalService]/chain.foo");
fooFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::foo")));
fooFrame.setHop(fooHop);
PolicyTestFrame barFrame = new PolicyTestFrame(fooFrame);
HopSpec barHop = new HopSpec("bar", "docproc/cluster.default/[LocalService]/chain.bar");
barFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::bar")));
barFrame.setHop(barHop);
fooFrame.getMessageBus().setupRouting(
new RoutingSpec().addTable(new RoutingTableSpec(DocumentProtocol.NAME)
.addHop(fooHop)
.addHop(barHop)));
fooFrame.getNetwork().registerSession("0/chain.foo");
fooFrame.getNetwork().registerSession("0/chain.bar");
assertTrue(fooFrame.waitSlobrok("docproc/cluster.default/0/*", 2));
RoutingNode fooChild = fooFrame.select(1).get(0);
assertEquals("docproc/cluster.default/0/chain.foo", fooChild.getRoute().getHop(0).toString());
RoutingNode barChild = barFrame.select(1).get(0);
assertEquals("docproc/cluster.default/0/chain.bar", barChild.getRoute().getHop(0).toString());
barChild.handleReply(new EmptyReply());
fooChild.handleReply(new EmptyReply());
assertNotNull(barFrame.getReceptor().getReply(TIMEOUT));
assertNotNull(fooFrame.getReceptor().getReply(TIMEOUT));
}
// A fanned-out get where only one recipient returns the document must merge to
// a reply carrying that document's last-modified timestamp.
@Test
public void multipleGetRepliesAreMergedToFoundDocument() {
PolicyTestFrame frame = new PolicyTestFrame(manager);
frame.setHop(new HopSpec("test", getDocumentRouteSelectorRawConfig())
.addRecipient("foo").addRecipient("bar"));
frame.setMessage(new GetDocumentMessage(new DocumentId("id:ns:testdoc::yarn"), AllFields.NAME));
List<RoutingNode> selected = frame.select(2);
for (int i = 0, len = selected.size(); i < len; ++i) {
Document doc = null;
if (i == 0) {
doc = new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::yarn"));
doc.setLastModified(123456L);
}
GetDocumentReply reply = new GetDocumentReply(null);
reply.setDocument(doc);
selected.get(i).handleReply(reply);
}
Reply reply = frame.getReceptor().getReply(TIMEOUT);
assertNotNull(reply);
assertEquals(DocumentProtocol.REPLY_GETDOCUMENT, reply.getType());
assertEquals(123456L, ((GetDocumentReply)reply).getLastModified());
}
// Raw inline DocumentRouteSelector policy config with two routes ("foo", "bar")
// that both select "testdoc" documents.
private String getDocumentRouteSelectorRawConfig() {
    StringBuilder cfg = new StringBuilder("[DocumentRouteSelector:raw:");
    cfg.append("route[2]\n");
    cfg.append("route[0].name \"foo\"\n");
    cfg.append("route[0].selector \"testdoc\"\n");
    cfg.append("route[0].feed \"myfeed\"\n");
    cfg.append("route[1].name \"bar\"\n");
    cfg.append("route[1].selector \"testdoc\"\n");
    cfg.append("route[1].feed \"myfeed\"\n]");
    return cfg.toString();
}
// Removes must be routed by document type.
@Test
public void remove_document_messages_are_sent_to_the_route_handling_the_given_document_type() {
PolicyTestFrame frame = createFrameWithTwoRoutes();
frame.setMessage(createRemove("id:ns:testdoc::1"));
frame.assertSelect(Arrays.asList("testdoc-route"));
frame.setMessage(createRemove("id:ns:other::1"));
frame.assertSelect(Arrays.asList("other-route"));
}
@Test
public void get_document_messages_are_sent_to_the_route_handling_the_given_document_type() {
PolicyTestFrame frame = createFrameWithTwoRoutes();
frame.setMessage(createGet("id:ns:testdoc::1"));
frame.assertSelect(Arrays.asList("testdoc-route"));
frame.setMessage(createGet("id:ns:other::1"));
frame.assertSelect(Arrays.asList("other-route"));
}
private PolicyTestFrame createFrameWithTwoRoutes() {
PolicyTestFrame result = new PolicyTestFrame(manager);
result.setHop(new HopSpec("test", createDocumentRouteSelectorConfigWithTwoRoutes())
.addRecipient("testdoc-route").addRecipient("other-route"));
return result;
}
// Raw DocumentRouteSelector spec with one route per document type ("testdoc"/"other"),
// each guarded by a field condition; neither route declares a feed.
private String createDocumentRouteSelectorConfigWithTwoRoutes() {
    String header = "[DocumentRouteSelector:raw:route[2]\n";
    String testdocRoute = "route[0].name \"testdoc-route\"\n"
            + "route[0].selector \"testdoc and testdoc.stringfield != '0'\"\n"
            + "route[0].feed \"\"\n";
    String otherRoute = "route[1].name \"other-route\"\n"
            + "route[1].selector \"other and other.intfield != '0'\"\n"
            + "route[1].feed \"\"\n";
    return header + testdocRoute + otherRoute + "]";
}
// Builds a REMOVE message for the given document id string.
private RemoveDocumentMessage createRemove(String docId) {
    DocumentId id = new DocumentId(docId);
    return new RemoveDocumentMessage(id);
}
// Builds a GET message for the given document id string.
private GetDocumentMessage createGet(String docId) {
    DocumentId id = new DocumentId(docId);
    return new GetDocumentMessage(id);
}
/**
 * Verifies that SubsetService policies are cached per hop: two frames sharing one message
 * bus, each with its own [SubsetService:2] hop, must each resolve to their own chain session.
 *
 * Fix: the method carried a duplicated {@code @Test} annotation; {@code @Test} is not
 * repeatable, so the duplicate was a compile error and has been removed.
 */
@Test
public void testSubsetServiceCache() {
    PolicyTestFrame fooFrame = new PolicyTestFrame("docproc/cluster.default", manager);
    HopSpec fooHop = new HopSpec("foo", "docproc/cluster.default/[SubsetService:2]/chain.foo");
    fooFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::foo")));
    fooFrame.setHop(fooHop);
    PolicyTestFrame barFrame = new PolicyTestFrame(fooFrame);
    HopSpec barHop = new HopSpec("bar", "docproc/cluster.default/[SubsetService:2]/chain.bar");
    barFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::bar")));
    barFrame.setHop(barHop);
    fooFrame.getMessageBus().setupRouting(
            new RoutingSpec().addTable(new RoutingTableSpec(DocumentProtocol.NAME)
                                               .addHop(fooHop)
                                               .addHop(barHop)));
    fooFrame.getNetwork().registerSession("0/chain.foo");
    fooFrame.getNetwork().registerSession("0/chain.bar");
    assertTrue(fooFrame.waitSlobrok("docproc/cluster.default/0/*", 2));
    // Each frame must select its own chain, proving the cached policy is keyed by hop.
    RoutingNode fooChild = fooFrame.select(1).get(0);
    assertEquals("docproc/cluster.default/0/chain.foo", fooChild.getRoute().getHop(0).toString());
    RoutingNode barChild = barFrame.select(1).get(0);
    assertEquals("docproc/cluster.default/0/chain.bar", barChild.getRoute().getHop(0).toString());
    barChild.handleReply(new EmptyReply());
    fooChild.handleReply(new EmptyReply());
    assertNotNull(barFrame.getReceptor().getReply(TIMEOUT));
    assertNotNull(fooFrame.getReceptor().getReply(TIMEOUT));
}
// Verifies DocumentRouteSelector policy creation (ok vs. broken config yields ErrorPolicy)
// and that GET/PUT/REMOVE/UPDATE for "testdoc" all select the "foo" route.
@Test
public void testDocumentRouteSelector() {
String okConfig = "raw:route[0]\n";
// "foo bar" is not a valid document selector, so this config must fail.
String errConfig = "raw:" +
"route[1]\n" +
"route[0].name \"foo\"\n" +
"route[0].selector \"foo bar\"\n" +
"route[0].feed \"baz\"\n";
DocumentProtocol protocol = new DocumentProtocol(manager, okConfig);
assertTrue(protocol.createPolicy("DocumentRouteSelector", null) instanceof DocumentRouteSelectorPolicy);
assertTrue(protocol.createPolicy("DocumentRouteSelector", "") instanceof DocumentRouteSelectorPolicy);
assertTrue(protocol.createPolicy("DocumentRouteSelector", errConfig) instanceof ErrorPolicy);
protocol = new DocumentProtocol(manager, errConfig);
assertTrue(protocol.createPolicy("DocumentRouteSelector", null) instanceof ErrorPolicy);
assertTrue(protocol.createPolicy("DocumentRouteSelector", "") instanceof ErrorPolicy);
assertTrue(protocol.createPolicy("DocumentRouteSelector", okConfig) instanceof DocumentRouteSelectorPolicy);
PolicyTestFrame frame = new PolicyTestFrame(manager);
frame.setHop(new HopSpec("test", "[DocumentRouteSelector:raw:" +
"route[2]\n" +
"route[0].name \"foo\"\n" +
"route[0].selector \"testdoc\"\n" +
"route[0].feed \"myfeed\"\n" +
"route[1].name \"bar\"\n" +
"route[1].selector \"other\"\n" +
"route[1].feed \"myfeed\"\n]").addRecipient("foo").addRecipient("bar"));
frame.setMessage(new GetDocumentMessage(new DocumentId("id:ns:testdoc::"), "fieldSet"));
frame.assertSelect(Arrays.asList("foo"));
Message put = new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::"))));
frame.setMessage(put);
frame.assertSelect(Arrays.asList("foo"));
frame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::")));
frame.assertSelect(Arrays.asList("foo"));
frame.setMessage(new UpdateDocumentMessage(new DocumentUpdate(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::"))));
frame.assertSelect(Arrays.asList("foo"));
frame.setMessage(put);
frame.assertMergeOneReply("foo");
frame.destroy();
}
// Selectors with arithmetic on document fields must still pick the matching route.
@Test
public void testDocumentSelectorDualCluster() {
PolicyTestFrame frame = new PolicyTestFrame(manager);
frame.setHop(new HopSpec("test", "[DocumentRouteSelector:raw:" +
"route[2]\n" +
"route[0].name \"foo\"\n" +
"route[0].selector \"(testdoc AND (testdoc.intfield / 1000 > 0))\"\n" +
"route[0].feed \"myfeed\"\n" +
"route[1].name \"bar\"\n" +
"route[1].selector \"(other AND (other.intfield / 1000 > 0))\"\n" +
"route[1].feed \"myfeed\"\n]").addRecipient("foo").addRecipient("bar"));
frame.setMessage(new GetDocumentMessage(new DocumentId("id:ns:testdoc::"), "fieldSet"));
frame.assertSelect(Arrays.asList("foo"));
Document doc = new Document(manager.getDocumentType("testdoc"), new DocumentId("id:ns:testdoc::"));
doc.setFieldValue("intfield", 3000);
Message put = new PutDocumentMessage(new DocumentPut(doc));
frame.setMessage(put);
frame.assertSelect(Arrays.asList("foo"));
frame.setMessage(put);
frame.assertMergeOneReply("foo");
frame.destroy();
}
// A PUT whose document does not satisfy any route selector is answered with
// REPLY_DOCUMENTIGNORED; an UPDATE (which may still apply) is routed anyway.
@Test
public void testDocumentRouteSelectorIgnore() {
PolicyTestFrame frame = new PolicyTestFrame(manager);
frame.setHop(new HopSpec("test", "[DocumentRouteSelector:raw:" +
"route[1]\n" +
"route[0].name \"docproc/cluster.foo\"\n" +
"route[0].selector \"testdoc and testdoc.stringfield == 'foo'\"\n" +
"route[0].feed \"myfeed\"\n]").addRecipient("docproc/cluster.foo"));
frame.setMessage(new PutDocumentMessage(
new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:yarn:testdoc:n=1234:fluff")))));
frame.select(0);
Reply reply = frame.getReceptor().getReply(TIMEOUT);
assertNotNull(reply);
assertEquals(DocumentProtocol.REPLY_DOCUMENTIGNORED, reply.getType());
assertEquals(0, reply.getNumErrors());
frame.setMessage(new UpdateDocumentMessage(new DocumentUpdate(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::"))));
frame.assertSelect(Arrays.asList("docproc/cluster.foo"));
frame.destroy();
}
// LoadBalancer policy must resolve to the single registered chain.default session.
@Test
public void testLoadBalancer() {
PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::")))));
frame.getNetwork().registerSession("0/chain.default");
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 1));
frame.setHop(new HopSpec("test", "[LoadBalancer:cluster=docproc/cluster.default;session=chain.default]"));
assertSelect(frame, 1, Arrays.asList(frame.getNetwork().getConnectionSpec() + "/chain.default"));
}
// RoundRobin must cycle over available recipients and drop ones whose sessions disappear.
@Test
public void testRoundRobin() {
PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::")))));
for (int i = 0; i < 10; ++i) {
frame.getNetwork().registerSession(i + "/chain.default");
}
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 10));
frame.setHop(new HopSpec("test", "[RoundRobin]")
.addRecipient("docproc/cluster.default/3/chain.default")
.addRecipient("docproc/cluster.default/6/chain.default")
.addRecipient("docproc/cluster.default/9/chain.default"));
assertSelect(frame, 32, Arrays.asList("docproc/cluster.default/3/chain.default",
"docproc/cluster.default/6/chain.default",
"docproc/cluster.default/9/chain.default"));
// Unregister sessions one by one; the policy must stop selecting them.
frame.getNetwork().unregisterSession("6/chain.default");
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 9));
assertSelect(frame, 32, Arrays.asList("docproc/cluster.default/3/chain.default",
"docproc/cluster.default/9/chain.default"));
frame.getNetwork().unregisterSession("3/chain.default");
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 8));
assertSelect(frame, 32, Arrays.asList("docproc/cluster.default/9/chain.default"));
frame.getNetwork().unregisterSession("9/chain.default");
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 7));
assertSelect(frame, 32, new ArrayList<>());
frame.setHop(new HopSpec("test", "[RoundRobin]").addRecipient("docproc/cluster.default/0/chain.default"));
frame.assertMergeOneReply("docproc/cluster.default/0/chain.default");
frame.destroy();
}
// RoundRobin policies must be cached per hop, not shared between different hops.
@Test
public void testRoundRobinCache() {
PolicyTestFrame fooFrame = new PolicyTestFrame("docproc/cluster.default", manager);
HopSpec fooHop = new HopSpec("foo", "[RoundRobin]").addRecipient("docproc/cluster.default/0/chain.foo");
fooFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::foo")));
fooFrame.setHop(fooHop);
PolicyTestFrame barFrame = new PolicyTestFrame(fooFrame);
HopSpec barHop = new HopSpec("bar", "[RoundRobin]").addRecipient("docproc/cluster.default/0/chain.bar");
barFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::bar")));
barFrame.setHop(barHop);
fooFrame.getMessageBus().setupRouting(
new RoutingSpec().addTable(new RoutingTableSpec(DocumentProtocol.NAME)
.addHop(fooHop)
.addHop(barHop)));
fooFrame.getNetwork().registerSession("0/chain.foo");
fooFrame.getNetwork().registerSession("0/chain.bar");
assertTrue(fooFrame.waitSlobrok("docproc/cluster.default/0/*", 2));
RoutingNode fooChild = fooFrame.select(1).get(0);
assertEquals("docproc/cluster.default/0/chain.foo", fooChild.getRoute().toString());
RoutingNode barChild = barFrame.select(1).get(0);
assertEquals("docproc/cluster.default/0/chain.bar", barChild.getRoute().toString());
barChild.handleReply(new EmptyReply());
fooChild.handleReply(new EmptyReply());
assertNotNull(barFrame.getReceptor().getReply(TIMEOUT));
assertNotNull(fooFrame.getReceptor().getReply(TIMEOUT));
}
/**
 * Runs the given number of selection passes on a frame and asserts that the set of
 * recipients observed equals the expected list.
 *
 * @param frame      the frame to run selections on
 * @param numSelects how many selection passes to perform
 * @param expected   the recipients that must have been chosen, in sorted order
 */
private static void assertSelect(PolicyTestFrame frame, int numSelects, List<String> expected) {
    Set<String> seen = new TreeSet<>();
    for (int pass = 0; pass < numSelects; ++pass) {
        if (expected.isEmpty()) {
            frame.select(0);
        } else {
            RoutingNode node = frame.select(1).get(0);
            seen.add(node.getRoute().toString());
            node.handleReply(new EmptyReply());
        }
        assertNotNull(frame.getReceptor().getReply(TIMEOUT));
    }
    assertEquals(expected.size(), seen.size());
    Iterator<String> actual = seen.iterator();
    for (String recipient : expected) {
        assertEquals(recipient, actual.next());
    }
}
/** Polls the given slobrok mirror every 10 ms until it reports ready, or times out. */
private static void assertMirrorReady(Mirror slobrok)
        throws InterruptedException, TimeoutException
{
    int attempts = (int)(TIMEOUT_MILLIS / 10);
    while (attempts-- > 0) {
        if (slobrok.ready()) {
            return;
        }
        Thread.sleep(10);
    }
    throw new TimeoutException();
}
/** Polls the mirror every 10 ms until the pattern resolves to numEntries, or times out. */
private static void assertMirrorContains(IMirror slobrok, String pattern, int numEntries)
        throws InterruptedException, TimeoutException
{
    int attempts = (int)(TIMEOUT_MILLIS / 10);
    while (attempts-- > 0) {
        if (slobrok.lookup(pattern).size() == numEntries) {
            return;
        }
        Thread.sleep(10);
    }
    throw new TimeoutException();
}
// Convenience overload: set up an Extern policy hop without waiting for a specific
// number of slobrok entries.
private void setupExternPolicy(PolicyTestFrame frame, Slobrok slobrok, String pattern)
throws InterruptedException, TimeoutException
{
setupExternPolicy(frame, slobrok, pattern, -1);
}
// Configures the frame with an [Extern:<spec>;<pattern>] hop pointing at the given
// slobrok, then waits until the policy's mirror is ready (and, when numEntries >= 0,
// until the pattern resolves to that many entries).
private void setupExternPolicy(PolicyTestFrame frame, Slobrok slobrok, String pattern, int numEntries)
throws InterruptedException, TimeoutException
{
String param = "tcp/localhost:" + slobrok.port() + ";" + pattern;
frame.setHop(new HopSpec("test", "[Extern:" + param + "]"));
MessageBus mbus = frame.getMessageBus();
HopBlueprint hop = mbus.getRoutingTable(DocumentProtocol.NAME).getHop("test");
PolicyDirective dir = (PolicyDirective)hop.getDirective(0);
ExternPolicy policy = (ExternPolicy)mbus.getRoutingPolicy(DocumentProtocol.NAME, dir.getName(), dir.getParam());
assertMirrorReady(policy.getMirror());
if (numEntries >= 0) {
assertMirrorContains(policy.getMirror(), pattern, numEntries);
}
}
// Creates a fresh test frame bound to this test's document type manager.
private PolicyTestFrame newFrame() {
return new PolicyTestFrame(manager);
}
// Creates a test frame preloaded with the given message.
private PolicyTestFrame newFrame(Message msg) {
    PolicyTestFrame result = newFrame();
    result.setMessage(msg);
    return result;
}
// Builds a PUT message carrying a new, empty "testdoc" document with the given id.
private PutDocumentMessage newPutDocument(String documentId) {
    Document doc = new Document(manager.getDocumentType("testdoc"), new DocumentId(documentId));
    return new PutDocumentMessage(new DocumentPut(doc));
}
// Frame preloaded with a PUT for a new "testdoc" document with the given id.
private PolicyTestFrame newPutDocumentFrame(String documentId) {
return newFrame(newPutDocument(documentId));
}
} | class PolicyTestCase {
// Reply-wait timeout used throughout the tests; interpreted via TIMEOUT_UNIT (seconds).
private static final int TIMEOUT = 300;
private static final TimeUnit TIMEOUT_UNIT = TimeUnit.SECONDS;
private static final long TIMEOUT_MILLIS = TIMEOUT_UNIT.toMillis(TIMEOUT);
// Shared document type manager; populated from testdoc.cfg in setUp().
private final DocumentTypeManager manager = new DocumentTypeManager();
@Before
public void setUp() {
DocumentTypeManagerConfigurer.configure(manager, "file:./test/cfg/testdoc.cfg");
}
// Each policy name must map to its concrete RoutingPolicy implementation.
@Test
public void testProtocol() {
DocumentProtocol protocol = new DocumentProtocol(manager);
RoutingPolicy policy = protocol.createPolicy("AND", null);
assertTrue(policy instanceof ANDPolicy);
policy = new DocumentProtocol(manager).createPolicy("DocumentRouteSelector", "raw:route[0]\n");
assertTrue(policy instanceof DocumentRouteSelectorPolicy);
policy = new DocumentProtocol(manager).createPolicy("Extern", "foo;bar/baz");
assertTrue(policy instanceof ExternPolicy);
policy = new DocumentProtocol(manager).createPolicy("LocalService", null);
assertTrue(policy instanceof LocalServicePolicy);
policy = new DocumentProtocol(manager).createPolicy("RoundRobin", null);
assertTrue(policy instanceof RoundRobinPolicy);
policy = new DocumentProtocol(manager).createPolicy("SubsetService", null);
assertTrue(policy instanceof SubsetServicePolicy);
policy = new DocumentProtocol(manager).createPolicy("LoadBalancer", "cluster=docproc/cluster.default;session=chain.default");
assertTrue(policy instanceof LoadBalancerPolicy);
}
// AND without a parameter selects all recipients; with a parameter it selects that hop.
@Test
public void testAND() {
PolicyTestFrame frame = new PolicyTestFrame(manager);
frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::")))));
frame.setHop(new HopSpec("test", "[AND]")
.addRecipient("foo")
.addRecipient("bar"));
frame.assertSelect(Arrays.asList("foo", "bar"));
frame.setHop(new HopSpec("test", "[AND:baz]")
.addRecipient("foo")
.addRecipient("bar"));
frame.assertSelect(Arrays.asList("baz"));
frame.setHop(new HopSpec("test", "[AND:foo]"));
frame.assertMergeOneReply("foo");
frame.setHop(new HopSpec("test", "[AND:foo bar]"));
frame.assertMergeTwoReplies("foo", "bar");
frame.destroy();
}
// Extern requires a "<spec>;<pattern>" parameter; anything else yields an ErrorPolicy.
@Test
public void requireThatExternPolicyWithIllegalParamIsAnErrorPolicy() throws ListenFailedException {
Slobrok slobrok = new Slobrok();
String spec = "tcp/localhost:" + slobrok.port();
assertTrue(new DocumentProtocol(manager).createPolicy("Extern", null) instanceof ErrorPolicy);
assertTrue(new DocumentProtocol(manager).createPolicy("Extern", "") instanceof ErrorPolicy);
assertTrue(new DocumentProtocol(manager).createPolicy("Extern", spec) instanceof ErrorPolicy);
assertTrue(new DocumentProtocol(manager).createPolicy("Extern", spec + ";") instanceof ErrorPolicy);
assertTrue(new DocumentProtocol(manager).createPolicy("Extern", spec + ";bar") instanceof ErrorPolicy);
}
// A pattern with no matching services must cause the policy to select nothing.
@Test
public void requireThatExternPolicyWithUnknownPatternSelectsNone() throws Exception {
PolicyTestFrame frame = newPutDocumentFrame("id:ns:testdoc::");
setupExternPolicy(frame, new Slobrok(), "foo/bar");
frame.assertSelect(null);
}
// Extern must distribute selections across all services registered in the external slobrok.
@Test
public void requireThatExternPolicySelectsFromExternSlobrok() throws Exception {
PolicyTestFrame frame = newPutDocumentFrame("id:ns:testdoc::");
Slobrok slobrok = new Slobrok();
List<TestServer> servers = new ArrayList<>();
for (int i = 0; i < 10; ++i) {
TestServer server = new TestServer("docproc/cluster.default/" + i, null, slobrok,
new DocumentProtocol(manager));
server.net.registerSession("chain.default");
servers.add(server);
}
setupExternPolicy(frame, slobrok, "docproc/cluster.default/*/chain.default", 10);
Set<String> lst = new HashSet<>();
for (int i = 0; i < 10; ++i) {
RoutingNode leaf = frame.select(1).get(0);
String recipient = leaf.getRoute().toString();
lst.add(recipient);
leaf.handleReply(new EmptyReply());
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
// All ten distinct services should eventually have been selected.
assertEquals(10, lst.size());
for (TestServer server : servers) {
server.destroy();
}
frame.destroy();
}
// With a single matching service the policy should merge its one reply transparently.
@Test
public void requireThatExternPolicyMergesOneReplyAsProtocol() throws Exception {
PolicyTestFrame frame = newPutDocumentFrame("id:ns:testdoc::");
Slobrok slobrok = new Slobrok();
TestServer server = new TestServer("docproc/cluster.default/0", null, slobrok,
new DocumentProtocol(manager));
server.net.registerSession("chain.default");
setupExternPolicy(frame, slobrok, "docproc/cluster.default/*/chain.default", 1);
frame.assertMergeOneReply(server.net.getConnectionSpec() + "/chain.default");
server.destroy();
frame.destroy();
}
// End-to-end send through an Extern hop: src -> itr (found via external slobrok) -> dst,
// with the reply propagated back through the intermediate.
@Test
public void testExternSend() throws Exception {
Slobrok local = new Slobrok();
TestServer src = new TestServer("src", null, local, new DocumentProtocol(manager));
SourceSession ss = src.mb.createSourceSession(new Receptor(), new SourceSessionParams().setTimeout(TIMEOUT));
Slobrok slobrok = new Slobrok();
TestServer itr = new TestServer("itr",
new RoutingTableSpec(DocumentProtocol.NAME)
.addRoute(new RouteSpec("default").addHop("dst"))
.addHop(new HopSpec("dst", "dst/session")),
slobrok, new DocumentProtocol(manager));
IntermediateSession is = itr.mb.createIntermediateSession("session", true, new Receptor(), new Receptor());
TestServer dst = new TestServer("dst", null, slobrok, new DocumentProtocol(manager));
DestinationSession ds = dst.mb.createDestinationSession("session", true, new Receptor());
Message msg = new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::"));
msg.getTrace().setLevel(9);
msg.setRoute(Route.parse("[Extern:tcp/localhost:" + slobrok.port() + ";itr/session] default"));
assertTrue(ss.send(msg).isAccepted());
assertNotNull(msg = ((Receptor)is.getMessageHandler()).getMessage(TIMEOUT));
is.forward(msg);
assertNotNull(msg = ((Receptor)ds.getMessageHandler()).getMessage(TIMEOUT));
ds.acknowledge(msg);
Reply reply = ((Receptor)is.getReplyHandler()).getReply(TIMEOUT);
assertNotNull(reply);
is.forward(reply);
assertNotNull(reply = ((Receptor)ss.getReplyHandler()).getReply(TIMEOUT));
System.out.println(reply.getTrace().toString());
src.destroy();
itr.destroy();
dst.destroy();
slobrok.stop();
local.stop();
}
// An Extern spec may list several slobroks; the policy must keep working when the first
// slobrok dies and a second one (appended to the spec) takes over.
@Test
public void testExternMultipleSlobroks() throws ListenFailedException {
Slobrok local = new Slobrok();
TestServer srcServer = new TestServer("src", null, local, new DocumentProtocol(manager));
SourceSession srcSession =
srcServer.mb.createSourceSession(new Receptor(), new SourceSessionParams().setTimeout(TIMEOUT));
Slobrok extern = new Slobrok();
String spec = "tcp/localhost:" + extern.port();
TestServer dstServer = new TestServer("dst", null, extern, new DocumentProtocol(manager));
Receptor dstHandler = new Receptor();
DestinationSession dstSession = dstServer.mb.createDestinationSession("session", true, dstHandler);
Message msg = new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::"));
msg.setRoute(Route.parse("[Extern:" + spec + ";dst/session]"));
assertTrue(srcSession.send(msg).isAccepted());
assertNotNull(msg = dstHandler.getMessage(TIMEOUT));
dstSession.acknowledge(msg);
Reply reply = ((Receptor)srcSession.getReplyHandler()).getReply(TIMEOUT);
assertNotNull(reply);
// Kill the first slobrok and destination, then bring up replacements on a new slobrok.
extern.stop();
dstSession.destroy();
dstServer.destroy();
dstHandler.reset();
assertNull(dstHandler.getMessage(0));
extern = new Slobrok();
spec += ",tcp/localhost:" + extern.port();
dstServer = new TestServer("dst", null, extern, new DocumentProtocol(manager));
dstHandler = new Receptor();
dstSession = dstServer.mb.createDestinationSession("session", true, dstHandler);
msg = new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::"));
msg.setRoute(Route.parse("[Extern:" + spec + ";dst/session]"));
assertTrue(srcSession.send(msg).isAccepted());
assertNotNull(msg = dstHandler.getMessage(TIMEOUT));
dstSession.acknowledge(msg);
reply = ((Receptor)srcSession.getReplyHandler()).getReply(TIMEOUT);
assertNotNull(reply);
extern.stop();
dstSession.destroy();
dstServer.destroy();
local.stop();
srcSession.destroy();
srcServer.destroy();
}
// LocalService must spread selections over all matching sessions; a broken parameter
// leaves the wildcard unresolved so every selection hits the same literal route.
@Test
public void testLocalService() {
PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::0")))));
for (int i = 0; i < 10; ++i) {
frame.getNetwork().registerSession(i + "/chain.default");
}
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 10));
frame.setHop(new HopSpec("test", "docproc/cluster.default/[LocalService]/chain.default"));
Set<String> lst = new HashSet<>();
for (int i = 0; i < 10; ++i) {
RoutingNode leaf = frame.select(1).get(0);
String recipient = leaf.getRoute().toString();
lst.add(recipient);
leaf.handleReply(new EmptyReply());
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
assertEquals(10, lst.size());
lst.clear();
frame.setHop(new HopSpec("test", "docproc/cluster.default/[LocalService:broken]/chain.default"));
for (int i = 0; i < 10; ++i) {
RoutingNode leaf = frame.select(1).get(0);
String recipient = leaf.getRoute().toString();
assertEquals(recipient, "docproc/cluster.default/*/chain.default");
lst.add(recipient);
leaf.handleReply(new EmptyReply());
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
assertEquals(1, lst.size());
frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::")))));
frame.setHop(new HopSpec("test", "[LocalService]"));
frame.assertMergeOneReply("*");
frame.destroy();
}
// LocalService policies must be cached per hop, not shared between different hops.
@Test
public void testLocalServiceCache() {
PolicyTestFrame fooFrame = new PolicyTestFrame("docproc/cluster.default", manager);
HopSpec fooHop = new HopSpec("foo", "docproc/cluster.default/[LocalService]/chain.foo");
fooFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::foo")));
fooFrame.setHop(fooHop);
PolicyTestFrame barFrame = new PolicyTestFrame(fooFrame);
HopSpec barHop = new HopSpec("bar", "docproc/cluster.default/[LocalService]/chain.bar");
barFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::bar")));
barFrame.setHop(barHop);
fooFrame.getMessageBus().setupRouting(
new RoutingSpec().addTable(new RoutingTableSpec(DocumentProtocol.NAME)
.addHop(fooHop)
.addHop(barHop)));
fooFrame.getNetwork().registerSession("0/chain.foo");
fooFrame.getNetwork().registerSession("0/chain.bar");
assertTrue(fooFrame.waitSlobrok("docproc/cluster.default/0/*", 2));
RoutingNode fooChild = fooFrame.select(1).get(0);
assertEquals("docproc/cluster.default/0/chain.foo", fooChild.getRoute().getHop(0).toString());
RoutingNode barChild = barFrame.select(1).get(0);
assertEquals("docproc/cluster.default/0/chain.bar", barChild.getRoute().getHop(0).toString());
barChild.handleReply(new EmptyReply());
fooChild.handleReply(new EmptyReply());
assertNotNull(barFrame.getReceptor().getReply(TIMEOUT));
assertNotNull(fooFrame.getReceptor().getReply(TIMEOUT));
}
// Two recipients answer the same GET; only the first returns a document. The merged
// reply must surface the found document's lastModified timestamp.
@Test
public void multipleGetRepliesAreMergedToFoundDocument() {
PolicyTestFrame frame = new PolicyTestFrame(manager);
frame.setHop(new HopSpec("test", getDocumentRouteSelectorRawConfig())
.addRecipient("foo").addRecipient("bar"));
frame.setMessage(new GetDocumentMessage(new DocumentId("id:ns:testdoc::yarn"), AllFields.NAME));
List<RoutingNode> selected = frame.select(2);
for (int i = 0, len = selected.size(); i < len; ++i) {
// Only recipient 0 "finds" the document; the other replies empty-handed.
Document doc = null;
if (i == 0) {
doc = new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::yarn"));
doc.setLastModified(123456L);
}
GetDocumentReply reply = new GetDocumentReply(null);
reply.setDocument(doc);
selected.get(i).handleReply(reply);
}
Reply reply = frame.getReceptor().getReply(TIMEOUT);
assertNotNull(reply);
assertEquals(DocumentProtocol.REPLY_GETDOCUMENT, reply.getType());
assertEquals(123456L, ((GetDocumentReply)reply).getLastModified());
}
// Builds a raw DocumentRouteSelector policy spec with two routes ("foo" and "bar"),
// both selecting "testdoc" and feeding "myfeed".
private String getDocumentRouteSelectorRawConfig() {
    StringBuilder cfg = new StringBuilder("[DocumentRouteSelector:raw:route[2]\n");
    String[] names = { "foo", "bar" };
    for (int i = 0; i < names.length; ++i) {
        cfg.append("route[").append(i).append("].name \"").append(names[i]).append("\"\n");
        cfg.append("route[").append(i).append("].selector \"testdoc\"\n");
        cfg.append("route[").append(i).append("].feed \"myfeed\"\n");
    }
    return cfg.append(']').toString();
}
// REMOVE messages must be routed to the route whose selector matches the doc type.
@Test
public void remove_document_messages_are_sent_to_the_route_handling_the_given_document_type() {
PolicyTestFrame frame = createFrameWithTwoRoutes();
frame.setMessage(createRemove("id:ns:testdoc::1"));
frame.assertSelect(Arrays.asList("testdoc-route"));
frame.setMessage(createRemove("id:ns:other::1"));
frame.assertSelect(Arrays.asList("other-route"));
}
// GET messages must be routed to the route whose selector matches the doc type.
@Test
public void get_document_messages_are_sent_to_the_route_handling_the_given_document_type() {
PolicyTestFrame frame = createFrameWithTwoRoutes();
frame.setMessage(createGet("id:ns:testdoc::1"));
frame.assertSelect(Arrays.asList("testdoc-route"));
frame.setMessage(createGet("id:ns:other::1"));
frame.assertSelect(Arrays.asList("other-route"));
}
// Frame with a DocumentRouteSelector hop over a "testdoc" route and an "other" route.
private PolicyTestFrame createFrameWithTwoRoutes() {
PolicyTestFrame result = new PolicyTestFrame(manager);
result.setHop(new HopSpec("test", createDocumentRouteSelectorConfigWithTwoRoutes())
.addRecipient("testdoc-route").addRecipient("other-route"));
return result;
}
// Raw DocumentRouteSelector spec with one route per document type ("testdoc"/"other"),
// each guarded by a field condition; neither route declares a feed.
private String createDocumentRouteSelectorConfigWithTwoRoutes() {
    String header = "[DocumentRouteSelector:raw:route[2]\n";
    String testdocRoute = "route[0].name \"testdoc-route\"\n"
            + "route[0].selector \"testdoc and testdoc.stringfield != '0'\"\n"
            + "route[0].feed \"\"\n";
    String otherRoute = "route[1].name \"other-route\"\n"
            + "route[1].selector \"other and other.intfield != '0'\"\n"
            + "route[1].feed \"\"\n";
    return header + testdocRoute + otherRoute + "]";
}
// Builds a REMOVE message for the given document id string.
private RemoveDocumentMessage createRemove(String docId) {
    DocumentId id = new DocumentId(docId);
    return new RemoveDocumentMessage(id);
}
// Builds a GET message for the given document id string.
private GetDocumentMessage createGet(String docId) {
    DocumentId id = new DocumentId(docId);
    return new GetDocumentMessage(id);
}
/**
 * Verifies that SubsetService policies are cached per hop: two frames sharing one message
 * bus, each with its own [SubsetService:2] hop, must each resolve to their own chain session.
 *
 * Fix: the method carried a duplicated {@code @Test} annotation; {@code @Test} is not
 * repeatable, so the duplicate was a compile error and has been removed.
 */
@Test
public void testSubsetServiceCache() {
    PolicyTestFrame fooFrame = new PolicyTestFrame("docproc/cluster.default", manager);
    HopSpec fooHop = new HopSpec("foo", "docproc/cluster.default/[SubsetService:2]/chain.foo");
    fooFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::foo")));
    fooFrame.setHop(fooHop);
    PolicyTestFrame barFrame = new PolicyTestFrame(fooFrame);
    HopSpec barHop = new HopSpec("bar", "docproc/cluster.default/[SubsetService:2]/chain.bar");
    barFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::bar")));
    barFrame.setHop(barHop);
    fooFrame.getMessageBus().setupRouting(
            new RoutingSpec().addTable(new RoutingTableSpec(DocumentProtocol.NAME)
                                               .addHop(fooHop)
                                               .addHop(barHop)));
    fooFrame.getNetwork().registerSession("0/chain.foo");
    fooFrame.getNetwork().registerSession("0/chain.bar");
    assertTrue(fooFrame.waitSlobrok("docproc/cluster.default/0/*", 2));
    // Each frame must select its own chain, proving the cached policy is keyed by hop.
    RoutingNode fooChild = fooFrame.select(1).get(0);
    assertEquals("docproc/cluster.default/0/chain.foo", fooChild.getRoute().getHop(0).toString());
    RoutingNode barChild = barFrame.select(1).get(0);
    assertEquals("docproc/cluster.default/0/chain.bar", barChild.getRoute().getHop(0).toString());
    barChild.handleReply(new EmptyReply());
    fooChild.handleReply(new EmptyReply());
    assertNotNull(barFrame.getReceptor().getReply(TIMEOUT));
    assertNotNull(fooFrame.getReceptor().getReply(TIMEOUT));
}
// Verifies DocumentRouteSelector policy creation (ok vs. broken config yields ErrorPolicy)
// and that GET/PUT/REMOVE/UPDATE for "testdoc" all select the "foo" route.
@Test
public void testDocumentRouteSelector() {
String okConfig = "raw:route[0]\n";
// "foo bar" is not a valid document selector, so this config must fail.
String errConfig = "raw:" +
"route[1]\n" +
"route[0].name \"foo\"\n" +
"route[0].selector \"foo bar\"\n" +
"route[0].feed \"baz\"\n";
DocumentProtocol protocol = new DocumentProtocol(manager, okConfig);
assertTrue(protocol.createPolicy("DocumentRouteSelector", null) instanceof DocumentRouteSelectorPolicy);
assertTrue(protocol.createPolicy("DocumentRouteSelector", "") instanceof DocumentRouteSelectorPolicy);
assertTrue(protocol.createPolicy("DocumentRouteSelector", errConfig) instanceof ErrorPolicy);
protocol = new DocumentProtocol(manager, errConfig);
assertTrue(protocol.createPolicy("DocumentRouteSelector", null) instanceof ErrorPolicy);
assertTrue(protocol.createPolicy("DocumentRouteSelector", "") instanceof ErrorPolicy);
assertTrue(protocol.createPolicy("DocumentRouteSelector", okConfig) instanceof DocumentRouteSelectorPolicy);
PolicyTestFrame frame = new PolicyTestFrame(manager);
frame.setHop(new HopSpec("test", "[DocumentRouteSelector:raw:" +
"route[2]\n" +
"route[0].name \"foo\"\n" +
"route[0].selector \"testdoc\"\n" +
"route[0].feed \"myfeed\"\n" +
"route[1].name \"bar\"\n" +
"route[1].selector \"other\"\n" +
"route[1].feed \"myfeed\"\n]").addRecipient("foo").addRecipient("bar"));
frame.setMessage(new GetDocumentMessage(new DocumentId("id:ns:testdoc::"), "fieldSet"));
frame.assertSelect(Arrays.asList("foo"));
Message put = new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::"))));
frame.setMessage(put);
frame.assertSelect(Arrays.asList("foo"));
frame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::")));
frame.assertSelect(Arrays.asList("foo"));
frame.setMessage(new UpdateDocumentMessage(new DocumentUpdate(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::"))));
frame.assertSelect(Arrays.asList("foo"));
frame.setMessage(put);
frame.assertMergeOneReply("foo");
frame.destroy();
}
// Selectors with arithmetic on document fields must still pick the matching route.
@Test
public void testDocumentSelectorDualCluster() {
PolicyTestFrame frame = new PolicyTestFrame(manager);
frame.setHop(new HopSpec("test", "[DocumentRouteSelector:raw:" +
"route[2]\n" +
"route[0].name \"foo\"\n" +
"route[0].selector \"(testdoc AND (testdoc.intfield / 1000 > 0))\"\n" +
"route[0].feed \"myfeed\"\n" +
"route[1].name \"bar\"\n" +
"route[1].selector \"(other AND (other.intfield / 1000 > 0))\"\n" +
"route[1].feed \"myfeed\"\n]").addRecipient("foo").addRecipient("bar"));
frame.setMessage(new GetDocumentMessage(new DocumentId("id:ns:testdoc::"), "fieldSet"));
frame.assertSelect(Arrays.asList("foo"));
Document doc = new Document(manager.getDocumentType("testdoc"), new DocumentId("id:ns:testdoc::"));
doc.setFieldValue("intfield", 3000);
Message put = new PutDocumentMessage(new DocumentPut(doc));
frame.setMessage(put);
frame.assertSelect(Arrays.asList("foo"));
frame.setMessage(put);
frame.assertMergeOneReply("foo");
frame.destroy();
}
// A PUT whose document does not satisfy any route selector is answered with
// REPLY_DOCUMENTIGNORED; an UPDATE (which may still apply) is routed anyway.
@Test
public void testDocumentRouteSelectorIgnore() {
PolicyTestFrame frame = new PolicyTestFrame(manager);
frame.setHop(new HopSpec("test", "[DocumentRouteSelector:raw:" +
"route[1]\n" +
"route[0].name \"docproc/cluster.foo\"\n" +
"route[0].selector \"testdoc and testdoc.stringfield == 'foo'\"\n" +
"route[0].feed \"myfeed\"\n]").addRecipient("docproc/cluster.foo"));
frame.setMessage(new PutDocumentMessage(
new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:yarn:testdoc:n=1234:fluff")))));
frame.select(0);
Reply reply = frame.getReceptor().getReply(TIMEOUT);
assertNotNull(reply);
assertEquals(DocumentProtocol.REPLY_DOCUMENTIGNORED, reply.getType());
assertEquals(0, reply.getNumErrors());
frame.setMessage(new UpdateDocumentMessage(new DocumentUpdate(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::"))));
frame.assertSelect(Arrays.asList("docproc/cluster.foo"));
frame.destroy();
}
// LoadBalancer policy must resolve to the single registered chain.default session.
@Test
public void testLoadBalancer() {
PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::")))));
frame.getNetwork().registerSession("0/chain.default");
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 1));
frame.setHop(new HopSpec("test", "[LoadBalancer:cluster=docproc/cluster.default;session=chain.default]"));
assertSelect(frame, 1, Arrays.asList(frame.getNetwork().getConnectionSpec() + "/chain.default"));
}
@Test
public void testRoundRobin() {
PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::")))));
for (int i = 0; i < 10; ++i) {
frame.getNetwork().registerSession(i + "/chain.default");
}
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 10));
frame.setHop(new HopSpec("test", "[RoundRobin]")
.addRecipient("docproc/cluster.default/3/chain.default")
.addRecipient("docproc/cluster.default/6/chain.default")
.addRecipient("docproc/cluster.default/9/chain.default"));
assertSelect(frame, 32, Arrays.asList("docproc/cluster.default/3/chain.default",
"docproc/cluster.default/6/chain.default",
"docproc/cluster.default/9/chain.default"));
frame.getNetwork().unregisterSession("6/chain.default");
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 9));
assertSelect(frame, 32, Arrays.asList("docproc/cluster.default/3/chain.default",
"docproc/cluster.default/9/chain.default"));
frame.getNetwork().unregisterSession("3/chain.default");
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 8));
assertSelect(frame, 32, Arrays.asList("docproc/cluster.default/9/chain.default"));
frame.getNetwork().unregisterSession("9/chain.default");
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 7));
assertSelect(frame, 32, new ArrayList<>());
frame.setHop(new HopSpec("test", "[RoundRobin]").addRecipient("docproc/cluster.default/0/chain.default"));
frame.assertMergeOneReply("docproc/cluster.default/0/chain.default");
frame.destroy();
}
@Test
public void testRoundRobinCache() {
PolicyTestFrame fooFrame = new PolicyTestFrame("docproc/cluster.default", manager);
HopSpec fooHop = new HopSpec("foo", "[RoundRobin]").addRecipient("docproc/cluster.default/0/chain.foo");
fooFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::foo")));
fooFrame.setHop(fooHop);
PolicyTestFrame barFrame = new PolicyTestFrame(fooFrame);
HopSpec barHop = new HopSpec("bar", "[RoundRobin]").addRecipient("docproc/cluster.default/0/chain.bar");
barFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::bar")));
barFrame.setHop(barHop);
fooFrame.getMessageBus().setupRouting(
new RoutingSpec().addTable(new RoutingTableSpec(DocumentProtocol.NAME)
.addHop(fooHop)
.addHop(barHop)));
fooFrame.getNetwork().registerSession("0/chain.foo");
fooFrame.getNetwork().registerSession("0/chain.bar");
assertTrue(fooFrame.waitSlobrok("docproc/cluster.default/0/*", 2));
RoutingNode fooChild = fooFrame.select(1).get(0);
assertEquals("docproc/cluster.default/0/chain.foo", fooChild.getRoute().toString());
RoutingNode barChild = barFrame.select(1).get(0);
assertEquals("docproc/cluster.default/0/chain.bar", barChild.getRoute().toString());
barChild.handleReply(new EmptyReply());
fooChild.handleReply(new EmptyReply());
assertNotNull(barFrame.getReceptor().getReply(TIMEOUT));
assertNotNull(fooFrame.getReceptor().getReply(TIMEOUT));
}
/**
* Ensures that the given number of select passes on the given frame produces an expected list of recipients.
*
* @param frame The frame to select on.
* @param numSelects The number of selects to perform.
* @param expected The list of expected recipients.
*/
private static void assertSelect(PolicyTestFrame frame, int numSelects, List<String> expected) {
Set<String> lst = new TreeSet<>();
for (int i = 0; i < numSelects; ++i) {
if (!expected.isEmpty()) {
RoutingNode leaf = frame.select(1).get(0);
String recipient = leaf.getRoute().toString();
lst.add(recipient);
leaf.handleReply(new EmptyReply());
} else {
frame.select(0);
}
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
assertEquals(expected.size(), lst.size());
Iterator<String> it = lst.iterator();
for (String recipient : expected) {
assertEquals(recipient, it.next());
}
}
private static void assertMirrorReady(Mirror slobrok)
throws InterruptedException, TimeoutException
{
for (int i = 0; i < TIMEOUT_MILLIS / 10; ++i) {
if (slobrok.ready()) {
return;
}
Thread.sleep(10);
}
throw new TimeoutException();
}
private static void assertMirrorContains(IMirror slobrok, String pattern, int numEntries)
throws InterruptedException, TimeoutException
{
for (int i = 0; i < TIMEOUT_MILLIS / 10; ++i) {
if (slobrok.lookup(pattern).size() == numEntries) {
return;
}
Thread.sleep(10);
}
throw new TimeoutException();
}
private void setupExternPolicy(PolicyTestFrame frame, Slobrok slobrok, String pattern)
throws InterruptedException, TimeoutException
{
setupExternPolicy(frame, slobrok, pattern, -1);
}
private void setupExternPolicy(PolicyTestFrame frame, Slobrok slobrok, String pattern, int numEntries)
throws InterruptedException, TimeoutException
{
String param = "tcp/localhost:" + slobrok.port() + ";" + pattern;
frame.setHop(new HopSpec("test", "[Extern:" + param + "]"));
MessageBus mbus = frame.getMessageBus();
HopBlueprint hop = mbus.getRoutingTable(DocumentProtocol.NAME).getHop("test");
PolicyDirective dir = (PolicyDirective)hop.getDirective(0);
ExternPolicy policy = (ExternPolicy)mbus.getRoutingPolicy(DocumentProtocol.NAME, dir.getName(), dir.getParam());
assertMirrorReady(policy.getMirror());
if (numEntries >= 0) {
assertMirrorContains(policy.getMirror(), pattern, numEntries);
}
}
private PolicyTestFrame newFrame() {
return new PolicyTestFrame(manager);
}
private PolicyTestFrame newFrame(Message msg) {
PolicyTestFrame frame = newFrame();
frame.setMessage(msg);
return frame;
}
private PutDocumentMessage newPutDocument(String documentId) {
return new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId(documentId))));
}
private PolicyTestFrame newPutDocumentFrame(String documentId) {
return newFrame(newPutDocument(documentId));
}
} |
Would lead to collision if multiple CCs on same host, from different clusters. This isn't something we support, anyway. If there are overlapping CCs, they're joined. Otherwise they're disjoint, or there's a single cluster. | public boolean requiresWantedPort() {
return false;
} | return false; | public boolean requiresWantedPort() {
return false;
} | class ClusterControllerContainer extends Container implements
PlatformBundlesConfig.Producer,
ZookeeperServerConfig.Producer,
ReindexingConfig.Producer
{
private static final ComponentSpecification CLUSTERCONTROLLER_BUNDLE = new ComponentSpecification("clustercontroller-apps");
private static final ComponentSpecification ZOOKEEPER_SERVER_BUNDLE = new ComponentSpecification("zookeeper-server");
private static final ComponentSpecification REINDEXING_CONTROLLER_BUNDLE = new ComponentSpecification("clustercontroller-reindexer");
private final Set<String> bundles = new TreeSet<>();
private final ModelContext.FeatureFlags featureFlags;
public ClusterControllerContainer(
AbstractConfigProducer<?> parent,
int index,
boolean runStandaloneZooKeeper,
DeployState deployState) {
super(parent, "" + index, index, deployState.isHosted());
this.featureFlags = deployState.featureFlags();
addHandler("clustercontroller-status",
"com.yahoo.vespa.clustercontroller.apps.clustercontroller.StatusHandler",
"/clustercontroller-status/*",
CLUSTERCONTROLLER_BUNDLE);
addHandler("clustercontroller-state-restapi-v2",
"com.yahoo.vespa.clustercontroller.apps.clustercontroller.StateRestApiV2Handler",
"/cluster/v2/*",
CLUSTERCONTROLLER_BUNDLE);
addComponent(new AccessLogComponent(containerCluster().orElse(null), AccessLogComponent.AccessLogType.jsonAccessLog,
AccessLogComponent.CompressionType.GZIP,
"controller",
deployState.isHosted()));
addFileBundle("clustercontroller-apps");
addFileBundle("clustercontroller-core");
addFileBundle("clustercontroller-utils");
addFileBundle("zookeeper-server");
configureReindexing();
configureZooKeeperServer(runStandaloneZooKeeper, deployState.featureFlags().reconfigurableZookeeperServer());
}
@Override
public int getWantedPort() {
return 19050;
}
@Override
@Override
public ContainerServiceType myServiceType() {
return ContainerServiceType.CLUSTERCONTROLLER_CONTAINER;
}
private void configureZooKeeperServer(boolean runStandaloneZooKeeper, boolean reconfigurable) {
if (reconfigurable) {
ContainerModelBuilder.addReconfigurableZooKeeperServerComponents(this);
} else {
addComponent("clustercontroller-zookeeper-server",
runStandaloneZooKeeper
? "com.yahoo.vespa.zookeeper.VespaZooKeeperServerImpl"
: "com.yahoo.vespa.zookeeper.DummyVespaZooKeeperServer",
ZOOKEEPER_SERVER_BUNDLE);
}
}
private void addHandler(Handler<?> h, String path) {
h.addServerBindings(SystemBindingPattern.fromHttpPath(path));
super.addHandler(h);
}
private void addFileBundle(String bundleName) {
bundles.add(PlatformBundles.absoluteBundlePath(bundleName).toString());
}
private ComponentModel createComponentModel(String id, String className, ComponentSpecification bundle) {
return new ComponentModel(new BundleInstantiationSpecification(new ComponentSpecification(id),
new ComponentSpecification(className),
bundle));
}
private void addComponent(String id, String className, ComponentSpecification bundle) {
addComponent(new Component<>(createComponentModel(id, className, bundle)));
}
private void addHandler(String id, String className, String path, ComponentSpecification bundle) {
addHandler(new Handler<>(createComponentModel(id, className, bundle)), path);
}
private ReindexingContext reindexingContext() {
return ((ClusterControllerContainerCluster) parent).reindexingContext();
}
private void configureReindexing() {
addFileBundle(REINDEXING_CONTROLLER_BUNDLE.getName());
addComponent(new SimpleComponent(DocumentAccessProvider.class.getName()));
addComponent("reindexing-maintainer",
"ai.vespa.reindexing.ReindexingMaintainer",
REINDEXING_CONTROLLER_BUNDLE);
addHandler("reindexing-status",
"ai.vespa.reindexing.http.ReindexingV1ApiHandler",
"/reindexing/v1/*",
REINDEXING_CONTROLLER_BUNDLE);
}
@Override
public void getConfig(PlatformBundlesConfig.Builder builder) {
bundles.forEach(builder::bundlePaths);
}
@Override
public void getConfig(ZookeeperServerConfig.Builder builder) {
builder.myid(index());
builder.dynamicReconfiguration(featureFlags.reconfigurableZookeeperServer());
}
@Override
public void getConfig(ReindexingConfig.Builder builder) {
ReindexingContext ctx = reindexingContext();
if (!ctx.reindexing().enabled()) {
builder.enabled(false);
return;
}
builder.enabled(ctx.reindexing().enabled());
for (String clusterId : ctx.clusterIds()) {
ReindexingConfig.Clusters.Builder clusterBuilder = new ReindexingConfig.Clusters.Builder();
for (NewDocumentType type : ctx.documentTypesForCluster(clusterId)) {
String typeName = type.getFullName().getName();
ctx.reindexing().status(clusterId, typeName).ifPresent(
status -> clusterBuilder.documentTypes(
typeName,
new ReindexingConfig.Clusters.DocumentTypes.Builder()
.readyAtMillis(status.ready().toEpochMilli())));
}
builder.clusters(clusterId, clusterBuilder);
}
}
} | class ClusterControllerContainer extends Container implements
PlatformBundlesConfig.Producer,
ZookeeperServerConfig.Producer,
ReindexingConfig.Producer
{
private static final ComponentSpecification CLUSTERCONTROLLER_BUNDLE = new ComponentSpecification("clustercontroller-apps");
private static final ComponentSpecification ZOOKEEPER_SERVER_BUNDLE = new ComponentSpecification("zookeeper-server");
private static final ComponentSpecification REINDEXING_CONTROLLER_BUNDLE = new ComponentSpecification("clustercontroller-reindexer");
private final Set<String> bundles = new TreeSet<>();
private final ModelContext.FeatureFlags featureFlags;
public ClusterControllerContainer(
AbstractConfigProducer<?> parent,
int index,
boolean runStandaloneZooKeeper,
DeployState deployState) {
super(parent, "" + index, index, deployState.isHosted());
this.featureFlags = deployState.featureFlags();
addHandler("clustercontroller-status",
"com.yahoo.vespa.clustercontroller.apps.clustercontroller.StatusHandler",
"/clustercontroller-status/*",
CLUSTERCONTROLLER_BUNDLE);
addHandler("clustercontroller-state-restapi-v2",
"com.yahoo.vespa.clustercontroller.apps.clustercontroller.StateRestApiV2Handler",
"/cluster/v2/*",
CLUSTERCONTROLLER_BUNDLE);
addComponent(new AccessLogComponent(containerCluster().orElse(null), AccessLogComponent.AccessLogType.jsonAccessLog,
AccessLogComponent.CompressionType.GZIP,
"controller",
deployState.isHosted()));
addFileBundle("clustercontroller-apps");
addFileBundle("clustercontroller-core");
addFileBundle("clustercontroller-utils");
addFileBundle("zookeeper-server");
configureReindexing();
configureZooKeeperServer(runStandaloneZooKeeper, deployState.featureFlags().reconfigurableZookeeperServer());
}
@Override
public int getWantedPort() {
return 19050;
}
@Override
@Override
public ContainerServiceType myServiceType() {
return ContainerServiceType.CLUSTERCONTROLLER_CONTAINER;
}
private void configureZooKeeperServer(boolean runStandaloneZooKeeper, boolean reconfigurable) {
if (reconfigurable) {
ContainerModelBuilder.addReconfigurableZooKeeperServerComponents(this);
} else {
addComponent("clustercontroller-zookeeper-server",
runStandaloneZooKeeper
? "com.yahoo.vespa.zookeeper.VespaZooKeeperServerImpl"
: "com.yahoo.vespa.zookeeper.DummyVespaZooKeeperServer",
ZOOKEEPER_SERVER_BUNDLE);
}
}
private void addHandler(Handler<?> h, String path) {
h.addServerBindings(SystemBindingPattern.fromHttpPath(path));
super.addHandler(h);
}
private void addFileBundle(String bundleName) {
bundles.add(PlatformBundles.absoluteBundlePath(bundleName).toString());
}
private ComponentModel createComponentModel(String id, String className, ComponentSpecification bundle) {
return new ComponentModel(new BundleInstantiationSpecification(new ComponentSpecification(id),
new ComponentSpecification(className),
bundle));
}
private void addComponent(String id, String className, ComponentSpecification bundle) {
addComponent(new Component<>(createComponentModel(id, className, bundle)));
}
private void addHandler(String id, String className, String path, ComponentSpecification bundle) {
addHandler(new Handler<>(createComponentModel(id, className, bundle)), path);
}
private ReindexingContext reindexingContext() {
return ((ClusterControllerContainerCluster) parent).reindexingContext();
}
private void configureReindexing() {
addFileBundle(REINDEXING_CONTROLLER_BUNDLE.getName());
addComponent(new SimpleComponent(DocumentAccessProvider.class.getName()));
addComponent("reindexing-maintainer",
"ai.vespa.reindexing.ReindexingMaintainer",
REINDEXING_CONTROLLER_BUNDLE);
addHandler("reindexing-status",
"ai.vespa.reindexing.http.ReindexingV1ApiHandler",
"/reindexing/v1/*",
REINDEXING_CONTROLLER_BUNDLE);
}
@Override
public void getConfig(PlatformBundlesConfig.Builder builder) {
bundles.forEach(builder::bundlePaths);
}
@Override
public void getConfig(ZookeeperServerConfig.Builder builder) {
builder.myid(index());
builder.dynamicReconfiguration(featureFlags.reconfigurableZookeeperServer());
}
@Override
public void getConfig(ReindexingConfig.Builder builder) {
ReindexingContext ctx = reindexingContext();
if (!ctx.reindexing().enabled()) {
builder.enabled(false);
return;
}
builder.enabled(ctx.reindexing().enabled());
for (String clusterId : ctx.clusterIds()) {
ReindexingConfig.Clusters.Builder clusterBuilder = new ReindexingConfig.Clusters.Builder();
for (NewDocumentType type : ctx.documentTypesForCluster(clusterId)) {
String typeName = type.getFullName().getName();
ctx.reindexing().status(clusterId, typeName).ifPresent(
status -> clusterBuilder.documentTypes(
typeName,
new ReindexingConfig.Clusters.DocumentTypes.Builder()
.readyAtMillis(status.ready().toEpochMilli())));
}
builder.clusters(clusterId, clusterBuilder);
}
}
} |
Since we wait 10 minutes after last deployment with migration I think most deployments will not migrate anyway, but I guess this is OK. | protected Set<ApplicationId> applicationsNeedingMaintenance() {
if (deployer().bootstrapping()) return Set.of();
ZonedDateTime date = ZonedDateTime.ofInstant(clock().instant(), java.time.ZoneId.of("Europe/Oslo"));
if ( ! nodeRepository().zone().system().isCd()
&& nodeRepository().zone().environment() != Environment.staging
&& (List.of(SATURDAY, SUNDAY).contains(date.getDayOfWeek()) || date.getHour() < 8 || 12 < date.getHour()))
return Set.of();
return nodeRepository().applications().ids().stream()
.sorted()
.filter(this::isEligible)
.filter(this::hasNotSwitched)
.filter(this::isQuiescent)
.limit(1)
.collect(toUnmodifiableSet());
} | && nodeRepository().zone().environment() != Environment.staging | protected Set<ApplicationId> applicationsNeedingMaintenance() {
if (deployer().bootstrapping()) return Set.of();
ZonedDateTime date = ZonedDateTime.ofInstant(clock().instant(), java.time.ZoneId.of("Europe/Oslo"));
if ( ! nodeRepository().zone().system().isCd()
&& nodeRepository().zone().environment() != Environment.staging
&& (List.of(SATURDAY, SUNDAY).contains(date.getDayOfWeek()) || date.getHour() < 8 || 12 < date.getHour()))
return Set.of();
return nodeRepository().applications().ids().stream()
.sorted()
.filter(this::isEligible)
.filter(this::hasNotSwitched)
.filter(this::isQuiescent)
.limit(1)
.collect(toUnmodifiableSet());
} | class DedicatedClusterControllerClusterMigrator extends ApplicationMaintainer {
private final BooleanFlag flag;
private final Orchestrator orchestrator;
protected DedicatedClusterControllerClusterMigrator(Deployer deployer, Metric metric, NodeRepository nodeRepository,
Duration interval, FlagSource flags, Orchestrator orchestrator) {
super(deployer, metric, nodeRepository, interval);
this.flag = Flags.DEDICATED_CLUSTER_CONTROLLER_CLUSTER.bindTo(flags);
this.orchestrator = orchestrator;
}
@Override
@Override
protected void deploy(ApplicationId id) {
migrate(id);
super.deploy(id);
}
private boolean isEligible(ApplicationId id) {
return deployer().lastDeployTime(id).map(at -> at.isBefore(clock().instant().minus(Duration.ofMinutes(10)))).orElse(false)
&& flag.with(FetchVector.Dimension.APPLICATION_ID, id.serializedForm()).value();
}
private boolean hasNotSwitched(ApplicationId id) {
return ! deployer().getDedicatedClusterControllerCluster(id);
}
private boolean isQuiescent(ApplicationId id) {
return orchestrator.isQuiescent(id);
}
private void migrate(ApplicationId id) {
log.log(Level.INFO, "Migrating " + id + " to dedicated cluster controller cluster");
deployer().setDedicatedClusterControllerCluster(id);
}
} | class DedicatedClusterControllerClusterMigrator extends ApplicationMaintainer {
private final BooleanFlag flag;
private final Orchestrator orchestrator;
protected DedicatedClusterControllerClusterMigrator(Deployer deployer, Metric metric, NodeRepository nodeRepository,
Duration interval, FlagSource flags, Orchestrator orchestrator) {
super(deployer, metric, nodeRepository, interval);
this.flag = Flags.DEDICATED_CLUSTER_CONTROLLER_CLUSTER.bindTo(flags);
this.orchestrator = orchestrator;
}
@Override
@Override
protected void deploy(ApplicationId id) {
migrate(id);
super.deploy(id);
}
private boolean isEligible(ApplicationId id) {
return deployer().lastDeployTime(id).map(at -> at.isBefore(clock().instant().minus(Duration.ofMinutes(10)))).orElse(false)
&& flag.with(FetchVector.Dimension.APPLICATION_ID, id.serializedForm()).value();
}
private boolean hasNotSwitched(ApplicationId id) {
return ! deployer().getDedicatedClusterControllerCluster(id);
}
private boolean isQuiescent(ApplicationId id) {
return orchestrator.isQuiescent(id);
}
private void migrate(ApplicationId id) {
log.log(Level.INFO, "Migrating " + id + " to dedicated cluster controller cluster");
deployer().setDedicatedClusterControllerCluster(id);
}
} |
Yes, most deployments won't. We could also lower that, I guess, since it's just for the purpose of a working integration test. | protected Set<ApplicationId> applicationsNeedingMaintenance() {
if (deployer().bootstrapping()) return Set.of();
ZonedDateTime date = ZonedDateTime.ofInstant(clock().instant(), java.time.ZoneId.of("Europe/Oslo"));
if ( ! nodeRepository().zone().system().isCd()
&& nodeRepository().zone().environment() != Environment.staging
&& (List.of(SATURDAY, SUNDAY).contains(date.getDayOfWeek()) || date.getHour() < 8 || 12 < date.getHour()))
return Set.of();
return nodeRepository().applications().ids().stream()
.sorted()
.filter(this::isEligible)
.filter(this::hasNotSwitched)
.filter(this::isQuiescent)
.limit(1)
.collect(toUnmodifiableSet());
} | && nodeRepository().zone().environment() != Environment.staging | protected Set<ApplicationId> applicationsNeedingMaintenance() {
if (deployer().bootstrapping()) return Set.of();
ZonedDateTime date = ZonedDateTime.ofInstant(clock().instant(), java.time.ZoneId.of("Europe/Oslo"));
if ( ! nodeRepository().zone().system().isCd()
&& nodeRepository().zone().environment() != Environment.staging
&& (List.of(SATURDAY, SUNDAY).contains(date.getDayOfWeek()) || date.getHour() < 8 || 12 < date.getHour()))
return Set.of();
return nodeRepository().applications().ids().stream()
.sorted()
.filter(this::isEligible)
.filter(this::hasNotSwitched)
.filter(this::isQuiescent)
.limit(1)
.collect(toUnmodifiableSet());
} | class DedicatedClusterControllerClusterMigrator extends ApplicationMaintainer {
private final BooleanFlag flag;
private final Orchestrator orchestrator;
protected DedicatedClusterControllerClusterMigrator(Deployer deployer, Metric metric, NodeRepository nodeRepository,
Duration interval, FlagSource flags, Orchestrator orchestrator) {
super(deployer, metric, nodeRepository, interval);
this.flag = Flags.DEDICATED_CLUSTER_CONTROLLER_CLUSTER.bindTo(flags);
this.orchestrator = orchestrator;
}
@Override
@Override
protected void deploy(ApplicationId id) {
migrate(id);
super.deploy(id);
}
private boolean isEligible(ApplicationId id) {
return deployer().lastDeployTime(id).map(at -> at.isBefore(clock().instant().minus(Duration.ofMinutes(10)))).orElse(false)
&& flag.with(FetchVector.Dimension.APPLICATION_ID, id.serializedForm()).value();
}
private boolean hasNotSwitched(ApplicationId id) {
return ! deployer().getDedicatedClusterControllerCluster(id);
}
private boolean isQuiescent(ApplicationId id) {
return orchestrator.isQuiescent(id);
}
private void migrate(ApplicationId id) {
log.log(Level.INFO, "Migrating " + id + " to dedicated cluster controller cluster");
deployer().setDedicatedClusterControllerCluster(id);
}
} | class DedicatedClusterControllerClusterMigrator extends ApplicationMaintainer {
private final BooleanFlag flag;
private final Orchestrator orchestrator;
protected DedicatedClusterControllerClusterMigrator(Deployer deployer, Metric metric, NodeRepository nodeRepository,
Duration interval, FlagSource flags, Orchestrator orchestrator) {
super(deployer, metric, nodeRepository, interval);
this.flag = Flags.DEDICATED_CLUSTER_CONTROLLER_CLUSTER.bindTo(flags);
this.orchestrator = orchestrator;
}
@Override
@Override
protected void deploy(ApplicationId id) {
migrate(id);
super.deploy(id);
}
private boolean isEligible(ApplicationId id) {
return deployer().lastDeployTime(id).map(at -> at.isBefore(clock().instant().minus(Duration.ofMinutes(10)))).orElse(false)
&& flag.with(FetchVector.Dimension.APPLICATION_ID, id.serializedForm()).value();
}
private boolean hasNotSwitched(ApplicationId id) {
return ! deployer().getDedicatedClusterControllerCluster(id);
}
private boolean isQuiescent(ApplicationId id) {
return orchestrator.isQuiescent(id);
}
private void migrate(ApplicationId id) {
log.log(Level.INFO, "Migrating " + id + " to dedicated cluster controller cluster");
deployer().setDedicatedClusterControllerCluster(id);
}
} |
Anything special about staging or should this be `!nodeRepository().zone().environment().isTest()`? | protected Set<ApplicationId> applicationsNeedingMaintenance() {
if (deployer().bootstrapping()) return Set.of();
ZonedDateTime date = ZonedDateTime.ofInstant(clock().instant(), java.time.ZoneId.of("Europe/Oslo"));
if ( ! nodeRepository().zone().system().isCd()
&& nodeRepository().zone().environment() != Environment.staging
&& (List.of(SATURDAY, SUNDAY).contains(date.getDayOfWeek()) || date.getHour() < 8 || 12 < date.getHour()))
return Set.of();
return nodeRepository().applications().ids().stream()
.sorted()
.filter(this::isEligible)
.filter(this::hasNotSwitched)
.filter(this::isQuiescent)
.limit(1)
.collect(toUnmodifiableSet());
} | && nodeRepository().zone().environment() != Environment.staging | protected Set<ApplicationId> applicationsNeedingMaintenance() {
if (deployer().bootstrapping()) return Set.of();
ZonedDateTime date = ZonedDateTime.ofInstant(clock().instant(), java.time.ZoneId.of("Europe/Oslo"));
if ( ! nodeRepository().zone().system().isCd()
&& nodeRepository().zone().environment() != Environment.staging
&& (List.of(SATURDAY, SUNDAY).contains(date.getDayOfWeek()) || date.getHour() < 8 || 12 < date.getHour()))
return Set.of();
return nodeRepository().applications().ids().stream()
.sorted()
.filter(this::isEligible)
.filter(this::hasNotSwitched)
.filter(this::isQuiescent)
.limit(1)
.collect(toUnmodifiableSet());
} | class DedicatedClusterControllerClusterMigrator extends ApplicationMaintainer {
private final BooleanFlag flag;
private final Orchestrator orchestrator;
protected DedicatedClusterControllerClusterMigrator(Deployer deployer, Metric metric, NodeRepository nodeRepository,
Duration interval, FlagSource flags, Orchestrator orchestrator) {
super(deployer, metric, nodeRepository, interval);
this.flag = Flags.DEDICATED_CLUSTER_CONTROLLER_CLUSTER.bindTo(flags);
this.orchestrator = orchestrator;
}
@Override
@Override
protected void deploy(ApplicationId id) {
migrate(id);
super.deploy(id);
}
private boolean isEligible(ApplicationId id) {
return deployer().lastDeployTime(id).map(at -> at.isBefore(clock().instant().minus(Duration.ofMinutes(10)))).orElse(false)
&& flag.with(FetchVector.Dimension.APPLICATION_ID, id.serializedForm()).value();
}
private boolean hasNotSwitched(ApplicationId id) {
return ! deployer().getDedicatedClusterControllerCluster(id);
}
private boolean isQuiescent(ApplicationId id) {
return orchestrator.isQuiescent(id);
}
private void migrate(ApplicationId id) {
log.log(Level.INFO, "Migrating " + id + " to dedicated cluster controller cluster");
deployer().setDedicatedClusterControllerCluster(id);
}
} | class DedicatedClusterControllerClusterMigrator extends ApplicationMaintainer {
private final BooleanFlag flag;
private final Orchestrator orchestrator;
protected DedicatedClusterControllerClusterMigrator(Deployer deployer, Metric metric, NodeRepository nodeRepository,
Duration interval, FlagSource flags, Orchestrator orchestrator) {
super(deployer, metric, nodeRepository, interval);
this.flag = Flags.DEDICATED_CLUSTER_CONTROLLER_CLUSTER.bindTo(flags);
this.orchestrator = orchestrator;
}
@Override
@Override
protected void deploy(ApplicationId id) {
migrate(id);
super.deploy(id);
}
private boolean isEligible(ApplicationId id) {
return deployer().lastDeployTime(id).map(at -> at.isBefore(clock().instant().minus(Duration.ofMinutes(10)))).orElse(false)
&& flag.with(FetchVector.Dimension.APPLICATION_ID, id.serializedForm()).value();
}
private boolean hasNotSwitched(ApplicationId id) {
return ! deployer().getDedicatedClusterControllerCluster(id);
}
private boolean isQuiescent(ApplicationId id) {
return orchestrator.isQuiescent(id);
}
private void migrate(ApplicationId id) {
log.log(Level.INFO, "Migrating " + id + " to dedicated cluster controller cluster");
deployer().setDedicatedClusterControllerCluster(id);
}
} |
this comment can probably be removed | private void addSummaryConfig(SchemaInfoConfig.Schema.Builder schemaBuilder) {
/*
for (SummaryConfig.Classes sclass : summaryConfig.classes()) {
DocumentdbInfoConfig.Documentdb.Summaryclass.Builder sumClassBuilder = new DocumentdbInfoConfig.Documentdb.Summaryclass.Builder();
sumClassBuilder.
id(sclass.id()).
name(sclass.name()); ------- kommet hit
for (SummaryConfig.Classes.Fields field : sclass.fields()) {
DocumentdbInfoConfig.Documentdb.Summaryclass.Fields.Builder fieldsBuilder = new DocumentdbInfoConfig.Documentdb.Summaryclass.Fields.Builder();
fieldsBuilder.name(field.name())
.type(field.type())
.dynamic(isDynamic(field.name(), summarymapConfig));
sumClassBuilder.fields(fieldsBuilder);
}
docDb.summaryclass(sumClassBuilder);
}
*/
for (var summary : summaries.asList()) {
var summaryBuilder = new SchemaInfoConfig.Schema.Summaryclass.Builder();
summaryBuilder.id(summary.id()).name(summary.getName());
for (var field : summary.fields().values()) {
var fieldsBuilder = new SchemaInfoConfig.Schema.Summaryclass.Fields.Builder();
fieldsBuilder.name(field.getName())
.type(field.getType().getName())
.dynamic(isDynamic(field.getName()));
summaryBuilder.fields(fieldsBuilder);
}
schemaBuilder.summaryclass(summaryBuilder);
}
} | for (SummaryConfig.Classes sclass : summaryConfig.classes()) { | private void addSummaryConfig(SchemaInfoConfig.Schema.Builder schemaBuilder) {
for (var summary : summaries.asList()) {
var summaryBuilder = new SchemaInfoConfig.Schema.Summaryclass.Builder();
summaryBuilder.name(summary.getName());
for (var field : summary.fields().values()) {
var fieldsBuilder = new SchemaInfoConfig.Schema.Summaryclass.Fields.Builder();
fieldsBuilder.name(field.getName())
.type(field.getType().getName())
.dynamic(isDynamic(field.getName()));
summaryBuilder.fields(fieldsBuilder);
}
schemaBuilder.summaryclass(summaryBuilder);
}
} | class SchemaInfo implements SchemaInfoConfig.Producer {
private final Schema schema;
private final Map<String, RankProfileInfo> rankProfiles;
private final Summaries summaries;
private final SummaryMap summaryMap;
public SchemaInfo(Schema schema, RankProfileRegistry rankProfileRegistry,
Summaries summaries, SummaryMap summaryMap) {
this.schema = schema;
this.rankProfiles = Collections.unmodifiableMap(toRankProfiles(rankProfileRegistry.rankProfilesOf(schema)));
this.summaries = summaries;
this.summaryMap = summaryMap;
}
public String name() { return schema.getName(); }
public Schema fullSchema() { return schema; }
public Map<String, RankProfileInfo> rankProfiles() { return rankProfiles; }
private Map<String, RankProfileInfo> toRankProfiles(Collection<RankProfile> rankProfiles) {
Map<String, RankProfileInfo> rankProfileInfos = new LinkedHashMap<>();
rankProfiles.forEach(profile -> rankProfileInfos.put(profile.name(), new RankProfileInfo(profile)));
return rankProfileInfos;
}
@Override
public void getConfig(SchemaInfoConfig.Builder builder) {
var schemaBuilder = new SchemaInfoConfig.Schema.Builder();
schemaBuilder.name(schema.getName());
addSummaryConfig(schemaBuilder);
addRankProfilesConfig(schemaBuilder);
builder.schema(schemaBuilder);
}
/** Returns whether the given field is a dynamic summary field. */
private boolean isDynamic(String fieldName) {
if (summaryMap == null) return false;
var fieldTransform = summaryMap.resultTransforms().get(fieldName);
if (fieldTransform == null) return false;
return fieldTransform.getTransform().isDynamic() ||
fieldTransform.getTransform() == SummaryTransform.MATCHED_ELEMENTS_FILTER ||
fieldTransform.getTransform() == SummaryTransform.MATCHED_ATTRIBUTE_ELEMENTS_FILTER;
}
private void addRankProfilesConfig(SchemaInfoConfig.Schema.Builder schemaBuilder) {
for (RankProfileInfo rankProfile : rankProfiles().values()) {
var rankProfileConfig = new SchemaInfoConfig.Schema.Rankprofile.Builder();
rankProfileConfig.name(rankProfile.name());
rankProfileConfig.hasSummaryFeatures(rankProfile.hasSummaryFeatures());
rankProfileConfig.hasRankFeatures(rankProfile.hasRankFeatures());
for (var input : rankProfile.inputs().entrySet()) {
var inputConfig = new SchemaInfoConfig.Schema.Rankprofile.Input.Builder();
inputConfig.name(input.getKey().toString());
inputConfig.type(input.getValue().toString());
rankProfileConfig.input(inputConfig);
}
schemaBuilder.rankprofile(rankProfileConfig);
}
}
/** A store of a *small* (in memory) amount of rank profile info. */
public static final class RankProfileInfo {
private final String name;
private final boolean hasSummaryFeatures;
private final boolean hasRankFeatures;
private final Map<Reference, TensorType> inputs;
public RankProfileInfo(RankProfile profile) {
this.name = profile.name();
this.hasSummaryFeatures = ! profile.getSummaryFeatures().isEmpty();
this.hasRankFeatures = ! profile.getRankFeatures().isEmpty();
this.inputs = profile.inputs();
}
public String name() { return name; }
public boolean hasSummaryFeatures() { return hasSummaryFeatures; }
public boolean hasRankFeatures() { return hasRankFeatures; }
public Map<Reference, TensorType> inputs() { return inputs; }
}
} | class SchemaInfo implements SchemaInfoConfig.Producer {
private final Schema schema;
private final Map<String, RankProfileInfo> rankProfiles;
private final Summaries summaries;
private final SummaryMap summaryMap;
public SchemaInfo(Schema schema, RankProfileRegistry rankProfileRegistry,
Summaries summaries, SummaryMap summaryMap) {
this.schema = schema;
this.rankProfiles = Collections.unmodifiableMap(toRankProfiles(rankProfileRegistry.rankProfilesOf(schema)));
this.summaries = summaries;
this.summaryMap = summaryMap;
}
public String name() { return schema.getName(); }
public Schema fullSchema() { return schema; }
public Map<String, RankProfileInfo> rankProfiles() { return rankProfiles; }
private Map<String, RankProfileInfo> toRankProfiles(Collection<RankProfile> rankProfiles) {
Map<String, RankProfileInfo> rankProfileInfos = new LinkedHashMap<>();
rankProfiles.forEach(profile -> rankProfileInfos.put(profile.name(), new RankProfileInfo(profile)));
return rankProfileInfos;
}
@Override
public void getConfig(SchemaInfoConfig.Builder builder) {
var schemaBuilder = new SchemaInfoConfig.Schema.Builder();
schemaBuilder.name(schema.getName());
addSummaryConfig(schemaBuilder);
addRankProfilesConfig(schemaBuilder);
builder.schema(schemaBuilder);
}
/** Returns whether the given field is a dynamic summary field. */
private boolean isDynamic(String fieldName) {
if (summaryMap == null) return false;
var fieldTransform = summaryMap.resultTransforms().get(fieldName);
if (fieldTransform == null) return false;
return fieldTransform.getTransform().isDynamic() ||
fieldTransform.getTransform() == SummaryTransform.MATCHED_ELEMENTS_FILTER ||
fieldTransform.getTransform() == SummaryTransform.MATCHED_ATTRIBUTE_ELEMENTS_FILTER;
}
private void addRankProfilesConfig(SchemaInfoConfig.Schema.Builder schemaBuilder) {
for (RankProfileInfo rankProfile : rankProfiles().values()) {
var rankProfileConfig = new SchemaInfoConfig.Schema.Rankprofile.Builder();
rankProfileConfig.name(rankProfile.name());
rankProfileConfig.hasSummaryFeatures(rankProfile.hasSummaryFeatures());
rankProfileConfig.hasRankFeatures(rankProfile.hasRankFeatures());
for (var input : rankProfile.inputs().entrySet()) {
var inputConfig = new SchemaInfoConfig.Schema.Rankprofile.Input.Builder();
inputConfig.name(input.getKey().toString());
inputConfig.type(input.getValue().toString());
rankProfileConfig.input(inputConfig);
}
schemaBuilder.rankprofile(rankProfileConfig);
}
}
/** A store of a *small* (in memory) amount of rank profile info. */
public static final class RankProfileInfo {
private final String name;
private final boolean hasSummaryFeatures;
private final boolean hasRankFeatures;
private final Map<Reference, TensorType> inputs;
public RankProfileInfo(RankProfile profile) {
this.name = profile.name();
this.hasSummaryFeatures = ! profile.getSummaryFeatures().isEmpty();
this.hasRankFeatures = ! profile.getRankFeatures().isEmpty();
this.inputs = profile.inputs();
}
public String name() { return name; }
public boolean hasSummaryFeatures() { return hasSummaryFeatures; }
public boolean hasRankFeatures() { return hasRankFeatures; }
public Map<Reference, TensorType> inputs() { return inputs; }
}
} |
NodeList nodesInCluster = nodeRepository.nodes().list().owner(application).cluster(cluster); | private List<Node> prepareNodes(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
List<Node> surplusNodes = findNodesInRemovableGroups(application, cluster, wantedGroups);
NodeList nodesInCluster = nodeRepository.nodes().list(Node.State.allocatedStates().toArray(new Node.State[0]))
.owner(application)
.matching(node -> node.allocation().get().membership().cluster().satisfies(cluster));
NodeIndices indices = new NodeIndices(nodesInCluster, ! cluster.type().isContent());
List<Node> acceptedNodes = new ArrayList<>();
for (int groupIndex = 0; groupIndex < wantedGroups; groupIndex++) {
ClusterSpec clusterGroup = cluster.with(Optional.of(ClusterSpec.Group.from(groupIndex)));
List<Node> accepted = groupPreparer.prepare(application, clusterGroup,
requestedNodes.fraction(wantedGroups), surplusNodes,
indices, wantedGroups);
replace(acceptedNodes, accepted);
}
moveToActiveGroup(surplusNodes, wantedGroups, cluster.group());
acceptedNodes.removeAll(surplusNodes);
return acceptedNodes;
} | NodeList nodesInCluster = nodeRepository.nodes().list(Node.State.allocatedStates().toArray(new Node.State[0])) | private List<Node> prepareNodes(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
List<Node> surplusNodes = findNodesInRemovableGroups(application, cluster, wantedGroups);
List<Integer> usedIndices = nodeRepository.nodes().list()
.owner(application)
.cluster(cluster.id())
.mapToList(node -> node.allocation().get().membership().index());
NodeIndices indices = new NodeIndices(usedIndices, ! cluster.type().isContent());
List<Node> acceptedNodes = new ArrayList<>();
for (int groupIndex = 0; groupIndex < wantedGroups; groupIndex++) {
ClusterSpec clusterGroup = cluster.with(Optional.of(ClusterSpec.Group.from(groupIndex)));
List<Node> accepted = groupPreparer.prepare(application, clusterGroup,
requestedNodes.fraction(wantedGroups), surplusNodes,
indices, wantedGroups);
replace(acceptedNodes, accepted);
}
moveToActiveGroup(surplusNodes, wantedGroups, cluster.group());
acceptedNodes.removeAll(surplusNodes);
return acceptedNodes;
} | class Preparer {
private final NodeRepository nodeRepository;
private final GroupPreparer groupPreparer;
private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner;
public Preparer(NodeRepository nodeRepository, FlagSource flagSource, Optional<HostProvisioner> hostProvisioner,
Optional<LoadBalancerProvisioner> loadBalancerProvisioner) {
this.nodeRepository = nodeRepository;
this.loadBalancerProvisioner = loadBalancerProvisioner;
this.groupPreparer = new GroupPreparer(nodeRepository, hostProvisioner, flagSource);
}
/** Prepare all required resources for the given application and cluster */
public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
try {
var nodes = prepareNodes(application, cluster, requestedNodes, wantedGroups);
prepareLoadBalancer(application, cluster, requestedNodes);
return nodes;
}
catch (OutOfCapacityException e) {
throw new OutOfCapacityException("Could not satisfy " + requestedNodes +
( wantedGroups > 1 ? " (in " + wantedGroups + " groups)" : "") +
" in " + application + " " + cluster +
": " + e.getMessage());
}
}
/**
* Ensure sufficient nodes are reserved or active for the given application and cluster
*
* @return the list of nodes this cluster will have allocated if activated
*/
/** Prepare a load balancer for given application and cluster */
public void prepareLoadBalancer(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) {
loadBalancerProvisioner.ifPresent(provisioner -> provisioner.prepare(application, cluster, requestedNodes));
}
/**
* Returns a list of the nodes which are
* in groups with index number above or equal the group count
*/
private List<Node> findNodesInRemovableGroups(ApplicationId application, ClusterSpec requestedCluster, int wantedGroups) {
List<Node> surplusNodes = new ArrayList<>(0);
for (Node node : nodeRepository.nodes().list(Node.State.active).owner(application)) {
ClusterSpec nodeCluster = node.allocation().get().membership().cluster();
if ( ! nodeCluster.id().equals(requestedCluster.id())) continue;
if ( ! nodeCluster.type().equals(requestedCluster.type())) continue;
if (nodeCluster.group().get().index() >= wantedGroups)
surplusNodes.add(node);
}
return surplusNodes;
}
/** Move nodes from unwanted groups to wanted groups to avoid lingering groups consisting of retired nodes */
private void moveToActiveGroup(List<Node> surplusNodes, int wantedGroups, Optional<ClusterSpec.Group> targetGroup) {
for (ListIterator<Node> i = surplusNodes.listIterator(); i.hasNext(); ) {
Node node = i.next();
ClusterMembership membership = node.allocation().get().membership();
ClusterSpec cluster = membership.cluster();
if (cluster.group().get().index() >= wantedGroups) {
ClusterSpec.Group newGroup = targetGroup.orElse(ClusterSpec.Group.from(0));
ClusterMembership newGroupMembership = membership.with(cluster.with(Optional.of(newGroup)));
i.set(node.with(node.allocation().get().with(newGroupMembership)));
}
}
}
/**
* Nodes are immutable so when changing attributes to the node we create a new instance.
*
* This method is used to both add new nodes and replaces old node references with the new references.
*/
private List<Node> replace(List<Node> list, List<Node> changed) {
list.removeAll(changed);
list.addAll(changed);
return list;
}
} | class Preparer {
private final NodeRepository nodeRepository;
private final GroupPreparer groupPreparer;
private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner;
public Preparer(NodeRepository nodeRepository, FlagSource flagSource, Optional<HostProvisioner> hostProvisioner,
Optional<LoadBalancerProvisioner> loadBalancerProvisioner) {
this.nodeRepository = nodeRepository;
this.loadBalancerProvisioner = loadBalancerProvisioner;
this.groupPreparer = new GroupPreparer(nodeRepository, hostProvisioner, flagSource);
}
/** Prepare all required resources for the given application and cluster */
public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
try {
var nodes = prepareNodes(application, cluster, requestedNodes, wantedGroups);
prepareLoadBalancer(application, cluster, requestedNodes);
return nodes;
}
catch (OutOfCapacityException e) {
throw new OutOfCapacityException("Could not satisfy " + requestedNodes +
( wantedGroups > 1 ? " (in " + wantedGroups + " groups)" : "") +
" in " + application + " " + cluster +
": " + e.getMessage());
}
}
/**
* Ensure sufficient nodes are reserved or active for the given application and cluster
*
* @return the list of nodes this cluster will have allocated if activated
*/
/** Prepare a load balancer for given application and cluster */
public void prepareLoadBalancer(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) {
loadBalancerProvisioner.ifPresent(provisioner -> provisioner.prepare(application, cluster, requestedNodes));
}
/**
* Returns a list of the nodes which are
* in groups with index number above or equal the group count
*/
private List<Node> findNodesInRemovableGroups(ApplicationId application, ClusterSpec requestedCluster, int wantedGroups) {
List<Node> surplusNodes = new ArrayList<>(0);
for (Node node : nodeRepository.nodes().list(Node.State.active).owner(application)) {
ClusterSpec nodeCluster = node.allocation().get().membership().cluster();
if ( ! nodeCluster.id().equals(requestedCluster.id())) continue;
if ( ! nodeCluster.type().equals(requestedCluster.type())) continue;
if (nodeCluster.group().get().index() >= wantedGroups)
surplusNodes.add(node);
}
return surplusNodes;
}
/** Move nodes from unwanted groups to wanted groups to avoid lingering groups consisting of retired nodes */
private void moveToActiveGroup(List<Node> surplusNodes, int wantedGroups, Optional<ClusterSpec.Group> targetGroup) {
for (ListIterator<Node> i = surplusNodes.listIterator(); i.hasNext(); ) {
Node node = i.next();
ClusterMembership membership = node.allocation().get().membership();
ClusterSpec cluster = membership.cluster();
if (cluster.group().get().index() >= wantedGroups) {
ClusterSpec.Group newGroup = targetGroup.orElse(ClusterSpec.Group.from(0));
ClusterMembership newGroupMembership = membership.with(cluster.with(Optional.of(newGroup)));
i.set(node.with(node.allocation().get().with(newGroupMembership)));
}
}
}
/**
* Nodes are immutable so when changing attributes to the node we create a new instance.
*
* This method is used to both add new nodes and replaces old node references with the new references.
*/
private List<Node> replace(List<Node> list, List<Node> changed) {
list.removeAll(changed);
list.addAll(changed);
return list;
}
} |
`cluster(cluster.id())` in that case. The code I replaced also checked that `type` was the same—I modified this to be in line with what the actual provisioning code does, which is to use `satisfies(...)`. | private List<Node> prepareNodes(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
List<Node> surplusNodes = findNodesInRemovableGroups(application, cluster, wantedGroups);
NodeList nodesInCluster = nodeRepository.nodes().list(Node.State.allocatedStates().toArray(new Node.State[0]))
.owner(application)
.matching(node -> node.allocation().get().membership().cluster().satisfies(cluster));
NodeIndices indices = new NodeIndices(nodesInCluster, ! cluster.type().isContent());
List<Node> acceptedNodes = new ArrayList<>();
for (int groupIndex = 0; groupIndex < wantedGroups; groupIndex++) {
ClusterSpec clusterGroup = cluster.with(Optional.of(ClusterSpec.Group.from(groupIndex)));
List<Node> accepted = groupPreparer.prepare(application, clusterGroup,
requestedNodes.fraction(wantedGroups), surplusNodes,
indices, wantedGroups);
replace(acceptedNodes, accepted);
}
moveToActiveGroup(surplusNodes, wantedGroups, cluster.group());
acceptedNodes.removeAll(surplusNodes);
return acceptedNodes;
} | NodeList nodesInCluster = nodeRepository.nodes().list(Node.State.allocatedStates().toArray(new Node.State[0])) | private List<Node> prepareNodes(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
List<Node> surplusNodes = findNodesInRemovableGroups(application, cluster, wantedGroups);
List<Integer> usedIndices = nodeRepository.nodes().list()
.owner(application)
.cluster(cluster.id())
.mapToList(node -> node.allocation().get().membership().index());
NodeIndices indices = new NodeIndices(usedIndices, ! cluster.type().isContent());
List<Node> acceptedNodes = new ArrayList<>();
for (int groupIndex = 0; groupIndex < wantedGroups; groupIndex++) {
ClusterSpec clusterGroup = cluster.with(Optional.of(ClusterSpec.Group.from(groupIndex)));
List<Node> accepted = groupPreparer.prepare(application, clusterGroup,
requestedNodes.fraction(wantedGroups), surplusNodes,
indices, wantedGroups);
replace(acceptedNodes, accepted);
}
moveToActiveGroup(surplusNodes, wantedGroups, cluster.group());
acceptedNodes.removeAll(surplusNodes);
return acceptedNodes;
} | class Preparer {
private final NodeRepository nodeRepository;
private final GroupPreparer groupPreparer;
private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner;
public Preparer(NodeRepository nodeRepository, FlagSource flagSource, Optional<HostProvisioner> hostProvisioner,
Optional<LoadBalancerProvisioner> loadBalancerProvisioner) {
this.nodeRepository = nodeRepository;
this.loadBalancerProvisioner = loadBalancerProvisioner;
this.groupPreparer = new GroupPreparer(nodeRepository, hostProvisioner, flagSource);
}
/** Prepare all required resources for the given application and cluster */
public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
try {
var nodes = prepareNodes(application, cluster, requestedNodes, wantedGroups);
prepareLoadBalancer(application, cluster, requestedNodes);
return nodes;
}
catch (OutOfCapacityException e) {
throw new OutOfCapacityException("Could not satisfy " + requestedNodes +
( wantedGroups > 1 ? " (in " + wantedGroups + " groups)" : "") +
" in " + application + " " + cluster +
": " + e.getMessage());
}
}
/**
* Ensure sufficient nodes are reserved or active for the given application and cluster
*
* @return the list of nodes this cluster will have allocated if activated
*/
/** Prepare a load balancer for given application and cluster */
public void prepareLoadBalancer(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) {
loadBalancerProvisioner.ifPresent(provisioner -> provisioner.prepare(application, cluster, requestedNodes));
}
/**
* Returns a list of the nodes which are
* in groups with index number above or equal the group count
*/
private List<Node> findNodesInRemovableGroups(ApplicationId application, ClusterSpec requestedCluster, int wantedGroups) {
List<Node> surplusNodes = new ArrayList<>(0);
for (Node node : nodeRepository.nodes().list(Node.State.active).owner(application)) {
ClusterSpec nodeCluster = node.allocation().get().membership().cluster();
if ( ! nodeCluster.id().equals(requestedCluster.id())) continue;
if ( ! nodeCluster.type().equals(requestedCluster.type())) continue;
if (nodeCluster.group().get().index() >= wantedGroups)
surplusNodes.add(node);
}
return surplusNodes;
}
/** Move nodes from unwanted groups to wanted groups to avoid lingering groups consisting of retired nodes */
private void moveToActiveGroup(List<Node> surplusNodes, int wantedGroups, Optional<ClusterSpec.Group> targetGroup) {
for (ListIterator<Node> i = surplusNodes.listIterator(); i.hasNext(); ) {
Node node = i.next();
ClusterMembership membership = node.allocation().get().membership();
ClusterSpec cluster = membership.cluster();
if (cluster.group().get().index() >= wantedGroups) {
ClusterSpec.Group newGroup = targetGroup.orElse(ClusterSpec.Group.from(0));
ClusterMembership newGroupMembership = membership.with(cluster.with(Optional.of(newGroup)));
i.set(node.with(node.allocation().get().with(newGroupMembership)));
}
}
}
/**
* Nodes are immutable so when changing attributes to the node we create a new instance.
*
* This method is used to both add new nodes and replaces old node references with the new references.
*/
private List<Node> replace(List<Node> list, List<Node> changed) {
list.removeAll(changed);
list.addAll(changed);
return list;
}
} | class Preparer {
private final NodeRepository nodeRepository;
private final GroupPreparer groupPreparer;
private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner;
public Preparer(NodeRepository nodeRepository, FlagSource flagSource, Optional<HostProvisioner> hostProvisioner,
Optional<LoadBalancerProvisioner> loadBalancerProvisioner) {
this.nodeRepository = nodeRepository;
this.loadBalancerProvisioner = loadBalancerProvisioner;
this.groupPreparer = new GroupPreparer(nodeRepository, hostProvisioner, flagSource);
}
/** Prepare all required resources for the given application and cluster */
public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
try {
var nodes = prepareNodes(application, cluster, requestedNodes, wantedGroups);
prepareLoadBalancer(application, cluster, requestedNodes);
return nodes;
}
catch (OutOfCapacityException e) {
throw new OutOfCapacityException("Could not satisfy " + requestedNodes +
( wantedGroups > 1 ? " (in " + wantedGroups + " groups)" : "") +
" in " + application + " " + cluster +
": " + e.getMessage());
}
}
/**
* Ensure sufficient nodes are reserved or active for the given application and cluster
*
* @return the list of nodes this cluster will have allocated if activated
*/
/** Prepare a load balancer for given application and cluster */
public void prepareLoadBalancer(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) {
loadBalancerProvisioner.ifPresent(provisioner -> provisioner.prepare(application, cluster, requestedNodes));
}
/**
* Returns a list of the nodes which are
* in groups with index number above or equal the group count
*/
private List<Node> findNodesInRemovableGroups(ApplicationId application, ClusterSpec requestedCluster, int wantedGroups) {
List<Node> surplusNodes = new ArrayList<>(0);
for (Node node : nodeRepository.nodes().list(Node.State.active).owner(application)) {
ClusterSpec nodeCluster = node.allocation().get().membership().cluster();
if ( ! nodeCluster.id().equals(requestedCluster.id())) continue;
if ( ! nodeCluster.type().equals(requestedCluster.type())) continue;
if (nodeCluster.group().get().index() >= wantedGroups)
surplusNodes.add(node);
}
return surplusNodes;
}
/** Move nodes from unwanted groups to wanted groups to avoid lingering groups consisting of retired nodes */
private void moveToActiveGroup(List<Node> surplusNodes, int wantedGroups, Optional<ClusterSpec.Group> targetGroup) {
for (ListIterator<Node> i = surplusNodes.listIterator(); i.hasNext(); ) {
Node node = i.next();
ClusterMembership membership = node.allocation().get().membership();
ClusterSpec cluster = membership.cluster();
if (cluster.group().get().index() >= wantedGroups) {
ClusterSpec.Group newGroup = targetGroup.orElse(ClusterSpec.Group.from(0));
ClusterMembership newGroupMembership = membership.with(cluster.with(Optional.of(newGroup)));
i.set(node.with(node.allocation().get().with(newGroupMembership)));
}
}
}
/**
* Nodes are immutable so when changing attributes to the node we create a new instance.
*
* This method is used to both add new nodes and replaces old node references with the new references.
*/
private List<Node> replace(List<Node> list, List<Node> changed) {
list.removeAll(changed);
list.addAll(changed);
return list;
}
} |
Yes. you are right. For now it's enough yet. we can do it later? | public static boolean isOpAppliedRule(Operator op, int ruleMask) {
if (op == null) {
return false;
}
int opRuleMask = op.getOpRuleMask();
return (opRuleMask & ruleMask) != 0;
} | } | public static boolean isOpAppliedRule(Operator op, int ruleMask) {
if (op == null) {
return false;
}
int opRuleMask = op.getOpRuleMask();
return (opRuleMask & ruleMask) != 0;
} | class Utils {
private static final Logger LOG = LogManager.getLogger(Utils.class);
public static List<ScalarOperator> extractConjuncts(ScalarOperator root) {
LinkedList<ScalarOperator> list = new LinkedList<>();
if (null == root) {
return list;
}
extractConjunctsImpl(root, list);
return list;
}
public static Set<ScalarOperator> extractConjunctSet(ScalarOperator root) {
Set<ScalarOperator> list = Sets.newHashSet();
if (null == root) {
return list;
}
extractConjunctsImpl(root, list);
return list;
}
private static void extractConjunctsImpl(ScalarOperator root, Collection<ScalarOperator> result) {
if (!OperatorType.COMPOUND.equals(root.getOpType())) {
result.add(root);
return;
}
CompoundPredicateOperator cpo = (CompoundPredicateOperator) root;
if (!cpo.isAnd()) {
result.add(root);
return;
}
extractConjunctsImpl(cpo.getChild(0), result);
extractConjunctsImpl(cpo.getChild(1), result);
}
public static List<ScalarOperator> extractDisjunctive(ScalarOperator root) {
LinkedList<ScalarOperator> list = new LinkedList<>();
if (null == root) {
return list;
}
extractDisjunctiveImpl(root, list);
return list;
}
private static void extractDisjunctiveImpl(ScalarOperator root, List<ScalarOperator> result) {
if (!OperatorType.COMPOUND.equals(root.getOpType())) {
result.add(root);
return;
}
CompoundPredicateOperator cpo = (CompoundPredicateOperator) root;
if (!cpo.isOr()) {
result.add(root);
return;
}
extractDisjunctiveImpl(cpo.getChild(0), result);
extractDisjunctiveImpl(cpo.getChild(1), result);
}
public static List<ColumnRefOperator> extractColumnRef(ScalarOperator root) {
if (null == root || !root.isVariable()) {
return new LinkedList<>();
}
LinkedList<ColumnRefOperator> list = new LinkedList<>();
if (OperatorType.VARIABLE.equals(root.getOpType())) {
list.add((ColumnRefOperator) root);
return list;
}
for (ScalarOperator child : root.getChildren()) {
list.addAll(extractColumnRef(child));
}
return list;
}
public static int countColumnRef(ScalarOperator root) {
return countColumnRef(root, 0);
}
private static int countColumnRef(ScalarOperator root, int count) {
if (null == root || !root.isVariable()) {
return 0;
}
if (OperatorType.VARIABLE.equals(root.getOpType())) {
return 1;
}
for (ScalarOperator child : root.getChildren()) {
count += countColumnRef(child, count);
}
return count;
}
public static void extractOlapScanOperator(GroupExpression groupExpression, List<LogicalOlapScanOperator> list) {
extractOperator(groupExpression, list, p -> OperatorType.LOGICAL_OLAP_SCAN.equals(p.getOpType()));
}
public static List<PhysicalOlapScanOperator> extractPhysicalOlapScanOperator(OptExpression root) {
List<PhysicalOlapScanOperator> list = Lists.newArrayList();
extractOperator(root, list, op -> OperatorType.PHYSICAL_OLAP_SCAN.equals(op.getOpType()));
return list;
}
public static <E extends Operator> void extractOperator(OptExpression root, List<E> list,
Predicate<Operator> lambda) {
if (lambda.test(root.getOp())) {
list.add((E) root.getOp());
return;
}
List<OptExpression> inputs = root.getInputs();
for (OptExpression input : inputs) {
extractOperator(input, list, lambda);
}
}
private static <E extends Operator> void extractOperator(GroupExpression root, List<E> list,
Predicate<Operator> lambda) {
if (lambda.test(root.getOp())) {
list.add((E) root.getOp());
return;
}
List<Group> groups = root.getInputs();
for (Group group : groups) {
GroupExpression expression = group.getFirstLogicalExpression();
extractOperator(expression, list, lambda);
}
}
public static boolean containAnyColumnRefs(List<ColumnRefOperator> refs, ScalarOperator operator) {
if (refs.isEmpty() || null == operator) {
return false;
}
if (operator.isColumnRef()) {
return refs.contains(operator);
}
for (ScalarOperator so : operator.getChildren()) {
if (containAnyColumnRefs(refs, so)) {
return true;
}
}
return false;
}
public static boolean containColumnRef(ScalarOperator operator, String column) {
if (null == column || null == operator) {
return false;
}
if (operator.isColumnRef()) {
return ((ColumnRefOperator) operator).getName().equalsIgnoreCase(column);
}
for (ScalarOperator so : operator.getChildren()) {
if (containColumnRef(so, column)) {
return true;
}
}
return false;
}
public static ScalarOperator compoundOr(Collection<ScalarOperator> nodes) {
return createCompound(CompoundPredicateOperator.CompoundType.OR, nodes);
}
public static ScalarOperator compoundOr(ScalarOperator... nodes) {
return createCompound(CompoundPredicateOperator.CompoundType.OR, Arrays.asList(nodes));
}
public static ScalarOperator compoundAnd(Collection<ScalarOperator> nodes) {
return createCompound(CompoundPredicateOperator.CompoundType.AND, nodes);
}
public static ScalarOperator compoundAnd(ScalarOperator... nodes) {
return createCompound(CompoundPredicateOperator.CompoundType.AND, Arrays.asList(nodes));
}
public static ScalarOperator createCompound(CompoundPredicateOperator.CompoundType type,
Collection<ScalarOperator> nodes) {
LinkedList<ScalarOperator> link =
nodes.stream().filter(Objects::nonNull).collect(Collectors.toCollection(Lists::newLinkedList));
if (link.size() < 1) {
return null;
}
if (link.size() == 1) {
return link.get(0);
}
while (link.size() > 1) {
LinkedList<ScalarOperator> buffer = new LinkedList<>();
while (link.size() >= 2) {
buffer.add(new CompoundPredicateOperator(type, link.poll(), link.poll()));
}
if (!link.isEmpty()) {
buffer.add(link.remove());
}
link = buffer;
}
return link.remove();
}
public static int countJoinNodeSize(OptExpression root, Set<JoinOperator> joinTypes) {
int count = 0;
Operator operator = root.getOp();
for (OptExpression child : root.getInputs()) {
if (isSuitableJoin(operator, joinTypes)) {
count += countJoinNodeSize(child, joinTypes);
} else {
count = Math.max(count, countJoinNodeSize(child, joinTypes));
}
}
if (isSuitableJoin(operator, joinTypes)) {
count += 1;
}
return count;
}
private static boolean isSuitableJoin(Operator operator, Set<JoinOperator> joinTypes) {
if (operator instanceof LogicalJoinOperator) {
LogicalJoinOperator joinOperator = (LogicalJoinOperator) operator;
return joinTypes.contains(joinOperator.getJoinType()) && joinOperator.getJoinHint().isEmpty();
}
return false;
}
public static boolean capableOuterReorder(OptExpression root, int threshold) {
boolean[] hasOuterOrSemi = {false};
int totalJoinNodes = countJoinNode(root, hasOuterOrSemi);
return totalJoinNodes < threshold && hasOuterOrSemi[0];
}
private static int countJoinNode(OptExpression root, boolean[] hasOuterOrSemi) {
int count = 0;
Operator operator = root.getOp();
for (OptExpression child : root.getInputs()) {
if (operator instanceof LogicalJoinOperator && ((LogicalJoinOperator) operator).getJoinHint().isEmpty()) {
count += countJoinNode(child, hasOuterOrSemi);
} else {
count = Math.max(count, countJoinNode(child, hasOuterOrSemi));
}
}
if (operator instanceof LogicalJoinOperator && ((LogicalJoinOperator) operator).getJoinHint().isEmpty()) {
count += 1;
if (!hasOuterOrSemi[0]) {
LogicalJoinOperator joinOperator = (LogicalJoinOperator) operator;
if (joinOperator.getJoinType().isOuterJoin() || joinOperator.getJoinType().isSemiAntiJoin()) {
hasOuterOrSemi[0] = true;
}
}
}
return count;
}
public static boolean hasUnknownColumnsStats(OptExpression root) {
Operator operator = root.getOp();
if (operator instanceof LogicalScanOperator) {
LogicalScanOperator scanOperator = (LogicalScanOperator) operator;
List<String> colNames =
scanOperator.getColRefToColumnMetaMap().values().stream().map(Column::getName).collect(
Collectors.toList());
if (operator instanceof LogicalOlapScanOperator) {
Table table = scanOperator.getTable();
if (table instanceof OlapTable) {
if (KeysType.AGG_KEYS.equals(((OlapTable) table).getKeysType())) {
List<String> keyColumnNames =
scanOperator.getColRefToColumnMetaMap().values().stream().filter(Column::isKey)
.map(Column::getName)
.collect(Collectors.toList());
List<ColumnStatistic> keyColumnStatisticList =
GlobalStateMgr.getCurrentState().getStatisticStorage().getColumnStatistics(table, keyColumnNames);
return keyColumnStatisticList.stream().anyMatch(ColumnStatistic::isUnknown);
}
}
List<ColumnStatistic> columnStatisticList =
GlobalStateMgr.getCurrentState().getStatisticStorage().getColumnStatistics(table, colNames);
return columnStatisticList.stream().anyMatch(ColumnStatistic::isUnknown);
} else if (operator instanceof LogicalHiveScanOperator || operator instanceof LogicalHudiScanOperator) {
if (ConnectContext.get().getSessionVariable().enableHiveColumnStats()) {
if (operator instanceof LogicalHiveScanOperator) {
return ((LogicalHiveScanOperator) operator).hasUnknownColumn();
} else {
return ((LogicalHudiScanOperator) operator).hasUnknownColumn();
}
}
return true;
} else if (operator instanceof LogicalIcebergScanOperator) {
return ((LogicalIcebergScanOperator) operator).hasUnknownColumn();
} else {
return true;
}
}
return root.getInputs().stream().anyMatch(Utils::hasUnknownColumnsStats);
}
public static long getLongFromDateTime(LocalDateTime dateTime) {
return dateTime.atZone(ZoneId.systemDefault()).toInstant().getEpochSecond();
}
public static LocalDateTime getDatetimeFromLong(long dateTime) {
return LocalDateTime.ofInstant(Instant.ofEpochSecond(dateTime), ZoneId.systemDefault());
}
public static long convertBitSetToLong(BitSet bitSet, int length) {
long gid = 0;
for (int b = 0; b < length; ++b) {
gid = gid * 2 + (bitSet.get(b) ? 1 : 0);
}
return gid;
}
/**
 * Among the mapped column refs, keeps only those whose column belongs to the table's
 * base schema, then delegates to findSmallestColumnRef.
 */
public static ColumnRefOperator findSmallestColumnRefFromTable(Map<ColumnRefOperator, Column> colRefToColumnMetaMap,
                                                               Table table) {
    Set<Column> baseSchema = new HashSet<>(table.getBaseSchema());
    List<ColumnRefOperator> visibleRefs = Lists.newArrayList();
    for (Map.Entry<ColumnRefOperator, Column> entry : colRefToColumnMetaMap.entrySet()) {
        if (baseSchema.contains(entry.getValue())) {
            visibleRefs.add(entry.getKey());
        }
    }
    return findSmallestColumnRef(visibleRefs);
}
/**
 * Picks the column ref with the smallest scalar type size; refs with invalid or
 * unknown types are ignored. Falls back to the first element when no candidate
 * qualifies; returns null for an empty/null list.
 */
public static ColumnRefOperator findSmallestColumnRef(List<ColumnRefOperator> columnRefOperatorList) {
    if (CollectionUtils.isEmpty(columnRefOperatorList)) {
        return null;
    }
    ColumnRefOperator best = columnRefOperatorList.get(0);
    int bestSize = Integer.MAX_VALUE;
    for (ColumnRefOperator candidate : columnRefOperatorList) {
        Type type = candidate.getType();
        if (!type.isScalarType() || type.isInvalid() || type.isUnknown()) {
            continue;
        }
        int size = type.getTypeSize();
        if (size < bestSize) {
            best = candidate;
            bestSize = size;
        }
    }
    return best;
}
/**
 * Returns true when the predicate is an equality comparison, or an AND tree whose
 * leaves are all equality comparisons. OR compounds and other operators return false.
 */
public static boolean isEqualBinaryPredicate(ScalarOperator predicate) {
    if (predicate instanceof BinaryPredicateOperator) {
        return ((BinaryPredicateOperator) predicate).getBinaryType().isEquivalence();
    }
    if (predicate instanceof CompoundPredicateOperator) {
        CompoundPredicateOperator compound = (CompoundPredicateOperator) predicate;
        return compound.isAnd()
                && isEqualBinaryPredicate(compound.getChild(0))
                && isEqualBinaryPredicate(compound.getChild(1));
    }
    return false;
}
/**
 * Try cast op to descType, return empty if failed.
 *
 * Only constant operands are eligible; FLOAT on either side is rejected (float
 * literals do not round-trip losslessly through text). A cast result is accepted
 * only when it prints back to the original literal, or — for date targets fed by
 * integer/string literals — to the same digits with the dashes removed.
 */
public static Optional<ScalarOperator> tryCastConstant(ScalarOperator op, Type descType) {
    if (!op.isConstantRef() || op.getType().matchesType(descType) || Type.FLOAT.equals(op.getType())
            || descType.equals(Type.FLOAT)) {
        return Optional.empty();
    }
    // NULL casts trivially to a NULL of the target type.
    if (((ConstantOperator) op).isNull()) {
        return Optional.of(ConstantOperator.createNull(descType));
    }
    Optional<ConstantOperator> result = ((ConstantOperator) op).castToStrictly(descType);
    if (result.isEmpty()) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("invalid value: {} to type {}", op, descType);
        }
        return Optional.empty();
    }
    // Accept only when the cast round-trips textually (guards against silent truncation).
    if (result.get().toString().equalsIgnoreCase(op.toString())) {
        return Optional.of(result.get());
    } else if (descType.isDate() && (op.getType().isIntegerType() || op.getType().isStringType())) {
        // e.g. literal 20240101 vs date "2024-01-01": compare with dashes stripped.
        if (op.toString().equalsIgnoreCase(result.get().toString().replaceAll("-", ""))) {
            return Optional.of(result.get());
        }
    }
    return Optional.empty();
}
/**
 * For a comparison of the form CAST(child AS lhsType) vs constant rhs, tries to
 * rewrite the constant into the child's type so the cast can be eliminated.
 * Returns empty unless all three types are exact numerics and the rewrite is
 * provably lossless.
 */
public static Optional<ScalarOperator> tryDecimalCastConstant(CastOperator lhs, ConstantOperator rhs) {
    Type lhsType = lhs.getType();
    Type rhsType = rhs.getType();
    Type childType = lhs.getChild(0).getType();
    // All three types must be exact numerics; float/double comparisons are unsafe here.
    if (!lhsType.isExactNumericType() ||
            !rhsType.isExactNumericType() ||
            !childType.isExactNumericType()) {
        return Optional.empty();
    }
    // The child -> lhsType cast must be a widening decimal assignment.
    if (!Type.isAssignable2Decimal((ScalarType) lhsType, (ScalarType) childType)) {
        return Optional.empty();
    }
    if (rhs.isNull()) {
        return Optional.of(ConstantOperator.createNull(childType));
    }
    Optional<ConstantOperator> result = rhs.castTo(childType);
    if (result.isEmpty()) {
        return Optional.empty();
    }
    // Accept when rhs fits childType by assignability, or when the cast round-trips
    // textually (no precision lost).
    if (Type.isAssignable2Decimal((ScalarType) childType, (ScalarType) rhsType)) {
        return Optional.of(result.get());
    } else if (result.get().toString().equalsIgnoreCase(rhs.toString())) {
        return Optional.of(result.get());
    }
    return Optional.empty();
}
/**
 * Normalizes a constant TRUE predicate (which filters nothing) to null; every
 * other predicate is returned unchanged.
 */
public static ScalarOperator transTrue2Null(ScalarOperator predicates) {
    return ConstantOperator.TRUE.equals(predicates) ? null : predicates;
}
/**
 * Collects every node of the requested type from the operator tree, in pre-order.
 */
public static <T extends ScalarOperator> List<T> collect(ScalarOperator root, Class<T> clazz) {
    List<T> matches = Lists.newArrayList();
    collect(root, clazz, matches);
    return matches;
}
// Pre-order traversal: test the current node, then descend into each child.
private static <T extends ScalarOperator> void collect(ScalarOperator root, Class<T> clazz, List<T> output) {
    if (clazz.isInstance(root)) {
        output.add(clazz.cast(root));
    }
    for (ScalarOperator child : root.getChildren()) {
        collect(child, clazz, output);
    }
}
/**
 * Compute the maximal power-of-two number which is less than or equal to the given number.
 * Returns 0 for an input of 0 and Integer.MIN_VALUE for negative inputs, identical to the
 * previous manual bit-smearing implementation.
 */
public static int computeMaxLEPower2(int num) {
    // Integer.highestOneBit is the JDK's version of the bit-smear-then-subtract trick.
    return Integer.highestOneBit(num);
}
/**
 * Compute the minimal power-of-two number which is greater than or equal to the given
 * number. Inputs <= 1 yield 1. For inputs above 2^30 the smear overflows int and the
 * result wraps (Integer.MIN_VALUE); callers are expected to stay within 2^30.
 */
public static int computeMinGEPower2(int num) {
    num -= 1;
    // Smear the highest set bit into every lower position, then add one to round up.
    num |= (num >>> 1);
    num |= (num >>> 2);
    num |= (num >>> 4);
    num |= (num >>> 8);
    num |= (num >>> 16);
    // Negative after the decrement means the input was <= 0; clamp to 1.
    return num < 0 ? 1 : num + 1;
}
/**
 * Checks whether the predicate is guaranteed to reject rows when all of the given
 * columns are NULL: substitutes NULL for each column ref and returns true if any
 * conjunct folds to NULL or FALSE.
 *
 * @param nullOutputColumnOps column refs assumed to produce NULL
 * @param expression predicate to probe
 * @return true if some conjunct folds to NULL or FALSE; false otherwise, or on any error
 */
public static boolean canEliminateNull(Set<ColumnRefOperator> nullOutputColumnOps, ScalarOperator expression) {
    try {
        // Map each column ref (re-created as nullable) to a NULL constant of its type.
        Map<ColumnRefOperator, ScalarOperator> m = nullOutputColumnOps.stream()
                .map(op -> new ColumnRefOperator(op.getId(), op.getType(), op.getName(), true))
                .collect(Collectors.toMap(identity(), col -> ConstantOperator.createNull(col.getType())));
        for (ScalarOperator e : Utils.extractConjuncts(expression)) {
            ScalarOperator nullEval = new ReplaceColumnRefRewriter(m).rewrite(e);
            // Constant-fold the substituted conjunct.
            ScalarOperatorRewriter scalarRewriter = new ScalarOperatorRewriter();
            nullEval = scalarRewriter.rewrite(nullEval, ScalarOperatorRewriter.DEFAULT_REWRITE_RULES);
            if (nullEval.isConstantRef() && ((ConstantOperator) nullEval).isNull()) {
                return true;
            } else if (nullEval.equals(ConstantOperator.createBoolean(false))) {
                return true;
            }
        }
    } catch (Exception e) {
        // Fixed: the format string was missing its "{}" placeholder, so the stack
        // trace argument was silently dropped by the logger.
        LOG.warn("Failed to eliminate null: {}", DebugUtil.getStackTrace(e));
        return false;
    }
    return false;
}
/**
 * Returns true if the operator tree may produce a non-NULL result even when its
 * inputs are NULL. Column refs, constants and casts propagate NULL (false); calls
 * are checked against the registry of null-propagating builtin functions; UDFs,
 * assert_true and unresolved functions are conservatively treated as true.
 */
public static boolean isNotAlwaysNullResultWithNullScalarOperator(ScalarOperator scalarOperator) {
    // Any qualifying descendant makes the whole tree qualify.
    for (ScalarOperator child : scalarOperator.getChildren()) {
        if (isNotAlwaysNullResultWithNullScalarOperator(child)) {
            return true;
        }
    }
    if (scalarOperator.isColumnRef() || scalarOperator.isConstantRef() || scalarOperator instanceof CastOperator) {
        return false;
    } else if (scalarOperator instanceof CallOperator) {
        Function fn = ((CallOperator) scalarOperator).getFunction();
        if (fn == null) {
            // Unresolved function: be conservative.
            return true;
        }
        if (!GlobalStateMgr.getCurrentState()
                .isNotAlwaysNullResultWithNullParamFunction(fn.getFunctionName().getFunction())
                && !fn.isUdf()
                && !FunctionSet.ASSERT_TRUE.equals(fn.getFunctionName().getFunction())) {
            return false;
        }
    }
    // Unknown operator kinds default to true (may produce non-NULL from NULL input).
    return true;
}
public static Stream<Integer> getIntStream(RoaringBitmap bitmap) {
Spliterator<Integer> iter = Spliterators.spliteratorUnknownSize(bitmap.iterator(), Spliterator.ORDERED);
return StreamSupport.stream(iter, false);
}
public static Set<Pair<ColumnRefOperator, ColumnRefOperator>> getJoinEqualColRefPairs(OptExpression joinOp) {
Pair<List<BinaryPredicateOperator>, List<ScalarOperator>> onPredicates =
JoinHelper.separateEqualPredicatesFromOthers(joinOp);
List<BinaryPredicateOperator> eqOnPredicates = onPredicates.first;
List<ScalarOperator> otherOnPredicates = onPredicates.second;
if (!otherOnPredicates.isEmpty() || eqOnPredicates.isEmpty()) {
return Collections.emptySet();
}
Set<Pair<ColumnRefOperator, ColumnRefOperator>> eqColumnRefPairs = Sets.newHashSet();
for (BinaryPredicateOperator eqPredicate : eqOnPredicates) {
ColumnRefOperator leftCol = eqPredicate.getChild(0).cast();
ColumnRefOperator rightCol = eqPredicate.getChild(1).cast();
eqColumnRefPairs.add(Pair.create(leftCol, rightCol));
}
return eqColumnRefPairs;
}
public static Map<ColumnRefOperator, ColumnRefOperator> makeEqColumRefMapFromSameTables(
LogicalScanOperator lhsScanOp, LogicalScanOperator rhsScanOp) {
Preconditions.checkArgument(lhsScanOp.getTable().getId() == rhsScanOp.getTable().getId());
Set<Column> lhsColumns = lhsScanOp.getColumnMetaToColRefMap().keySet();
Set<Column> rhsColumns = rhsScanOp.getColumnMetaToColRefMap().keySet();
Preconditions.checkArgument(lhsColumns.equals(rhsColumns));
Map<ColumnRefOperator, ColumnRefOperator> eqColumnRefs = Maps.newHashMap();
for (Column column : lhsColumns) {
ColumnRefOperator lhsColRef = lhsScanOp.getColumnMetaToColRefMap().get(column);
ColumnRefOperator rhsColRef = rhsScanOp.getColumnMetaToColRefMap().get(column);
eqColumnRefs.put(Objects.requireNonNull(lhsColRef), Objects.requireNonNull(rhsColRef));
}
return eqColumnRefs;
}
/**
 * Decides whether a multi-stage (local + global) aggregation plan may be generated.
 * Mandatory cases (see mustGenerateMultiStageAggregate) always allow it; otherwise
 * the session's agg-stage setting or the one-tablet optimization can force a
 * single-stage plan.
 */
public static boolean couldGenerateMultiStageAggregate(LogicalProperty inputLogicalProperty,
                                                       Operator inputOp, Operator childOp) {
    if (mustGenerateMultiStageAggregate(inputOp, childOp)) {
        return true;
    }
    int aggStage = ConnectContext.get().getSessionVariable().getNewPlannerAggStage();
    // ONE_STAGE always forces single-stage; AUTO does so only when all data is in one tablet.
    if (aggStage == ONE_STAGE.ordinal() ||
            (aggStage == AUTO.ordinal() && inputLogicalProperty.oneTabletProperty().supportOneTabletOpt)) {
        return false;
    }
    return true;
}
/**
 * Cases where a multi-stage aggregation is mandatory:
 * - the child is a REPEAT node (grouping sets);
 * - a DISTINCT aggregate has multiple arguments or a complex-typed argument;
 * - DISTINCT group_concat / avg;
 * - DISTINCT array_agg with multiple arguments or a decimal argument.
 */
public static boolean mustGenerateMultiStageAggregate(Operator inputOp, Operator childOp) {
    if (OperatorType.LOGICAL_REPEAT.equals(childOp.getOpType()) || OperatorType.PHYSICAL_REPEAT.equals(childOp.getOpType())) {
        return true;
    }
    Map<ColumnRefOperator, CallOperator> aggs = Maps.newHashMap();
    if (OperatorType.LOGICAL_AGGR.equals(inputOp.getOpType())) {
        aggs = ((LogicalAggregationOperator) inputOp).getAggregations();
    } else if (OperatorType.PHYSICAL_HASH_AGG.equals(inputOp.getOpType())) {
        aggs = ((PhysicalHashAggregateOperator) inputOp).getAggregations();
    }
    for (CallOperator callOperator : aggs.values()) {
        if (callOperator.isDistinct()) {
            String fnName = callOperator.getFnName();
            List<ScalarOperator> children = callOperator.getChildren();
            if (children.size() > 1 || children.stream().anyMatch(c -> c.getType().isComplexType())) {
                return true;
            }
            if (FunctionSet.GROUP_CONCAT.equalsIgnoreCase(fnName) || FunctionSet.AVG.equalsIgnoreCase(fnName)) {
                return true;
            } else if (FunctionSet.ARRAY_AGG.equalsIgnoreCase(fnName)) {
                // NOTE(review): decimal array_agg(distinct) is forced multi-stage;
                // the rationale is not visible in this file — confirm before changing.
                if (children.size() > 1 || children.get(0).getType().isDecimalOfAnyVersion()) {
                    return true;
                }
            }
        }
    }
    return false;
}
public static Optional<List<ColumnRefOperator>> extractCommonDistinctCols(Collection<CallOperator> aggCallOperators) {
Set<ColumnRefOperator> distinctChildren = Sets.newHashSet();
for (CallOperator callOperator : aggCallOperators) {
if (callOperator.isDistinct()) {
if (distinctChildren.isEmpty()) {
distinctChildren = Sets.newHashSet(callOperator.getColumnRefs());
} else {
Set<ColumnRefOperator> nextDistinctChildren = Sets.newHashSet(callOperator.getColumnRefs());
if (!SetUtils.isEqualSet(distinctChildren, nextDistinctChildren)) {
return Optional.empty();
}
}
}
}
return Optional.of(Lists.newArrayList(distinctChildren));
}
/**
 * Returns true if the operator tree contains a call to a non-deterministic function
 * (per FunctionSet.nonDeterministicFunctions), including at the root.
 */
public static boolean hasNonDeterministicFunc(ScalarOperator operator) {
    // Fixed: the original only inspected children, so a non-deterministic call at the
    // root of the tree (e.g. the expression itself being rand()) was never detected.
    if (operator instanceof CallOperator) {
        String fnName = ((CallOperator) operator).getFnName();
        if (FunctionSet.nonDeterministicFunctions.contains(fnName)) {
            return true;
        }
    }
    for (ScalarOperator child : operator.getChildren()) {
        if (hasNonDeterministicFunc(child)) {
            return true;
        }
    }
    return false;
}
public static void calculateStatistics(OptExpression expr, OptimizerContext context) {
for (OptExpression child : expr.getInputs()) {
calculateStatistics(child, context);
}
if (expr.getOp() instanceof LogicalTreeAnchorOperator) {
return;
}
ExpressionContext expressionContext = new ExpressionContext(expr);
StatisticsCalculator statisticsCalculator = new StatisticsCalculator(
expressionContext, context.getColumnRefFactory(), context);
try {
statisticsCalculator.estimatorStats();
} catch (Exception e) {
LOG.warn("Failed to calculate statistics for expression: {}", expr, e);
return;
}
expr.setStatistics(expressionContext.getStatistics());
}
/**
 * Add new project into input, merge input's existing project if input has one.
 * The new projection is composed on top of the old one: each expression in
 * newProjectionMap is rewritten through the input's current column-ref map, so its
 * references resolve to what the old projection produced.
 *
 * @param input input expression (its operator is mutated in place)
 * @param newProjectionMap new project map to be pushed down into input
 * @return the same expression with the merged projection set
 */
public static OptExpression mergeProjection(OptExpression input,
                                            Map<ColumnRefOperator, ScalarOperator> newProjectionMap) {
    if (newProjectionMap == null || newProjectionMap.isEmpty()) {
        return input;
    }
    Operator newOp = input.getOp();
    if (newOp.getProjection() == null || newOp.getProjection().getColumnRefMap().isEmpty()) {
        newOp.setProjection(new Projection(newProjectionMap));
    } else {
        // Compose: rewrite the new expressions against the existing projection's mappings.
        ReplaceColumnRefRewriter rewriter = new ReplaceColumnRefRewriter(newOp.getProjection().getColumnRefMap());
        Map<ColumnRefOperator, ScalarOperator> resultMap = Maps.newHashMap();
        for (Map.Entry<ColumnRefOperator, ScalarOperator> entry : newProjectionMap.entrySet()) {
            ScalarOperator result = rewriter.rewrite(entry.getValue());
            resultMap.put(entry.getKey(), result);
        }
        newOp.setProjection(new Projection(resultMap));
    }
    return input;
}
/*
 * NOTE: the javadoc that previously sat here described isOpAppliedRule(Operator, int)
 * ("check if the operator has applied the rule identified by ruleMask"); that method
 * is defined elsewhere in this class (it is called by isOptHasAppliedRule below).
 */
/**
* Set the rule mask to the operator
* @param op input operator
* @param ruleMask specific rule mask
*/
public static void setOpAppliedRule(Operator op, int ruleMask) {
    // Record that the rule has been applied by OR-ing its bit(s) into the mask.
    if (op != null) {
        op.setOpRuleMask(op.getOpRuleMask() | ruleMask);
    }
}
/**
 * Clear the given rule bit(s) from the operator's applied-rule mask.
 *
 * @param op input operator (no-op when null)
 * @param ruleMask specific rule mask to clear
 */
public static void resetOpAppliedRule(Operator op, int ruleMask) {
    if (op == null) {
        return;
    }
    // Fixed: the original used `mask | (~ruleMask)`, which SETS almost every bit
    // instead of clearing ruleMask. Clearing a bit requires AND with the complement.
    op.setOpRuleMask(op.getOpRuleMask() & (~ruleMask));
}
/**
 * Recursively check whether the optExpression or any of its descendants has applied
 * the rule identified by ruleMask.
 *
 * @param optExpression input optExpression to be checked (null-safe)
 * @param ruleMask specific rule mask
 * @return true if the optExpression or its children have applied the rule
 */
public static boolean isOptHasAppliedRule(OptExpression optExpression, int ruleMask) {
    if (optExpression == null) {
        return false;
    }
    if (isOpAppliedRule(optExpression.getOp(), ruleMask)) {
        return true;
    }
    return optExpression.getInputs().stream()
            .anyMatch(child -> isOptHasAppliedRule(child, ruleMask));
}
} | class Utils {
private static final Logger LOG = LogManager.getLogger(Utils.class);
/**
 * Flattens a tree of AND predicates into the ordered list of its leaf conjuncts.
 * Returns an empty list for a null root.
 */
public static List<ScalarOperator> extractConjuncts(ScalarOperator root) {
    LinkedList<ScalarOperator> conjuncts = new LinkedList<>();
    if (root != null) {
        extractConjunctsImpl(root, conjuncts);
    }
    return conjuncts;
}
public static Set<ScalarOperator> extractConjunctSet(ScalarOperator root) {
Set<ScalarOperator> list = Sets.newHashSet();
if (null == root) {
return list;
}
extractConjunctsImpl(root, list);
return list;
}
// Recurse only through AND compounds; anything else (including OR) is a leaf conjunct.
private static void extractConjunctsImpl(ScalarOperator root, Collection<ScalarOperator> result) {
    if (OperatorType.COMPOUND.equals(root.getOpType()) && ((CompoundPredicateOperator) root).isAnd()) {
        CompoundPredicateOperator and = (CompoundPredicateOperator) root;
        extractConjunctsImpl(and.getChild(0), result);
        extractConjunctsImpl(and.getChild(1), result);
    } else {
        result.add(root);
    }
}
public static List<ScalarOperator> extractDisjunctive(ScalarOperator root) {
LinkedList<ScalarOperator> list = new LinkedList<>();
if (null == root) {
return list;
}
extractDisjunctiveImpl(root, list);
return list;
}
private static void extractDisjunctiveImpl(ScalarOperator root, List<ScalarOperator> result) {
if (!OperatorType.COMPOUND.equals(root.getOpType())) {
result.add(root);
return;
}
CompoundPredicateOperator cpo = (CompoundPredicateOperator) root;
if (!cpo.isOr()) {
result.add(root);
return;
}
extractDisjunctiveImpl(cpo.getChild(0), result);
extractDisjunctiveImpl(cpo.getChild(1), result);
}
/**
 * Collects every column reference in the operator tree. isVariable() prunes
 * constant-only subtrees early.
 */
public static List<ColumnRefOperator> extractColumnRef(ScalarOperator root) {
    if (root == null || !root.isVariable()) {
        return new LinkedList<>();
    }
    LinkedList<ColumnRefOperator> refs = new LinkedList<>();
    if (OperatorType.VARIABLE.equals(root.getOpType())) {
        refs.add((ColumnRefOperator) root);
    } else {
        for (ScalarOperator child : root.getChildren()) {
            refs.addAll(extractColumnRef(child));
        }
    }
    return refs;
}
/**
 * Counts the column references in the operator tree.
 */
public static int countColumnRef(ScalarOperator root) {
    // The second argument of the private overload is an accumulator seed.
    return countColumnRef(root, 0);
}
/**
 * Recursive worker for countColumnRef.
 *
 * Fixed: the original passed the running total into the recursive call
 * ({@code count += countColumnRef(child, count)}), so nested compound children
 * started counting from a non-zero base and column refs were double-counted.
 * Each subtree is now counted independently; the {@code count} parameter is kept
 * only for signature compatibility and is ignored.
 */
private static int countColumnRef(ScalarOperator root, int count) {
    if (null == root || !root.isVariable()) {
        return 0;
    }
    if (OperatorType.VARIABLE.equals(root.getOpType())) {
        return 1;
    }
    int total = 0;
    for (ScalarOperator child : root.getChildren()) {
        total += countColumnRef(child, 0);
    }
    return total;
}
public static void extractOlapScanOperator(GroupExpression groupExpression, List<LogicalOlapScanOperator> list) {
extractOperator(groupExpression, list, p -> OperatorType.LOGICAL_OLAP_SCAN.equals(p.getOpType()));
}
public static List<PhysicalOlapScanOperator> extractPhysicalOlapScanOperator(OptExpression root) {
List<PhysicalOlapScanOperator> list = Lists.newArrayList();
extractOperator(root, list, op -> OperatorType.PHYSICAL_OLAP_SCAN.equals(op.getOpType()));
return list;
}
public static <E extends Operator> void extractOperator(OptExpression root, List<E> list,
Predicate<Operator> lambda) {
if (lambda.test(root.getOp())) {
list.add((E) root.getOp());
return;
}
List<OptExpression> inputs = root.getInputs();
for (OptExpression input : inputs) {
extractOperator(input, list, lambda);
}
}
private static <E extends Operator> void extractOperator(GroupExpression root, List<E> list,
Predicate<Operator> lambda) {
if (lambda.test(root.getOp())) {
list.add((E) root.getOp());
return;
}
List<Group> groups = root.getInputs();
for (Group group : groups) {
GroupExpression expression = group.getFirstLogicalExpression();
extractOperator(expression, list, lambda);
}
}
/**
 * Returns true when the operator tree references any of the given column refs.
 */
public static boolean containAnyColumnRefs(List<ColumnRefOperator> refs, ScalarOperator operator) {
    if (refs.isEmpty() || operator == null) {
        return false;
    }
    if (operator.isColumnRef()) {
        return refs.contains(operator);
    }
    return operator.getChildren().stream()
            .anyMatch(child -> containAnyColumnRefs(refs, child));
}
/**
 * Returns true when the operator tree references a column with the given name
 * (compared case-insensitively).
 */
public static boolean containColumnRef(ScalarOperator operator, String column) {
    if (column == null || operator == null) {
        return false;
    }
    if (operator.isColumnRef()) {
        return ((ColumnRefOperator) operator).getName().equalsIgnoreCase(column);
    }
    return operator.getChildren().stream()
            .anyMatch(child -> containColumnRef(child, column));
}
public static ScalarOperator compoundOr(Collection<ScalarOperator> nodes) {
return createCompound(CompoundPredicateOperator.CompoundType.OR, nodes);
}
public static ScalarOperator compoundOr(ScalarOperator... nodes) {
return createCompound(CompoundPredicateOperator.CompoundType.OR, Arrays.asList(nodes));
}
public static ScalarOperator compoundAnd(Collection<ScalarOperator> nodes) {
return createCompound(CompoundPredicateOperator.CompoundType.AND, nodes);
}
public static ScalarOperator compoundAnd(ScalarOperator... nodes) {
return createCompound(CompoundPredicateOperator.CompoundType.AND, Arrays.asList(nodes));
}
/**
 * Combines the given predicates with the given compound type (AND/OR) into a
 * balanced binary tree. Null entries are filtered out; returns null for an empty
 * input and the lone element for a singleton.
 */
public static ScalarOperator createCompound(CompoundPredicateOperator.CompoundType type,
                                            Collection<ScalarOperator> nodes) {
    LinkedList<ScalarOperator> link =
            nodes.stream().filter(Objects::nonNull).collect(Collectors.toCollection(Lists::newLinkedList));
    if (link.size() < 1) {
        return null;
    }
    if (link.size() == 1) {
        return link.get(0);
    }
    // Pairwise-combine level by level: this builds a balanced tree rather than a deep
    // left-leaning chain, keeping recursion depth logarithmic for large predicate lists.
    while (link.size() > 1) {
        LinkedList<ScalarOperator> buffer = new LinkedList<>();
        while (link.size() >= 2) {
            buffer.add(new CompoundPredicateOperator(type, link.poll(), link.poll()));
        }
        // An odd element is carried to the next level unchanged.
        if (!link.isEmpty()) {
            buffer.add(link.remove());
        }
        link = buffer;
    }
    return link.remove();
}
public static int countJoinNodeSize(OptExpression root, Set<JoinOperator> joinTypes) {
int count = 0;
Operator operator = root.getOp();
for (OptExpression child : root.getInputs()) {
if (isSuitableJoin(operator, joinTypes)) {
count += countJoinNodeSize(child, joinTypes);
} else {
count = Math.max(count, countJoinNodeSize(child, joinTypes));
}
}
if (isSuitableJoin(operator, joinTypes)) {
count += 1;
}
return count;
}
private static boolean isSuitableJoin(Operator operator, Set<JoinOperator> joinTypes) {
if (operator instanceof LogicalJoinOperator) {
LogicalJoinOperator joinOperator = (LogicalJoinOperator) operator;
return joinTypes.contains(joinOperator.getJoinType()) && joinOperator.getJoinHint().isEmpty();
}
return false;
}
public static boolean capableOuterReorder(OptExpression root, int threshold) {
boolean[] hasOuterOrSemi = {false};
int totalJoinNodes = countJoinNode(root, hasOuterOrSemi);
return totalJoinNodes < threshold && hasOuterOrSemi[0];
}
private static int countJoinNode(OptExpression root, boolean[] hasOuterOrSemi) {
int count = 0;
Operator operator = root.getOp();
for (OptExpression child : root.getInputs()) {
if (operator instanceof LogicalJoinOperator && ((LogicalJoinOperator) operator).getJoinHint().isEmpty()) {
count += countJoinNode(child, hasOuterOrSemi);
} else {
count = Math.max(count, countJoinNode(child, hasOuterOrSemi));
}
}
if (operator instanceof LogicalJoinOperator && ((LogicalJoinOperator) operator).getJoinHint().isEmpty()) {
count += 1;
if (!hasOuterOrSemi[0]) {
LogicalJoinOperator joinOperator = (LogicalJoinOperator) operator;
if (joinOperator.getJoinType().isOuterJoin() || joinOperator.getJoinType().isSemiAntiJoin()) {
hasOuterOrSemi[0] = true;
}
}
}
return count;
}
/**
 * Returns true if any scan in the expression tree reads a column whose statistics
 * are unknown. For aggregate-key olap tables only key columns are checked; external
 * scans (hive/hudi/iceberg) delegate to the scan operator; any other scan type is
 * conservatively treated as unknown.
 */
public static boolean hasUnknownColumnsStats(OptExpression root) {
    Operator operator = root.getOp();
    if (operator instanceof LogicalScanOperator) {
        LogicalScanOperator scanOperator = (LogicalScanOperator) operator;
        List<String> colNames =
                scanOperator.getColRefToColumnMetaMap().values().stream().map(Column::getName).collect(
                        Collectors.toList());
        if (operator instanceof LogicalOlapScanOperator) {
            Table table = scanOperator.getTable();
            if (table instanceof OlapTable) {
                if (KeysType.AGG_KEYS.equals(((OlapTable) table).getKeysType())) {
                    // Aggregate-key tables: only key-column statistics are consulted.
                    List<String> keyColumnNames =
                            scanOperator.getColRefToColumnMetaMap().values().stream().filter(Column::isKey)
                                    .map(Column::getName)
                                    .collect(Collectors.toList());
                    List<ColumnStatistic> keyColumnStatisticList =
                            GlobalStateMgr.getCurrentState().getStatisticStorage().getColumnStatistics(table, keyColumnNames);
                    return keyColumnStatisticList.stream().anyMatch(ColumnStatistic::isUnknown);
                }
            }
            List<ColumnStatistic> columnStatisticList =
                    GlobalStateMgr.getCurrentState().getStatisticStorage().getColumnStatistics(table, colNames);
            return columnStatisticList.stream().anyMatch(ColumnStatistic::isUnknown);
        } else if (operator instanceof LogicalHiveScanOperator || operator instanceof LogicalHudiScanOperator) {
            if (ConnectContext.get().getSessionVariable().enableHiveColumnStats()) {
                if (operator instanceof LogicalHiveScanOperator) {
                    return ((LogicalHiveScanOperator) operator).hasUnknownColumn();
                } else {
                    return ((LogicalHudiScanOperator) operator).hasUnknownColumn();
                }
            }
            // Hive column statistics disabled: treat as unknown.
            return true;
        } else if (operator instanceof LogicalIcebergScanOperator) {
            return ((LogicalIcebergScanOperator) operator).hasUnknownColumn();
        } else {
            // No statistics support for other scan kinds.
            return true;
        }
    }
    // Non-scan node: recurse into children.
    return root.getInputs().stream().anyMatch(Utils::hasUnknownColumnsStats);
}
public static long getLongFromDateTime(LocalDateTime dateTime) {
return dateTime.atZone(ZoneId.systemDefault()).toInstant().getEpochSecond();
}
public static LocalDateTime getDatetimeFromLong(long dateTime) {
return LocalDateTime.ofInstant(Instant.ofEpochSecond(dateTime), ZoneId.systemDefault());
}
public static long convertBitSetToLong(BitSet bitSet, int length) {
long gid = 0;
for (int b = 0; b < length; ++b) {
gid = gid * 2 + (bitSet.get(b) ? 1 : 0);
}
return gid;
}
public static ColumnRefOperator findSmallestColumnRefFromTable(Map<ColumnRefOperator, Column> colRefToColumnMetaMap,
Table table) {
Set<Column> baseSchema = new HashSet<>(table.getBaseSchema());
List<ColumnRefOperator> visibleColumnRefs = colRefToColumnMetaMap.entrySet().stream()
.filter(e -> baseSchema.contains(e.getValue()))
.map(Map.Entry::getKey)
.collect(Collectors.toList());
return findSmallestColumnRef(visibleColumnRefs);
}
public static ColumnRefOperator findSmallestColumnRef(List<ColumnRefOperator> columnRefOperatorList) {
if (CollectionUtils.isEmpty(columnRefOperatorList)) {
return null;
}
ColumnRefOperator smallestColumnRef = columnRefOperatorList.get(0);
int smallestColumnLength = Integer.MAX_VALUE;
for (ColumnRefOperator columnRefOperator : columnRefOperatorList) {
Type columnType = columnRefOperator.getType();
if (columnType.isScalarType() && !columnType.isInvalid() && !columnType.isUnknown()) {
int columnLength = columnType.getTypeSize();
if (columnLength < smallestColumnLength) {
smallestColumnRef = columnRefOperator;
smallestColumnLength = columnLength;
}
}
}
return smallestColumnRef;
}
public static boolean isEqualBinaryPredicate(ScalarOperator predicate) {
if (predicate instanceof BinaryPredicateOperator) {
BinaryPredicateOperator binaryPredicate = (BinaryPredicateOperator) predicate;
return binaryPredicate.getBinaryType().isEquivalence();
}
if (predicate instanceof CompoundPredicateOperator) {
CompoundPredicateOperator compoundPredicate = (CompoundPredicateOperator) predicate;
if (compoundPredicate.isAnd()) {
return isEqualBinaryPredicate(compoundPredicate.getChild(0)) &&
isEqualBinaryPredicate(compoundPredicate.getChild(1));
}
return false;
}
return false;
}
/**
* Try cast op to descType, return empty if failed
*/
public static Optional<ScalarOperator> tryCastConstant(ScalarOperator op, Type descType) {
if (!op.isConstantRef() || op.getType().matchesType(descType) || Type.FLOAT.equals(op.getType())
|| descType.equals(Type.FLOAT)) {
return Optional.empty();
}
if (((ConstantOperator) op).isNull()) {
return Optional.of(ConstantOperator.createNull(descType));
}
Optional<ConstantOperator> result = ((ConstantOperator) op).castToStrictly(descType);
if (result.isEmpty()) {
if (LOG.isDebugEnabled()) {
LOG.debug("invalid value: {} to type {}", op, descType);
}
return Optional.empty();
}
if (result.get().toString().equalsIgnoreCase(op.toString())) {
return Optional.of(result.get());
} else if (descType.isDate() && (op.getType().isIntegerType() || op.getType().isStringType())) {
if (op.toString().equalsIgnoreCase(result.get().toString().replaceAll("-", ""))) {
return Optional.of(result.get());
}
}
return Optional.empty();
}
public static Optional<ScalarOperator> tryDecimalCastConstant(CastOperator lhs, ConstantOperator rhs) {
Type lhsType = lhs.getType();
Type rhsType = rhs.getType();
Type childType = lhs.getChild(0).getType();
if (!lhsType.isExactNumericType() ||
!rhsType.isExactNumericType() ||
!childType.isExactNumericType()) {
return Optional.empty();
}
if (!Type.isAssignable2Decimal((ScalarType) lhsType, (ScalarType) childType)) {
return Optional.empty();
}
if (rhs.isNull()) {
return Optional.of(ConstantOperator.createNull(childType));
}
Optional<ConstantOperator> result = rhs.castTo(childType);
if (result.isEmpty()) {
return Optional.empty();
}
if (Type.isAssignable2Decimal((ScalarType) childType, (ScalarType) rhsType)) {
return Optional.of(result.get());
} else if (result.get().toString().equalsIgnoreCase(rhs.toString())) {
return Optional.of(result.get());
}
return Optional.empty();
}
public static ScalarOperator transTrue2Null(ScalarOperator predicates) {
if (ConstantOperator.TRUE.equals(predicates)) {
return null;
}
return predicates;
}
public static <T extends ScalarOperator> List<T> collect(ScalarOperator root, Class<T> clazz) {
List<T> output = Lists.newArrayList();
collect(root, clazz, output);
return output;
}
private static <T extends ScalarOperator> void collect(ScalarOperator root, Class<T> clazz, List<T> output) {
if (clazz.isInstance(root)) {
output.add(clazz.cast(root));
}
root.getChildren().forEach(child -> collect(child, clazz, output));
}
/**
 * Compute the maximal power-of-two number which is less than or equal to the given number.
 * Returns 0 for an input of 0 and Integer.MIN_VALUE for negative inputs, identical to the
 * previous manual bit-smearing implementation.
 */
public static int computeMaxLEPower2(int num) {
    // Integer.highestOneBit is the JDK's version of the bit-smear-then-subtract trick.
    return Integer.highestOneBit(num);
}
/**
 * Compute the minimal power-of-two number which is greater than or equal to the given
 * number. Inputs <= 1 yield 1. For inputs above 2^30 the smear overflows int and the
 * result wraps (Integer.MIN_VALUE); callers are expected to stay within 2^30.
 */
public static int computeMinGEPower2(int num) {
    num -= 1;
    // Smear the highest set bit into every lower position, then add one to round up.
    num |= (num >>> 1);
    num |= (num >>> 2);
    num |= (num >>> 4);
    num |= (num >>> 8);
    num |= (num >>> 16);
    // Negative after the decrement means the input was <= 0; clamp to 1.
    return num < 0 ? 1 : num + 1;
}
/**
* Check the input expression is not nullable or not.
* @param nullOutputColumnOps the nullable column reference operators.
* @param expression the input expression.
* @return true if the expression is not nullable, otherwise false.
*/
public static boolean canEliminateNull(Set<ColumnRefOperator> nullOutputColumnOps, ScalarOperator expression) {
try {
Map<ColumnRefOperator, ScalarOperator> m = nullOutputColumnOps.stream()
.map(op -> new ColumnRefOperator(op.getId(), op.getType(), op.getName(), true))
.collect(Collectors.toMap(identity(), col -> ConstantOperator.createNull(col.getType())));
for (ScalarOperator e : Utils.extractConjuncts(expression)) {
ScalarOperator nullEval = new ReplaceColumnRefRewriter(m).rewrite(e);
ScalarOperatorRewriter scalarRewriter = new ScalarOperatorRewriter();
nullEval = scalarRewriter.rewrite(nullEval, ScalarOperatorRewriter.DEFAULT_REWRITE_RULES);
if (nullEval.isConstantRef() && ((ConstantOperator) nullEval).isNull()) {
return true;
} else if (nullEval.equals(ConstantOperator.FALSE)) {
return true;
}
}
} catch (Throwable e) {
LOG.warn("Failed to eliminate null: {}", DebugUtil.getStackTrace(e));
return false;
}
return false;
}
public static boolean isNotAlwaysNullResultWithNullScalarOperator(ScalarOperator scalarOperator) {
for (ScalarOperator child : scalarOperator.getChildren()) {
if (isNotAlwaysNullResultWithNullScalarOperator(child)) {
return true;
}
}
if (scalarOperator.isColumnRef() || scalarOperator.isConstantRef() || scalarOperator instanceof CastOperator) {
return false;
} else if (scalarOperator instanceof CallOperator) {
Function fn = ((CallOperator) scalarOperator).getFunction();
if (fn == null) {
return true;
}
if (!GlobalStateMgr.getCurrentState()
.isNotAlwaysNullResultWithNullParamFunction(fn.getFunctionName().getFunction())
&& !fn.isUdf()
&& !FunctionSet.ASSERT_TRUE.equals(fn.getFunctionName().getFunction())) {
return false;
}
}
return true;
}
public static Stream<Integer> getIntStream(RoaringBitmap bitmap) {
Spliterator<Integer> iter = Spliterators.spliteratorUnknownSize(bitmap.iterator(), Spliterator.ORDERED);
return StreamSupport.stream(iter, false);
}
public static Set<Pair<ColumnRefOperator, ColumnRefOperator>> getJoinEqualColRefPairs(OptExpression joinOp) {
Pair<List<BinaryPredicateOperator>, List<ScalarOperator>> onPredicates =
JoinHelper.separateEqualPredicatesFromOthers(joinOp);
List<BinaryPredicateOperator> eqOnPredicates = onPredicates.first;
List<ScalarOperator> otherOnPredicates = onPredicates.second;
if (!otherOnPredicates.isEmpty() || eqOnPredicates.isEmpty()) {
return Collections.emptySet();
}
Set<Pair<ColumnRefOperator, ColumnRefOperator>> eqColumnRefPairs = Sets.newHashSet();
for (BinaryPredicateOperator eqPredicate : eqOnPredicates) {
ColumnRefOperator leftCol = eqPredicate.getChild(0).cast();
ColumnRefOperator rightCol = eqPredicate.getChild(1).cast();
eqColumnRefPairs.add(Pair.create(leftCol, rightCol));
}
return eqColumnRefPairs;
}
public static Map<ColumnRefOperator, ColumnRefOperator> makeEqColumRefMapFromSameTables(
LogicalScanOperator lhsScanOp, LogicalScanOperator rhsScanOp) {
Preconditions.checkArgument(lhsScanOp.getTable().getId() == rhsScanOp.getTable().getId());
Set<Column> lhsColumns = lhsScanOp.getColumnMetaToColRefMap().keySet();
Set<Column> rhsColumns = rhsScanOp.getColumnMetaToColRefMap().keySet();
Preconditions.checkArgument(lhsColumns.equals(rhsColumns));
Map<ColumnRefOperator, ColumnRefOperator> eqColumnRefs = Maps.newHashMap();
for (Column column : lhsColumns) {
ColumnRefOperator lhsColRef = lhsScanOp.getColumnMetaToColRefMap().get(column);
ColumnRefOperator rhsColRef = rhsScanOp.getColumnMetaToColRefMap().get(column);
eqColumnRefs.put(Objects.requireNonNull(lhsColRef), Objects.requireNonNull(rhsColRef));
}
return eqColumnRefs;
}
/**
 * Decides whether a multi-stage aggregation plan may be generated for this operator.
 * Returns true unconditionally when a multi-stage plan is mandatory; otherwise honors
 * the session's {@code new_planner_agg_stage} setting (single-stage forced, or AUTO
 * with one-tablet optimization available, disables multi-stage).
 */
public static boolean couldGenerateMultiStageAggregate(LogicalProperty inputLogicalProperty,
                                                       Operator inputOp, Operator childOp) {
    if (mustGenerateMultiStageAggregate(inputOp, childOp)) {
        return true;
    }
    int aggStage = ConnectContext.get().getSessionVariable().getNewPlannerAggStage();
    boolean forcedSingleStage = aggStage == ONE_STAGE.ordinal();
    boolean autoSingleStage = aggStage == AUTO.ordinal()
            && inputLogicalProperty.oneTabletProperty().supportOneTabletOpt;
    return !(forcedSingleStage || autoSingleStage);
}
/**
 * Returns true when a single-stage aggregation is impossible for this operator:
 * either the child is a REPEAT (grouping sets), or some DISTINCT aggregate call has
 * multiple arguments, a complex-typed argument, is GROUP_CONCAT/AVG, or is ARRAY_AGG
 * over a decimal argument.
 */
public static boolean mustGenerateMultiStageAggregate(Operator inputOp, Operator childOp) {
    OperatorType childType = childOp.getOpType();
    if (OperatorType.LOGICAL_REPEAT.equals(childType) || OperatorType.PHYSICAL_REPEAT.equals(childType)) {
        return true;
    }
    Map<ColumnRefOperator, CallOperator> aggregations = Maps.newHashMap();
    OperatorType inputType = inputOp.getOpType();
    if (OperatorType.LOGICAL_AGGR.equals(inputType)) {
        aggregations = ((LogicalAggregationOperator) inputOp).getAggregations();
    } else if (OperatorType.PHYSICAL_HASH_AGG.equals(inputType)) {
        aggregations = ((PhysicalHashAggregateOperator) inputOp).getAggregations();
    }
    for (CallOperator call : aggregations.values()) {
        if (!call.isDistinct()) {
            continue;
        }
        List<ScalarOperator> arguments = call.getChildren();
        // Multi-argument or complex-typed DISTINCT cannot be evaluated in one stage.
        if (arguments.size() > 1 || arguments.stream().anyMatch(arg -> arg.getType().isComplexType())) {
            return true;
        }
        String fnName = call.getFnName();
        if (FunctionSet.GROUP_CONCAT.equalsIgnoreCase(fnName) || FunctionSet.AVG.equalsIgnoreCase(fnName)) {
            return true;
        }
        // Only single-argument calls reach this point, so just check the decimal case.
        if (FunctionSet.ARRAY_AGG.equalsIgnoreCase(fnName)
                && arguments.get(0).getType().isDecimalOfAnyVersion()) {
            return true;
        }
    }
    return false;
}
/**
 * Returns the column list shared by every DISTINCT aggregate call, or
 * {@link Optional#empty()} when two distinct calls reference different column sets.
 * The empty set doubles as the "nothing seen yet" sentinel, so a distinct call with
 * no column refs does not pin the common set — note this means such calls are
 * effectively skipped, matching the original behavior.
 */
public static Optional<List<ColumnRefOperator>> extractCommonDistinctCols(Collection<CallOperator> aggCallOperators) {
    Set<ColumnRefOperator> commonCols = Sets.newHashSet();
    for (CallOperator call : aggCallOperators) {
        if (!call.isDistinct()) {
            continue;
        }
        Set<ColumnRefOperator> cols = Sets.newHashSet(call.getColumnRefs());
        if (commonCols.isEmpty()) {
            commonCols = cols;
        } else if (!SetUtils.isEqualSet(commonCols, cols)) {
            return Optional.empty();
        }
    }
    return Optional.of(Lists.newArrayList(commonCols));
}
/**
 * Recursively checks whether any descendant of {@code operator} is a call to a
 * function listed in {@code FunctionSet.nonDeterministicFunctions}.
 * NOTE(review): only children are inspected — the root operator itself is never
 * checked, presumably because callers pass the enclosing expression; confirm.
 */
public static boolean hasNonDeterministicFunc(ScalarOperator operator) {
    return operator.getChildren().stream().anyMatch(child -> {
        if (child instanceof CallOperator
                && FunctionSet.nonDeterministicFunctions.contains(((CallOperator) child).getFnName())) {
            return true;
        }
        return hasNonDeterministicFunc(child);
    });
}
/**
 * Computes and attaches statistics bottom-up over the whole expression tree:
 * children are estimated first so parents can derive from child statistics.
 * Tree-anchor operators carry no statistics and are skipped. An estimation
 * failure is logged and leaves that expression without statistics.
 */
public static void calculateStatistics(OptExpression expr, OptimizerContext context) {
    expr.getInputs().forEach(child -> calculateStatistics(child, context));
    if (expr.getOp() instanceof LogicalTreeAnchorOperator) {
        return;
    }
    ExpressionContext exprContext = new ExpressionContext(expr);
    StatisticsCalculator calculator =
            new StatisticsCalculator(exprContext, context.getColumnRefFactory(), context);
    try {
        calculator.estimatorStats();
    } catch (Exception e) {
        LOG.warn("Failed to calculate statistics for expression: {}", expr, e);
        return;
    }
    expr.setStatistics(exprContext.getStatistics());
}
/**
 * Pushes a new projection onto {@code input}, composing it with any projection the
 * operator already carries (new expressions are rewritten in terms of the existing
 * projection's output refs). NOTE: mutates the operator of {@code input} in place
 * and returns the same expression instance.
 *
 * @param input            input expression (mutated in place)
 * @param newProjectionMap new project map to be pushed down into input
 * @return {@code input}, with the merged projection attached
 */
public static OptExpression mergeProjection(OptExpression input,
                                            Map<ColumnRefOperator, ScalarOperator> newProjectionMap) {
    if (newProjectionMap == null || newProjectionMap.isEmpty()) {
        return input;
    }
    Operator op = input.getOp();
    Projection existing = op.getProjection();
    if (existing == null || existing.getColumnRefMap().isEmpty()) {
        op.setProjection(new Projection(newProjectionMap));
        return input;
    }
    // Compose the two projections: express the new outputs via the old ones.
    ReplaceColumnRefRewriter rewriter = new ReplaceColumnRefRewriter(existing.getColumnRefMap());
    Map<ColumnRefOperator, ScalarOperator> merged = Maps.newHashMap();
    newProjectionMap.forEach((colRef, expr) -> merged.put(colRef, rewriter.rewrite(expr)));
    op.setProjection(new Projection(merged));
    return input;
}
/**
* Check if the operator has applied the rule
* @param op input operator to be checked
* @param ruleMask specific rule mask
* @return true if the operator has applied the rule, false otherwise
*/
/**
 * Marks {@code op} as having applied the given rule by OR-ing the rule bit(s)
 * into the operator's rule mask. No-op for a null operator.
 *
 * @param op       input operator
 * @param ruleMask specific rule mask
 */
public static void setOpAppliedRule(Operator op, int ruleMask) {
    if (op != null) {
        op.setOpRuleMask(op.getOpRuleMask() | ruleMask);
    }
}
/**
 * Clears the given rule bit(s) from the operator's rule mask. No-op for a null operator.
 *
 * @param op       input operator
 * @param ruleMask specific rule mask to reset
 */
public static void resetOpAppliedRule(Operator op, int ruleMask) {
    if (op == null) {
        return;
    }
    // Bug fix: resetting a bit requires AND-NOT. The previous `mask | (~ruleMask)`
    // instead set every bit *outside* ruleMask, leaving the rule's bits untouched.
    op.setOpRuleMask(op.getOpRuleMask() & ~ruleMask);
}
/**
 * Check if the optExpression or any of its descendants has applied the rule.
 *
 * @param optExpression input optExpression to be checked (null returns false)
 * @param ruleMask      specific rule mask
 * @return true if the optExpression or its children have applied the rule, false otherwise
 */
public static boolean isOptHasAppliedRule(OptExpression optExpression, int ruleMask) {
    if (optExpression == null) {
        return false;
    }
    return isOpAppliedRule(optExpression.getOp(), ruleMask)
            || optExpression.getInputs().stream()
                    .anyMatch(child -> isOptHasAppliedRule(child, ruleMask));
}
} |
.. but you're saying that's irrelevant, and that id is the only thing that matters (which I was I was expecting, tbh.) | private List<Node> prepareNodes(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
List<Node> surplusNodes = findNodesInRemovableGroups(application, cluster, wantedGroups);
NodeList nodesInCluster = nodeRepository.nodes().list(Node.State.allocatedStates().toArray(new Node.State[0]))
.owner(application)
.matching(node -> node.allocation().get().membership().cluster().satisfies(cluster));
NodeIndices indices = new NodeIndices(nodesInCluster, ! cluster.type().isContent());
List<Node> acceptedNodes = new ArrayList<>();
for (int groupIndex = 0; groupIndex < wantedGroups; groupIndex++) {
ClusterSpec clusterGroup = cluster.with(Optional.of(ClusterSpec.Group.from(groupIndex)));
List<Node> accepted = groupPreparer.prepare(application, clusterGroup,
requestedNodes.fraction(wantedGroups), surplusNodes,
indices, wantedGroups);
replace(acceptedNodes, accepted);
}
moveToActiveGroup(surplusNodes, wantedGroups, cluster.group());
acceptedNodes.removeAll(surplusNodes);
return acceptedNodes;
} | NodeList nodesInCluster = nodeRepository.nodes().list(Node.State.allocatedStates().toArray(new Node.State[0])) | private List<Node> prepareNodes(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
List<Node> surplusNodes = findNodesInRemovableGroups(application, cluster, wantedGroups);
List<Integer> usedIndices = nodeRepository.nodes().list()
.owner(application)
.cluster(cluster.id())
.mapToList(node -> node.allocation().get().membership().index());
NodeIndices indices = new NodeIndices(usedIndices, ! cluster.type().isContent());
List<Node> acceptedNodes = new ArrayList<>();
for (int groupIndex = 0; groupIndex < wantedGroups; groupIndex++) {
ClusterSpec clusterGroup = cluster.with(Optional.of(ClusterSpec.Group.from(groupIndex)));
List<Node> accepted = groupPreparer.prepare(application, clusterGroup,
requestedNodes.fraction(wantedGroups), surplusNodes,
indices, wantedGroups);
replace(acceptedNodes, accepted);
}
moveToActiveGroup(surplusNodes, wantedGroups, cluster.group());
acceptedNodes.removeAll(surplusNodes);
return acceptedNodes;
} | class Preparer {
private final NodeRepository nodeRepository;
private final GroupPreparer groupPreparer;
private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner;
public Preparer(NodeRepository nodeRepository, FlagSource flagSource, Optional<HostProvisioner> hostProvisioner,
Optional<LoadBalancerProvisioner> loadBalancerProvisioner) {
this.nodeRepository = nodeRepository;
this.loadBalancerProvisioner = loadBalancerProvisioner;
this.groupPreparer = new GroupPreparer(nodeRepository, hostProvisioner, flagSource);
}
/** Prepare all required resources for the given application and cluster */
public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
try {
var nodes = prepareNodes(application, cluster, requestedNodes, wantedGroups);
prepareLoadBalancer(application, cluster, requestedNodes);
return nodes;
}
catch (OutOfCapacityException e) {
throw new OutOfCapacityException("Could not satisfy " + requestedNodes +
( wantedGroups > 1 ? " (in " + wantedGroups + " groups)" : "") +
" in " + application + " " + cluster +
": " + e.getMessage());
}
}
/**
* Ensure sufficient nodes are reserved or active for the given application and cluster
*
* @return the list of nodes this cluster will have allocated if activated
*/
/** Prepare a load balancer for given application and cluster */
public void prepareLoadBalancer(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) {
loadBalancerProvisioner.ifPresent(provisioner -> provisioner.prepare(application, cluster, requestedNodes));
}
/**
* Returns a list of the nodes which are
* in groups with index number above or equal the group count
*/
private List<Node> findNodesInRemovableGroups(ApplicationId application, ClusterSpec requestedCluster, int wantedGroups) {
List<Node> surplusNodes = new ArrayList<>(0);
for (Node node : nodeRepository.nodes().list(Node.State.active).owner(application)) {
ClusterSpec nodeCluster = node.allocation().get().membership().cluster();
if ( ! nodeCluster.id().equals(requestedCluster.id())) continue;
if ( ! nodeCluster.type().equals(requestedCluster.type())) continue;
if (nodeCluster.group().get().index() >= wantedGroups)
surplusNodes.add(node);
}
return surplusNodes;
}
/** Move nodes from unwanted groups to wanted groups to avoid lingering groups consisting of retired nodes */
private void moveToActiveGroup(List<Node> surplusNodes, int wantedGroups, Optional<ClusterSpec.Group> targetGroup) {
for (ListIterator<Node> i = surplusNodes.listIterator(); i.hasNext(); ) {
Node node = i.next();
ClusterMembership membership = node.allocation().get().membership();
ClusterSpec cluster = membership.cluster();
if (cluster.group().get().index() >= wantedGroups) {
ClusterSpec.Group newGroup = targetGroup.orElse(ClusterSpec.Group.from(0));
ClusterMembership newGroupMembership = membership.with(cluster.with(Optional.of(newGroup)));
i.set(node.with(node.allocation().get().with(newGroupMembership)));
}
}
}
/**
* Nodes are immutable so when changing attributes to the node we create a new instance.
*
* This method is used to both add new nodes and replaces old node references with the new references.
*/
private List<Node> replace(List<Node> list, List<Node> changed) {
list.removeAll(changed);
list.addAll(changed);
return list;
}
} | class Preparer {
private final NodeRepository nodeRepository;
private final GroupPreparer groupPreparer;
private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner;
public Preparer(NodeRepository nodeRepository, FlagSource flagSource, Optional<HostProvisioner> hostProvisioner,
Optional<LoadBalancerProvisioner> loadBalancerProvisioner) {
this.nodeRepository = nodeRepository;
this.loadBalancerProvisioner = loadBalancerProvisioner;
this.groupPreparer = new GroupPreparer(nodeRepository, hostProvisioner, flagSource);
}
/** Prepare all required resources for the given application and cluster */
public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
try {
var nodes = prepareNodes(application, cluster, requestedNodes, wantedGroups);
prepareLoadBalancer(application, cluster, requestedNodes);
return nodes;
}
catch (OutOfCapacityException e) {
throw new OutOfCapacityException("Could not satisfy " + requestedNodes +
( wantedGroups > 1 ? " (in " + wantedGroups + " groups)" : "") +
" in " + application + " " + cluster +
": " + e.getMessage());
}
}
/**
* Ensure sufficient nodes are reserved or active for the given application and cluster
*
* @return the list of nodes this cluster will have allocated if activated
*/
/** Prepare a load balancer for given application and cluster */
public void prepareLoadBalancer(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) {
loadBalancerProvisioner.ifPresent(provisioner -> provisioner.prepare(application, cluster, requestedNodes));
}
/**
* Returns a list of the nodes which are
* in groups with index number above or equal the group count
*/
private List<Node> findNodesInRemovableGroups(ApplicationId application, ClusterSpec requestedCluster, int wantedGroups) {
List<Node> surplusNodes = new ArrayList<>(0);
for (Node node : nodeRepository.nodes().list(Node.State.active).owner(application)) {
ClusterSpec nodeCluster = node.allocation().get().membership().cluster();
if ( ! nodeCluster.id().equals(requestedCluster.id())) continue;
if ( ! nodeCluster.type().equals(requestedCluster.type())) continue;
if (nodeCluster.group().get().index() >= wantedGroups)
surplusNodes.add(node);
}
return surplusNodes;
}
/** Move nodes from unwanted groups to wanted groups to avoid lingering groups consisting of retired nodes */
private void moveToActiveGroup(List<Node> surplusNodes, int wantedGroups, Optional<ClusterSpec.Group> targetGroup) {
for (ListIterator<Node> i = surplusNodes.listIterator(); i.hasNext(); ) {
Node node = i.next();
ClusterMembership membership = node.allocation().get().membership();
ClusterSpec cluster = membership.cluster();
if (cluster.group().get().index() >= wantedGroups) {
ClusterSpec.Group newGroup = targetGroup.orElse(ClusterSpec.Group.from(0));
ClusterMembership newGroupMembership = membership.with(cluster.with(Optional.of(newGroup)));
i.set(node.with(node.allocation().get().with(newGroupMembership)));
}
}
}
/**
* Nodes are immutable so when changing attributes to the node we create a new instance.
*
* This method is used to both add new nodes and replaces old node references with the new references.
*/
private List<Node> replace(List<Node> list, List<Node> changed) {
list.removeAll(changed);
list.addAll(changed);
return list;
}
} |
Yes (and also that these filters out unallocated). | private List<Node> prepareNodes(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
List<Node> surplusNodes = findNodesInRemovableGroups(application, cluster, wantedGroups);
NodeList nodesInCluster = nodeRepository.nodes().list(Node.State.allocatedStates().toArray(new Node.State[0]))
.owner(application)
.matching(node -> node.allocation().get().membership().cluster().satisfies(cluster));
NodeIndices indices = new NodeIndices(nodesInCluster, ! cluster.type().isContent());
List<Node> acceptedNodes = new ArrayList<>();
for (int groupIndex = 0; groupIndex < wantedGroups; groupIndex++) {
ClusterSpec clusterGroup = cluster.with(Optional.of(ClusterSpec.Group.from(groupIndex)));
List<Node> accepted = groupPreparer.prepare(application, clusterGroup,
requestedNodes.fraction(wantedGroups), surplusNodes,
indices, wantedGroups);
replace(acceptedNodes, accepted);
}
moveToActiveGroup(surplusNodes, wantedGroups, cluster.group());
acceptedNodes.removeAll(surplusNodes);
return acceptedNodes;
} | NodeList nodesInCluster = nodeRepository.nodes().list(Node.State.allocatedStates().toArray(new Node.State[0])) | private List<Node> prepareNodes(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
List<Node> surplusNodes = findNodesInRemovableGroups(application, cluster, wantedGroups);
List<Integer> usedIndices = nodeRepository.nodes().list()
.owner(application)
.cluster(cluster.id())
.mapToList(node -> node.allocation().get().membership().index());
NodeIndices indices = new NodeIndices(usedIndices, ! cluster.type().isContent());
List<Node> acceptedNodes = new ArrayList<>();
for (int groupIndex = 0; groupIndex < wantedGroups; groupIndex++) {
ClusterSpec clusterGroup = cluster.with(Optional.of(ClusterSpec.Group.from(groupIndex)));
List<Node> accepted = groupPreparer.prepare(application, clusterGroup,
requestedNodes.fraction(wantedGroups), surplusNodes,
indices, wantedGroups);
replace(acceptedNodes, accepted);
}
moveToActiveGroup(surplusNodes, wantedGroups, cluster.group());
acceptedNodes.removeAll(surplusNodes);
return acceptedNodes;
} | class Preparer {
private final NodeRepository nodeRepository;
private final GroupPreparer groupPreparer;
private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner;
public Preparer(NodeRepository nodeRepository, FlagSource flagSource, Optional<HostProvisioner> hostProvisioner,
Optional<LoadBalancerProvisioner> loadBalancerProvisioner) {
this.nodeRepository = nodeRepository;
this.loadBalancerProvisioner = loadBalancerProvisioner;
this.groupPreparer = new GroupPreparer(nodeRepository, hostProvisioner, flagSource);
}
/** Prepare all required resources for the given application and cluster */
public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
try {
var nodes = prepareNodes(application, cluster, requestedNodes, wantedGroups);
prepareLoadBalancer(application, cluster, requestedNodes);
return nodes;
}
catch (OutOfCapacityException e) {
throw new OutOfCapacityException("Could not satisfy " + requestedNodes +
( wantedGroups > 1 ? " (in " + wantedGroups + " groups)" : "") +
" in " + application + " " + cluster +
": " + e.getMessage());
}
}
/**
* Ensure sufficient nodes are reserved or active for the given application and cluster
*
* @return the list of nodes this cluster will have allocated if activated
*/
/** Prepare a load balancer for given application and cluster */
public void prepareLoadBalancer(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) {
loadBalancerProvisioner.ifPresent(provisioner -> provisioner.prepare(application, cluster, requestedNodes));
}
/**
* Returns a list of the nodes which are
* in groups with index number above or equal the group count
*/
private List<Node> findNodesInRemovableGroups(ApplicationId application, ClusterSpec requestedCluster, int wantedGroups) {
List<Node> surplusNodes = new ArrayList<>(0);
for (Node node : nodeRepository.nodes().list(Node.State.active).owner(application)) {
ClusterSpec nodeCluster = node.allocation().get().membership().cluster();
if ( ! nodeCluster.id().equals(requestedCluster.id())) continue;
if ( ! nodeCluster.type().equals(requestedCluster.type())) continue;
if (nodeCluster.group().get().index() >= wantedGroups)
surplusNodes.add(node);
}
return surplusNodes;
}
/** Move nodes from unwanted groups to wanted groups to avoid lingering groups consisting of retired nodes */
private void moveToActiveGroup(List<Node> surplusNodes, int wantedGroups, Optional<ClusterSpec.Group> targetGroup) {
for (ListIterator<Node> i = surplusNodes.listIterator(); i.hasNext(); ) {
Node node = i.next();
ClusterMembership membership = node.allocation().get().membership();
ClusterSpec cluster = membership.cluster();
if (cluster.group().get().index() >= wantedGroups) {
ClusterSpec.Group newGroup = targetGroup.orElse(ClusterSpec.Group.from(0));
ClusterMembership newGroupMembership = membership.with(cluster.with(Optional.of(newGroup)));
i.set(node.with(node.allocation().get().with(newGroupMembership)));
}
}
}
/**
* Nodes are immutable so when changing attributes to the node we create a new instance.
*
* This method is used to both add new nodes and replaces old node references with the new references.
*/
private List<Node> replace(List<Node> list, List<Node> changed) {
list.removeAll(changed);
list.addAll(changed);
return list;
}
} | class Preparer {
private final NodeRepository nodeRepository;
private final GroupPreparer groupPreparer;
private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner;
public Preparer(NodeRepository nodeRepository, FlagSource flagSource, Optional<HostProvisioner> hostProvisioner,
Optional<LoadBalancerProvisioner> loadBalancerProvisioner) {
this.nodeRepository = nodeRepository;
this.loadBalancerProvisioner = loadBalancerProvisioner;
this.groupPreparer = new GroupPreparer(nodeRepository, hostProvisioner, flagSource);
}
/** Prepare all required resources for the given application and cluster */
public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
try {
var nodes = prepareNodes(application, cluster, requestedNodes, wantedGroups);
prepareLoadBalancer(application, cluster, requestedNodes);
return nodes;
}
catch (OutOfCapacityException e) {
throw new OutOfCapacityException("Could not satisfy " + requestedNodes +
( wantedGroups > 1 ? " (in " + wantedGroups + " groups)" : "") +
" in " + application + " " + cluster +
": " + e.getMessage());
}
}
/**
* Ensure sufficient nodes are reserved or active for the given application and cluster
*
* @return the list of nodes this cluster will have allocated if activated
*/
/** Prepare a load balancer for given application and cluster */
public void prepareLoadBalancer(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) {
loadBalancerProvisioner.ifPresent(provisioner -> provisioner.prepare(application, cluster, requestedNodes));
}
/**
* Returns a list of the nodes which are
* in groups with index number above or equal the group count
*/
private List<Node> findNodesInRemovableGroups(ApplicationId application, ClusterSpec requestedCluster, int wantedGroups) {
List<Node> surplusNodes = new ArrayList<>(0);
for (Node node : nodeRepository.nodes().list(Node.State.active).owner(application)) {
ClusterSpec nodeCluster = node.allocation().get().membership().cluster();
if ( ! nodeCluster.id().equals(requestedCluster.id())) continue;
if ( ! nodeCluster.type().equals(requestedCluster.type())) continue;
if (nodeCluster.group().get().index() >= wantedGroups)
surplusNodes.add(node);
}
return surplusNodes;
}
/** Move nodes from unwanted groups to wanted groups to avoid lingering groups consisting of retired nodes */
private void moveToActiveGroup(List<Node> surplusNodes, int wantedGroups, Optional<ClusterSpec.Group> targetGroup) {
for (ListIterator<Node> i = surplusNodes.listIterator(); i.hasNext(); ) {
Node node = i.next();
ClusterMembership membership = node.allocation().get().membership();
ClusterSpec cluster = membership.cluster();
if (cluster.group().get().index() >= wantedGroups) {
ClusterSpec.Group newGroup = targetGroup.orElse(ClusterSpec.Group.from(0));
ClusterMembership newGroupMembership = membership.with(cluster.with(Optional.of(newGroup)));
i.set(node.with(node.allocation().get().with(newGroupMembership)));
}
}
}
/**
* Nodes are immutable so when changing attributes to the node we create a new instance.
*
* This method is used to both add new nodes and replaces old node references with the new references.
*/
private List<Node> replace(List<Node> list, List<Node> changed) {
list.removeAll(changed);
list.addAll(changed);
return list;
}
} |
Thanks, good point. | private List<Node> prepareNodes(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
List<Node> surplusNodes = findNodesInRemovableGroups(application, cluster, wantedGroups);
NodeList nodesInCluster = nodeRepository.nodes().list(Node.State.allocatedStates().toArray(new Node.State[0]))
.owner(application)
.matching(node -> node.allocation().get().membership().cluster().satisfies(cluster));
NodeIndices indices = new NodeIndices(nodesInCluster, ! cluster.type().isContent());
List<Node> acceptedNodes = new ArrayList<>();
for (int groupIndex = 0; groupIndex < wantedGroups; groupIndex++) {
ClusterSpec clusterGroup = cluster.with(Optional.of(ClusterSpec.Group.from(groupIndex)));
List<Node> accepted = groupPreparer.prepare(application, clusterGroup,
requestedNodes.fraction(wantedGroups), surplusNodes,
indices, wantedGroups);
replace(acceptedNodes, accepted);
}
moveToActiveGroup(surplusNodes, wantedGroups, cluster.group());
acceptedNodes.removeAll(surplusNodes);
return acceptedNodes;
} | NodeList nodesInCluster = nodeRepository.nodes().list(Node.State.allocatedStates().toArray(new Node.State[0])) | private List<Node> prepareNodes(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
List<Node> surplusNodes = findNodesInRemovableGroups(application, cluster, wantedGroups);
List<Integer> usedIndices = nodeRepository.nodes().list()
.owner(application)
.cluster(cluster.id())
.mapToList(node -> node.allocation().get().membership().index());
NodeIndices indices = new NodeIndices(usedIndices, ! cluster.type().isContent());
List<Node> acceptedNodes = new ArrayList<>();
for (int groupIndex = 0; groupIndex < wantedGroups; groupIndex++) {
ClusterSpec clusterGroup = cluster.with(Optional.of(ClusterSpec.Group.from(groupIndex)));
List<Node> accepted = groupPreparer.prepare(application, clusterGroup,
requestedNodes.fraction(wantedGroups), surplusNodes,
indices, wantedGroups);
replace(acceptedNodes, accepted);
}
moveToActiveGroup(surplusNodes, wantedGroups, cluster.group());
acceptedNodes.removeAll(surplusNodes);
return acceptedNodes;
} | class Preparer {
private final NodeRepository nodeRepository;
private final GroupPreparer groupPreparer;
private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner;
public Preparer(NodeRepository nodeRepository, FlagSource flagSource, Optional<HostProvisioner> hostProvisioner,
Optional<LoadBalancerProvisioner> loadBalancerProvisioner) {
this.nodeRepository = nodeRepository;
this.loadBalancerProvisioner = loadBalancerProvisioner;
this.groupPreparer = new GroupPreparer(nodeRepository, hostProvisioner, flagSource);
}
/** Prepare all required resources for the given application and cluster */
public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
try {
var nodes = prepareNodes(application, cluster, requestedNodes, wantedGroups);
prepareLoadBalancer(application, cluster, requestedNodes);
return nodes;
}
catch (OutOfCapacityException e) {
throw new OutOfCapacityException("Could not satisfy " + requestedNodes +
( wantedGroups > 1 ? " (in " + wantedGroups + " groups)" : "") +
" in " + application + " " + cluster +
": " + e.getMessage());
}
}
/**
* Ensure sufficient nodes are reserved or active for the given application and cluster
*
* @return the list of nodes this cluster will have allocated if activated
*/
/** Prepare a load balancer for given application and cluster */
public void prepareLoadBalancer(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) {
loadBalancerProvisioner.ifPresent(provisioner -> provisioner.prepare(application, cluster, requestedNodes));
}
/**
* Returns a list of the nodes which are
* in groups with index number above or equal the group count
*/
private List<Node> findNodesInRemovableGroups(ApplicationId application, ClusterSpec requestedCluster, int wantedGroups) {
List<Node> surplusNodes = new ArrayList<>(0);
for (Node node : nodeRepository.nodes().list(Node.State.active).owner(application)) {
ClusterSpec nodeCluster = node.allocation().get().membership().cluster();
if ( ! nodeCluster.id().equals(requestedCluster.id())) continue;
if ( ! nodeCluster.type().equals(requestedCluster.type())) continue;
if (nodeCluster.group().get().index() >= wantedGroups)
surplusNodes.add(node);
}
return surplusNodes;
}
/** Move nodes from unwanted groups to wanted groups to avoid lingering groups consisting of retired nodes */
private void moveToActiveGroup(List<Node> surplusNodes, int wantedGroups, Optional<ClusterSpec.Group> targetGroup) {
for (ListIterator<Node> i = surplusNodes.listIterator(); i.hasNext(); ) {
Node node = i.next();
ClusterMembership membership = node.allocation().get().membership();
ClusterSpec cluster = membership.cluster();
if (cluster.group().get().index() >= wantedGroups) {
ClusterSpec.Group newGroup = targetGroup.orElse(ClusterSpec.Group.from(0));
ClusterMembership newGroupMembership = membership.with(cluster.with(Optional.of(newGroup)));
i.set(node.with(node.allocation().get().with(newGroupMembership)));
}
}
}
/**
* Nodes are immutable so when changing attributes to the node we create a new instance.
*
* This method is used to both add new nodes and replaces old node references with the new references.
*/
private List<Node> replace(List<Node> list, List<Node> changed) {
list.removeAll(changed);
list.addAll(changed);
return list;
}
} | class Preparer {
private final NodeRepository nodeRepository;
private final GroupPreparer groupPreparer;
private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner;
public Preparer(NodeRepository nodeRepository, FlagSource flagSource, Optional<HostProvisioner> hostProvisioner,
Optional<LoadBalancerProvisioner> loadBalancerProvisioner) {
this.nodeRepository = nodeRepository;
this.loadBalancerProvisioner = loadBalancerProvisioner;
this.groupPreparer = new GroupPreparer(nodeRepository, hostProvisioner, flagSource);
}
/** Prepare all required resources for the given application and cluster */
public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
try {
var nodes = prepareNodes(application, cluster, requestedNodes, wantedGroups);
prepareLoadBalancer(application, cluster, requestedNodes);
return nodes;
}
catch (OutOfCapacityException e) {
throw new OutOfCapacityException("Could not satisfy " + requestedNodes +
( wantedGroups > 1 ? " (in " + wantedGroups + " groups)" : "") +
" in " + application + " " + cluster +
": " + e.getMessage());
}
}
/**
* Ensure sufficient nodes are reserved or active for the given application and cluster
*
* @return the list of nodes this cluster will have allocated if activated
*/
/** Prepare a load balancer for given application and cluster */
public void prepareLoadBalancer(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) {
loadBalancerProvisioner.ifPresent(provisioner -> provisioner.prepare(application, cluster, requestedNodes));
}
/**
* Returns a list of the nodes which are
* in groups with index number above or equal the group count
*/
private List<Node> findNodesInRemovableGroups(ApplicationId application, ClusterSpec requestedCluster, int wantedGroups) {
List<Node> surplusNodes = new ArrayList<>(0);
for (Node node : nodeRepository.nodes().list(Node.State.active).owner(application)) {
ClusterSpec nodeCluster = node.allocation().get().membership().cluster();
if ( ! nodeCluster.id().equals(requestedCluster.id())) continue;
if ( ! nodeCluster.type().equals(requestedCluster.type())) continue;
if (nodeCluster.group().get().index() >= wantedGroups)
surplusNodes.add(node);
}
return surplusNodes;
}
/** Move nodes from unwanted groups to wanted groups to avoid lingering groups consisting of retired nodes */
private void moveToActiveGroup(List<Node> surplusNodes, int wantedGroups, Optional<ClusterSpec.Group> targetGroup) {
for (ListIterator<Node> i = surplusNodes.listIterator(); i.hasNext(); ) {
Node node = i.next();
ClusterMembership membership = node.allocation().get().membership();
ClusterSpec cluster = membership.cluster();
if (cluster.group().get().index() >= wantedGroups) {
ClusterSpec.Group newGroup = targetGroup.orElse(ClusterSpec.Group.from(0));
ClusterMembership newGroupMembership = membership.with(cluster.with(Optional.of(newGroup)));
i.set(node.with(node.allocation().get().with(newGroupMembership)));
}
}
}
/**
* Nodes are immutable so when changing attributes to the node we create a new instance.
*
* This method is used to both add new nodes and replaces old node references with the new references.
*/
private List<Node> replace(List<Node> list, List<Node> changed) {
list.removeAll(changed);
list.addAll(changed);
return list;
}
} |
Hm. I find it a little unfortunate that we will upload this file with that name - in theory, if the host h432 remains, a new h432a could be allocated for the same app, then be retired and produce a new vespa.log file - and being outside the time-based naming scheme makes it more difficult to include this file when processing logs. Perhaps we should instead always ensure it gets rotated first? | public void vespa_logs() {
assertForLogFile(vespaLogPath1, null, null, true);
assertForLogFile(vespaLogPath1, "s3:
assertForLogFile(vespaLogPath2, "s3:
assertForLogFile(vespaLogPath2, "s3:
} | assertForLogFile(vespaLogPath1, "s3: | public void vespa_logs() {
assertForLogFile(vespaLogPath1, null, null, true);
assertForLogFile(vespaLogPath1, "s3:
assertForLogFile(vespaLogPath2, "s3:
assertForLogFile(vespaLogPath2, "s3:
} | class SyncFileInfoTest {
private static final FileSystem fileSystem = TestFileSystem.create();
private static final URI nodeArchiveUri = URI.create("s3:
private static final Path accessLogPath1 = fileSystem.getPath("/opt/vespa/logs/qrs/access.log.20210211");
private static final Path accessLogPath2 = fileSystem.getPath("/opt/vespa/logs/qrs/access.log.20210212.zst");
private static final Path accessLogPath3 = fileSystem.getPath("/opt/vespa/logs/qrs/access-json.log.20210213.zst");
private static final Path accessLogPath4 = fileSystem.getPath("/opt/vespa/logs/qrs/JsonAccessLog.default.20210214.zst");
private static final Path connectionLogPath1 = fileSystem.getPath("/opt/vespa/logs/qrs/ConnectionLog.default.20210210");
private static final Path connectionLogPath2 = fileSystem.getPath("/opt/vespa/logs/qrs/ConnectionLog.default.20210212.zst");
private static final Path vespaLogPath1 = fileSystem.getPath("/opt/vespa/logs/vespa.log");
private static final Path vespaLogPath2 = fileSystem.getPath("/opt/vespa/logs/vespa.log-2021-02-12");
@Test
public void access_logs() {
assertForLogFile(accessLogPath1, null, null, true);
assertForLogFile(accessLogPath1, "s3:
assertForLogFile(accessLogPath2, "s3:
assertForLogFile(accessLogPath2, "s3:
assertForLogFile(accessLogPath3, "s3:
assertForLogFile(accessLogPath3, "s3:
assertForLogFile(accessLogPath4, "s3:
assertForLogFile(accessLogPath4, "s3:
}
@Test
public void connection_logs() {
assertForLogFile(connectionLogPath1, null, null, true);
assertForLogFile(connectionLogPath1, "s3:
assertForLogFile(connectionLogPath2, "s3:
assertForLogFile(connectionLogPath2, "s3:
}
@Test
private static void assertForLogFile(Path srcPath, String destination, SyncFileInfo.Compression compression, boolean rotatedOnly) {
Optional<SyncFileInfo> sfi = SyncFileInfo.forLogFile(nodeArchiveUri, srcPath, rotatedOnly);
assertEquals(destination, sfi.map(SyncFileInfo::destination).map(URI::toString).orElse(null));
assertEquals(compression, sfi.map(SyncFileInfo::uploadCompression).orElse(null));
}
} | class SyncFileInfoTest {
private static final FileSystem fileSystem = TestFileSystem.create();
private static final URI nodeArchiveUri = URI.create("s3:
private static final Path accessLogPath1 = fileSystem.getPath("/opt/vespa/logs/qrs/access.log.20210211");
private static final Path accessLogPath2 = fileSystem.getPath("/opt/vespa/logs/qrs/access.log.20210212.zst");
private static final Path accessLogPath3 = fileSystem.getPath("/opt/vespa/logs/qrs/access-json.log.20210213.zst");
private static final Path accessLogPath4 = fileSystem.getPath("/opt/vespa/logs/qrs/JsonAccessLog.default.20210214.zst");
private static final Path connectionLogPath1 = fileSystem.getPath("/opt/vespa/logs/qrs/ConnectionLog.default.20210210");
private static final Path connectionLogPath2 = fileSystem.getPath("/opt/vespa/logs/qrs/ConnectionLog.default.20210212.zst");
private static final Path vespaLogPath1 = fileSystem.getPath("/opt/vespa/logs/vespa.log");
private static final Path vespaLogPath2 = fileSystem.getPath("/opt/vespa/logs/vespa.log-2021-02-12");
@Test
public void access_logs() {
assertForLogFile(accessLogPath1, null, null, true);
assertForLogFile(accessLogPath1, "s3:
assertForLogFile(accessLogPath2, "s3:
assertForLogFile(accessLogPath2, "s3:
assertForLogFile(accessLogPath3, "s3:
assertForLogFile(accessLogPath3, "s3:
assertForLogFile(accessLogPath4, "s3:
assertForLogFile(accessLogPath4, "s3:
}
@Test
public void connection_logs() {
assertForLogFile(connectionLogPath1, null, null, true);
assertForLogFile(connectionLogPath1, "s3:
assertForLogFile(connectionLogPath2, "s3:
assertForLogFile(connectionLogPath2, "s3:
}
@Test
private static void assertForLogFile(Path srcPath, String destination, SyncFileInfo.Compression compression, boolean rotatedOnly) {
Optional<SyncFileInfo> sfi = SyncFileInfo.forLogFile(nodeArchiveUri, srcPath, rotatedOnly);
assertEquals(destination, sfi.map(SyncFileInfo::destination).map(URI::toString).orElse(null));
assertEquals(compression, sfi.map(SyncFileInfo::uploadCompression).orElse(null));
}
} |
That would be great, but I don't see how that can be done, other than node-admin rotating it manually. In that case it'd be easier to just upload it with a different name? | public void vespa_logs() {
assertForLogFile(vespaLogPath1, null, null, true);
assertForLogFile(vespaLogPath1, "s3:
assertForLogFile(vespaLogPath2, "s3:
assertForLogFile(vespaLogPath2, "s3:
} | assertForLogFile(vespaLogPath1, "s3: | public void vespa_logs() {
assertForLogFile(vespaLogPath1, null, null, true);
assertForLogFile(vespaLogPath1, "s3:
assertForLogFile(vespaLogPath2, "s3:
assertForLogFile(vespaLogPath2, "s3:
} | class SyncFileInfoTest {
private static final FileSystem fileSystem = TestFileSystem.create();
private static final URI nodeArchiveUri = URI.create("s3:
private static final Path accessLogPath1 = fileSystem.getPath("/opt/vespa/logs/qrs/access.log.20210211");
private static final Path accessLogPath2 = fileSystem.getPath("/opt/vespa/logs/qrs/access.log.20210212.zst");
private static final Path accessLogPath3 = fileSystem.getPath("/opt/vespa/logs/qrs/access-json.log.20210213.zst");
private static final Path accessLogPath4 = fileSystem.getPath("/opt/vespa/logs/qrs/JsonAccessLog.default.20210214.zst");
private static final Path connectionLogPath1 = fileSystem.getPath("/opt/vespa/logs/qrs/ConnectionLog.default.20210210");
private static final Path connectionLogPath2 = fileSystem.getPath("/opt/vespa/logs/qrs/ConnectionLog.default.20210212.zst");
private static final Path vespaLogPath1 = fileSystem.getPath("/opt/vespa/logs/vespa.log");
private static final Path vespaLogPath2 = fileSystem.getPath("/opt/vespa/logs/vespa.log-2021-02-12");
@Test
public void access_logs() {
assertForLogFile(accessLogPath1, null, null, true);
assertForLogFile(accessLogPath1, "s3:
assertForLogFile(accessLogPath2, "s3:
assertForLogFile(accessLogPath2, "s3:
assertForLogFile(accessLogPath3, "s3:
assertForLogFile(accessLogPath3, "s3:
assertForLogFile(accessLogPath4, "s3:
assertForLogFile(accessLogPath4, "s3:
}
@Test
public void connection_logs() {
assertForLogFile(connectionLogPath1, null, null, true);
assertForLogFile(connectionLogPath1, "s3:
assertForLogFile(connectionLogPath2, "s3:
assertForLogFile(connectionLogPath2, "s3:
}
@Test
private static void assertForLogFile(Path srcPath, String destination, SyncFileInfo.Compression compression, boolean rotatedOnly) {
Optional<SyncFileInfo> sfi = SyncFileInfo.forLogFile(nodeArchiveUri, srcPath, rotatedOnly);
assertEquals(destination, sfi.map(SyncFileInfo::destination).map(URI::toString).orElse(null));
assertEquals(compression, sfi.map(SyncFileInfo::uploadCompression).orElse(null));
}
} | class SyncFileInfoTest {
private static final FileSystem fileSystem = TestFileSystem.create();
private static final URI nodeArchiveUri = URI.create("s3:
private static final Path accessLogPath1 = fileSystem.getPath("/opt/vespa/logs/qrs/access.log.20210211");
private static final Path accessLogPath2 = fileSystem.getPath("/opt/vespa/logs/qrs/access.log.20210212.zst");
private static final Path accessLogPath3 = fileSystem.getPath("/opt/vespa/logs/qrs/access-json.log.20210213.zst");
private static final Path accessLogPath4 = fileSystem.getPath("/opt/vespa/logs/qrs/JsonAccessLog.default.20210214.zst");
private static final Path connectionLogPath1 = fileSystem.getPath("/opt/vespa/logs/qrs/ConnectionLog.default.20210210");
private static final Path connectionLogPath2 = fileSystem.getPath("/opt/vespa/logs/qrs/ConnectionLog.default.20210212.zst");
private static final Path vespaLogPath1 = fileSystem.getPath("/opt/vespa/logs/vespa.log");
private static final Path vespaLogPath2 = fileSystem.getPath("/opt/vespa/logs/vespa.log-2021-02-12");
@Test
public void access_logs() {
assertForLogFile(accessLogPath1, null, null, true);
assertForLogFile(accessLogPath1, "s3:
assertForLogFile(accessLogPath2, "s3:
assertForLogFile(accessLogPath2, "s3:
assertForLogFile(accessLogPath3, "s3:
assertForLogFile(accessLogPath3, "s3:
assertForLogFile(accessLogPath4, "s3:
assertForLogFile(accessLogPath4, "s3:
}
@Test
public void connection_logs() {
assertForLogFile(connectionLogPath1, null, null, true);
assertForLogFile(connectionLogPath1, "s3:
assertForLogFile(connectionLogPath2, "s3:
assertForLogFile(connectionLogPath2, "s3:
}
@Test
private static void assertForLogFile(Path srcPath, String destination, SyncFileInfo.Compression compression, boolean rotatedOnly) {
Optional<SyncFileInfo> sfi = SyncFileInfo.forLogFile(nodeArchiveUri, srcPath, rotatedOnly);
assertEquals(destination, sfi.map(SyncFileInfo::destination).map(URI::toString).orElse(null));
assertEquals(compression, sfi.map(SyncFileInfo::uploadCompression).orElse(null));
}
} |
Yes ... will think about it. Feel free to go ahead and merge this PR, we can change that later if we want to. | public void vespa_logs() {
assertForLogFile(vespaLogPath1, null, null, true);
assertForLogFile(vespaLogPath1, "s3:
assertForLogFile(vespaLogPath2, "s3:
assertForLogFile(vespaLogPath2, "s3:
} | assertForLogFile(vespaLogPath1, "s3: | public void vespa_logs() {
assertForLogFile(vespaLogPath1, null, null, true);
assertForLogFile(vespaLogPath1, "s3:
assertForLogFile(vespaLogPath2, "s3:
assertForLogFile(vespaLogPath2, "s3:
} | class SyncFileInfoTest {
private static final FileSystem fileSystem = TestFileSystem.create();
private static final URI nodeArchiveUri = URI.create("s3:
private static final Path accessLogPath1 = fileSystem.getPath("/opt/vespa/logs/qrs/access.log.20210211");
private static final Path accessLogPath2 = fileSystem.getPath("/opt/vespa/logs/qrs/access.log.20210212.zst");
private static final Path accessLogPath3 = fileSystem.getPath("/opt/vespa/logs/qrs/access-json.log.20210213.zst");
private static final Path accessLogPath4 = fileSystem.getPath("/opt/vespa/logs/qrs/JsonAccessLog.default.20210214.zst");
private static final Path connectionLogPath1 = fileSystem.getPath("/opt/vespa/logs/qrs/ConnectionLog.default.20210210");
private static final Path connectionLogPath2 = fileSystem.getPath("/opt/vespa/logs/qrs/ConnectionLog.default.20210212.zst");
private static final Path vespaLogPath1 = fileSystem.getPath("/opt/vespa/logs/vespa.log");
private static final Path vespaLogPath2 = fileSystem.getPath("/opt/vespa/logs/vespa.log-2021-02-12");
@Test
public void access_logs() {
assertForLogFile(accessLogPath1, null, null, true);
assertForLogFile(accessLogPath1, "s3:
assertForLogFile(accessLogPath2, "s3:
assertForLogFile(accessLogPath2, "s3:
assertForLogFile(accessLogPath3, "s3:
assertForLogFile(accessLogPath3, "s3:
assertForLogFile(accessLogPath4, "s3:
assertForLogFile(accessLogPath4, "s3:
}
@Test
public void connection_logs() {
assertForLogFile(connectionLogPath1, null, null, true);
assertForLogFile(connectionLogPath1, "s3:
assertForLogFile(connectionLogPath2, "s3:
assertForLogFile(connectionLogPath2, "s3:
}
@Test
private static void assertForLogFile(Path srcPath, String destination, SyncFileInfo.Compression compression, boolean rotatedOnly) {
Optional<SyncFileInfo> sfi = SyncFileInfo.forLogFile(nodeArchiveUri, srcPath, rotatedOnly);
assertEquals(destination, sfi.map(SyncFileInfo::destination).map(URI::toString).orElse(null));
assertEquals(compression, sfi.map(SyncFileInfo::uploadCompression).orElse(null));
}
} | class SyncFileInfoTest {
private static final FileSystem fileSystem = TestFileSystem.create();
private static final URI nodeArchiveUri = URI.create("s3:
private static final Path accessLogPath1 = fileSystem.getPath("/opt/vespa/logs/qrs/access.log.20210211");
private static final Path accessLogPath2 = fileSystem.getPath("/opt/vespa/logs/qrs/access.log.20210212.zst");
private static final Path accessLogPath3 = fileSystem.getPath("/opt/vespa/logs/qrs/access-json.log.20210213.zst");
private static final Path accessLogPath4 = fileSystem.getPath("/opt/vespa/logs/qrs/JsonAccessLog.default.20210214.zst");
private static final Path connectionLogPath1 = fileSystem.getPath("/opt/vespa/logs/qrs/ConnectionLog.default.20210210");
private static final Path connectionLogPath2 = fileSystem.getPath("/opt/vespa/logs/qrs/ConnectionLog.default.20210212.zst");
private static final Path vespaLogPath1 = fileSystem.getPath("/opt/vespa/logs/vespa.log");
private static final Path vespaLogPath2 = fileSystem.getPath("/opt/vespa/logs/vespa.log-2021-02-12");
@Test
public void access_logs() {
assertForLogFile(accessLogPath1, null, null, true);
assertForLogFile(accessLogPath1, "s3:
assertForLogFile(accessLogPath2, "s3:
assertForLogFile(accessLogPath2, "s3:
assertForLogFile(accessLogPath3, "s3:
assertForLogFile(accessLogPath3, "s3:
assertForLogFile(accessLogPath4, "s3:
assertForLogFile(accessLogPath4, "s3:
}
@Test
public void connection_logs() {
assertForLogFile(connectionLogPath1, null, null, true);
assertForLogFile(connectionLogPath1, "s3:
assertForLogFile(connectionLogPath2, "s3:
assertForLogFile(connectionLogPath2, "s3:
}
@Test
private static void assertForLogFile(Path srcPath, String destination, SyncFileInfo.Compression compression, boolean rotatedOnly) {
Optional<SyncFileInfo> sfi = SyncFileInfo.forLogFile(nodeArchiveUri, srcPath, rotatedOnly);
assertEquals(destination, sfi.map(SyncFileInfo::destination).map(URI::toString).orElse(null));
assertEquals(compression, sfi.map(SyncFileInfo::uploadCompression).orElse(null));
}
} |
Consider making the other `fail` method call this. | public Node fail(String hostname, boolean keepAllocation, Agent agent, String reason) {
return move(hostname, keepAllocation, Node.State.failed, agent, Optional.of(reason));
} | return move(hostname, keepAllocation, Node.State.failed, agent, Optional.of(reason)); | public Node fail(String hostname, boolean keepAllocation, Agent agent, String reason) {
return move(hostname, keepAllocation, Node.State.failed, agent, Optional.of(reason));
} | class Nodes {
private final Zone zone;
private final Clock clock;
private final CuratorDatabaseClient db;
public Nodes(CuratorDatabaseClient db, Zone zone, Clock clock) {
this.zone = zone;
this.clock = clock;
this.db = db;
}
/**
* Finds and returns the node with the hostname in any of the given states, or empty if not found
*
* @param hostname the full host name of the node
* @param inState the states the node may be in. If no states are given, it will be returned from any state
* @return the node, or empty if it was not found in any of the given states
*/
public Optional<Node> node(String hostname, Node.State... inState) {
return db.readNode(hostname, inState);
}
/**
* Returns a list of nodes in this repository in any of the given states
*
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
*/
public NodeList list(Node.State... inState) {
return NodeList.copyOf(db.readNodes(inState));
}
/** Returns a locked list of all nodes in this repository */
public LockedNodeList list(Mutex lock) {
return new LockedNodeList(list().asList(), lock);
}
/**
* Returns whether the zone managed by this node repository seems to be working.
* If too many nodes are not responding, there is probably some zone-wide issue
* and we should probably refrain from making changes to it.
*/
public boolean isWorking() {
NodeList activeNodes = list(Node.State.active);
if (activeNodes.size() <= 5) return true;
NodeList downNodes = activeNodes.down();
return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 );
}
/** Adds a list of newly created reserved nodes to the node repository */
public List<Node> addReservedNodes(LockedNodeList nodes) {
for (Node node : nodes) {
if ( ! node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER))
illegal("Cannot add " + node + ": This is not a child node");
if (node.allocation().isEmpty())
illegal("Cannot add " + node + ": Child nodes need to be allocated");
Optional<Node> existing = node(node.hostname());
if (existing.isPresent())
illegal("Cannot add " + node + ": A node with this name already exists (" +
existing.get() + ", " + existing.get().history() + "). Node to be added: " +
node + ", " + node.history());
}
return db.addNodesInState(nodes.asList(), Node.State.reserved, Agent.system);
}
/**
* Adds a list of (newly created) nodes to the node repository as provisioned nodes.
* If any of the nodes already exists in the deprovisioned state, the new node will be merged
* with the history of that node.
*/
public List<Node> addNodes(List<Node> nodes, Agent agent) {
try (Mutex lock = lockUnallocated()) {
List<Node> nodesToAdd = new ArrayList<>();
List<Node> nodesToRemove = new ArrayList<>();
for (int i = 0; i < nodes.size(); i++) {
var node = nodes.get(i);
for (int j = 0; j < i; j++) {
if (node.equals(nodes.get(j)))
illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
}
Optional<Node> existing = node(node.hostname());
if (existing.isPresent()) {
if (existing.get().state() != Node.State.deprovisioned)
illegal("Cannot add " + node + ": A node with this name already exists");
node = node.with(existing.get().history());
node = node.with(existing.get().reports());
node = node.with(node.status().withFailCount(existing.get().status().failCount()));
if (existing.get().status().firmwareVerifiedAt().isPresent())
node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get()));
nodesToRemove.add(existing.get());
}
nodesToAdd.add(node);
}
List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), Node.State.provisioned, agent);
db.removeNodes(nodesToRemove);
return resultingNodes;
}
}
/** Sets a list of nodes ready and returns the nodes in the ready state */
public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
try (Mutex lock = lockUnallocated()) {
List<Node> nodesWithResetFields = nodes.stream()
.map(node -> {
if (node.state() != Node.State.provisioned && node.state() != Node.State.dirty)
illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
if (node.type() == NodeType.host && node.ipConfig().pool().getIpSet().isEmpty())
illegal("Can not set host " + node + " ready. Its IP address pool is empty.");
return node.withWantToRetire(false, false, Agent.system, clock.instant());
})
.collect(Collectors.toList());
return db.writeTo(Node.State.ready, nodesWithResetFields, agent, Optional.of(reason));
}
}
public Node setReady(String hostname, Agent agent, String reason) {
Node nodeToReady = node(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));
if (nodeToReady.state() == Node.State.ready) return nodeToReady;
return setReady(List.of(nodeToReady), agent, reason).get(0);
}
/** Reserve nodes. This method does <b>not</b> lock the node repository */
public List<Node> reserve(List<Node> nodes) {
return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
}
/** Activate nodes. This method does <b>not</b> lock the node repository */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
}
/**
* Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
*
* @param application the application the nodes belong to
* @param nodes the nodes to make removable. These nodes MUST be in the active state.
*/
public void setRemovable(ApplicationId application, List<Node> nodes) {
try (Mutex lock = lock(application)) {
List<Node> removableNodes =
nodes.stream().map(node -> node.with(node.allocation().get().removable(true)))
.collect(Collectors.toList());
write(removableNodes, lock);
}
}
/**
* Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the
* transaction commits.
*/
public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) {
var stateless = NodeList.copyOf(nodes).stateless();
var stateful = NodeList.copyOf(nodes).stateful();
List<Node> written = new ArrayList<>();
written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction.nested()));
written.addAll(db.writeTo(Node.State.inactive, stateful.asList(), Agent.application, Optional.empty(), transaction.nested()));
return written;
}
/** Move nodes to the dirty state */
public List<Node> deallocate(List<Node> nodes, Agent agent, String reason) {
return performOn(NodeListFilter.from(nodes), (node, lock) -> deallocate(node, agent, reason));
}
public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) {
Node nodeToDirty = node(hostname).orElseThrow(() ->
new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));
List<Node> nodesToDirty =
(nodeToDirty.type().isHost() ?
Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
Stream.of(nodeToDirty))
.filter(node -> node.state() != Node.State.dirty)
.collect(Collectors.toList());
List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
.filter(node -> node.state() != Node.State.provisioned)
.filter(node -> node.state() != Node.State.failed)
.filter(node -> node.state() != Node.State.parked)
.filter(node -> node.state() != Node.State.breakfixed)
.map(Node::hostname)
.collect(Collectors.toList());
if ( ! hostnamesNotAllowedToDirty.isEmpty())
illegal("Could not deallocate " + nodeToDirty + ": " +
hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]");
return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).collect(Collectors.toList());
}
/**
* Set a node dirty or parked, allowed if it is in the provisioned, inactive, failed or parked state.
* Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
*/
public Node deallocate(Node node, Agent agent, String reason) {
NestedTransaction transaction = new NestedTransaction();
Node deallocated = deallocate(node, agent, reason, transaction);
transaction.commit();
return deallocated;
}
public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
return nodes.stream().map(node -> deallocate(node, agent, reason, transaction)).collect(Collectors.toList());
}
public Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) {
if (node.state() != Node.State.parked && agent != Agent.operator
&& (node.status().wantToDeprovision() || retiredByOperator(node)))
return park(node.hostname(), false, agent, reason, transaction);
else
return db.writeTo(Node.State.dirty, List.of(node), agent, Optional.of(reason), transaction).get(0);
}
private static boolean retiredByOperator(Node node) {
return node.status().wantToRetire() && node.history().event(History.Event.Type.wantToRetire)
.map(History.Event::agent)
.map(agent -> agent == Agent.operator)
.orElse(false);
}
/**
* Fails this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node fail(String hostname, Agent agent, String reason) {
return move(hostname, true, Node.State.failed, agent, Optional.of(reason));
}
/**
* Fails all the nodes that are children of hostname before finally failing the hostname itself.
*
* @return List of all the failed nodes in their new state
*/
public List<Node> failRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, Node.State.failed, agent, Optional.of(reason));
}
/**
* Parks this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
NestedTransaction transaction = new NestedTransaction();
Node parked = park(hostname, keepAllocation, agent, reason, transaction);
transaction.commit();
return parked;
}
public Node park(String hostname, boolean keepAllocation, Agent agent, String reason, NestedTransaction transaction) {
return move(hostname, keepAllocation, Node.State.parked, agent, Optional.of(reason), transaction);
}
/**
* Parks all the nodes that are children of hostname before finally parking the hostname itself.
*
* @return List of all the parked nodes in their new state
*/
public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason));
}
/**
* Moves a previously failed or parked node back to the active state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node reactivate(String hostname, Agent agent, String reason) {
return move(hostname, true, Node.State.active, agent, Optional.of(reason));
}
/**
* Moves a host to breakfixed state, removing any children.
*/
public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) {
Node node = node(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not breakfix " + hostname + ": Node not found"));
try (Mutex lock = lockUnallocated()) {
requireBreakfixable(node);
List<Node> removed = removeChildren(node, false);
removed.add(move(node, Node.State.breakfixed, agent, Optional.of(reason)));
return removed;
}
}
private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
List<Node> moved = list().childrenOf(hostname).asList().stream()
.map(child -> move(child, toState, agent, reason))
.collect(Collectors.toList());
moved.add(move(hostname, true, toState, agent, reason));
return moved;
}
private Node move(String hostname, boolean keepAllocation, Node.State toState, Agent agent, Optional<String> reason) {
NestedTransaction transaction = new NestedTransaction();
Node moved = move(hostname, keepAllocation, toState, agent, reason, transaction);
transaction.commit();
return moved;
}
private Node move(String hostname, boolean keepAllocation, Node.State toState, Agent agent, Optional<String> reason,
NestedTransaction transaction) {
Node node = node(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));
if (!keepAllocation && node.allocation().isPresent()) {
node = node.withoutAllocation();
}
return move(node, toState, agent, reason, transaction);
}
private Node move(Node node, Node.State toState, Agent agent, Optional<String> reason) {
NestedTransaction transaction = new NestedTransaction();
Node moved = move(node, toState, agent, reason, transaction);
transaction.commit();
return moved;
}
private Node move(Node node, Node.State toState, Agent agent, Optional<String> reason, NestedTransaction transaction) {
if (toState == Node.State.active && node.allocation().isEmpty())
illegal("Could not set " + node + " active. It has no allocation.");
try (Mutex lock = lock(node)) {
if (toState == Node.State.active) {
for (Node currentActive : list(Node.State.active).owner(node.allocation().get().owner())) {
if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
&& node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
}
}
return db.writeTo(toState, List.of(node), agent, reason, transaction).get(0);
}
}
/*
* This method is used by the REST API to handle readying nodes for new allocations. For Linux
* containers this will remove the node from node repository, otherwise the node will be moved to state ready.
*/
public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
Node node = node(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
if (node.state() != Node.State.dirty)
illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
return removeRecursively(node, true).get(0);
}
if (node.state() == Node.State.ready) return node;
Node parentHost = node.parentHostname().flatMap(this::node).orElse(node);
List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost);
if ( ! failureReasons.isEmpty())
illegal(node + " cannot be readied because it has hard failures: " + failureReasons);
return setReady(List.of(node), agent, reason).get(0);
}
/**
* Removes all the nodes that are children of hostname before finally removing the hostname itself.
*
* @return a List of all the nodes that have been removed or (for hosts) deprovisioned
*/
public List<Node> removeRecursively(String hostname) {
Node node = node(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
return removeRecursively(node, false);
}
public List<Node> removeRecursively(Node node, boolean force) {
try (Mutex lock = lockUnallocated()) {
requireRemovable(node, false, force);
if (node.type().isHost()) {
List<Node> removed = removeChildren(node, force);
if (zone.getCloud().dynamicProvisioning() || node.type() != NodeType.host)
db.removeNodes(List.of(node));
else {
node = node.with(IP.Config.EMPTY);
move(node, Node.State.deprovisioned, Agent.system, Optional.empty());
}
removed.add(node);
return removed;
}
else {
List<Node> removed = List.of(node);
db.removeNodes(removed);
return removed;
}
}
}
/** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */
public void forget(Node node) {
if (node.state() != Node.State.deprovisioned)
throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten");
db.removeNodes(List.of(node));
}
private List<Node> removeChildren(Node node, boolean force) {
List<Node> children = list().childrenOf(node).asList();
children.forEach(child -> requireRemovable(child, true, force));
db.removeNodes(children);
return new ArrayList<>(children);
}
/**
* Throws if the given node cannot be removed. Removal is allowed if:
* - Tenant node: node is unallocated
* - Host node: iff in state provisioned|failed|parked
* - Child node:
* If only removing the container node: node in state ready
* If also removing the parent node: child is in state provisioned|failed|parked|dirty|ready
*/
private void requireRemovable(Node node, boolean removingAsChild, boolean force) {
if (force) return;
if (node.type() == NodeType.tenant && node.allocation().isPresent())
illegal(node + " is currently allocated and cannot be removed");
if (!node.type().isHost() && !removingAsChild) {
if (node.state() != Node.State.ready)
illegal(node + " can not be removed as it is not in the state " + Node.State.ready);
}
else if (!node.type().isHost()) {
Set<Node.State> legalStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked, Node.State.dirty, Node.State.ready);
if ( ! legalStates.contains(node.state()))
illegal(node + " can not be removed as it is not in the states " + legalStates);
}
else {
Set<Node.State> legalStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked);
if (! legalStates.contains(node.state()))
illegal(node + " can not be removed as it is not in the states " + legalStates);
}
}
/**
* Throws if given node cannot be breakfixed.
* Breakfix is allowed if the following is true:
* - Node is tenant host
* - Node is in zone without dynamic provisioning
* - Node is in parked or failed state
*/
private void requireBreakfixable(Node node) {
if (zone.getCloud().dynamicProvisioning()) {
illegal("Can not breakfix in zone: " + zone);
}
if (node.type() != NodeType.host) {
illegal(node + " can not be breakfixed as it is not a tenant host");
}
Set<Node.State> legalStates = EnumSet.of(Node.State.failed, Node.State.parked);
if (! legalStates.contains(node.state())) {
illegal(node + " can not be removed as it is not in the states " + legalStates);
}
}
/**
* Increases the restart generation of the active nodes matching the filter.
*
* @return the nodes in their new state
*/
public List<Node> restart(NodeFilter filter) {
return performOn(StateFilter.from(Node.State.active, filter),
(node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()),
lock));
}
/**
* Increases the reboot generation of the nodes matching the filter.
*
* @return the nodes in their new state
*/
public List<Node> reboot(NodeFilter filter) {
return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock));
}
/**
* Set target OS version of all nodes matching given filter.
*
* @return the nodes in their new state
*/
public List<Node> upgradeOs(NodeFilter filter, Optional<Version> version) {
return performOn(filter, (node, lock) -> {
var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version));
return write(node.with(newStatus), lock);
});
}
/** Retire nodes matching given filter */
public List<Node> retire(NodeFilter filter, Agent agent, Instant instant) {
return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock));
}
/** Retire and deprovision given host and all of its children */
public List<Node> deprovision(Node host, Agent agent, Instant instant) {
if (!host.type().isHost()) throw new IllegalArgumentException("Cannot deprovision non-host " + host);
Optional<NodeMutex> nodeMutex = lockAndGet(host);
if (nodeMutex.isEmpty()) return List.of();
List<Node> result;
try (NodeMutex lock = nodeMutex.get(); Mutex allocationLock = lockUnallocated()) {
host = lock.node();
NodeList children = list(allocationLock).childrenOf(host);
result = retire(NodeListFilter.from(children.asList()), agent, instant);
result.add(write(host.withWantToRetire(true, true, agent, instant), lock));
}
return result;
}
/**
* Writes this node after it has changed some internal state but NOT changed its state field.
* This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
*
* @param lock already acquired lock
* @return the written node for convenience
*/
public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); }
/**
* Writes these nodes after they have changed some internal state but NOT changed their state field.
* This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
*
* @param lock already acquired lock
* @return the written nodes for convenience
*/
public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
return db.writeTo(nodes, Agent.system, Optional.empty());
}
/**
* Performs an operation requiring locking on all nodes matching some filter.
*
* @param filter the filter determining the set of nodes where the operation will be performed
* @param action the action to perform
* @return the set of nodes on which the action was performed, as they became as a result of the operation
*/
private List<Node> performOn(NodeFilter filter, BiFunction<Node, Mutex, Node> action) {
List<Node> unallocatedNodes = new ArrayList<>();
ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
for (Node node : db.readNodes()) {
if ( ! filter.matches(node)) continue;
if (node.allocation().isPresent())
allocatedNodes.put(node.allocation().get().owner(), node);
else
unallocatedNodes.add(node);
}
List<Node> resultingNodes = new ArrayList<>();
try (Mutex lock = lockUnallocated()) {
for (Node node : unallocatedNodes) {
Optional<Node> currentNode = db.readNode(node.hostname());
if (currentNode.isEmpty()) continue;
resultingNodes.add(action.apply(currentNode.get(), lock));
}
}
for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
try (Mutex lock = lock(applicationNodes.getKey())) {
for (Node node : applicationNodes.getValue()) {
Optional<Node> currentNode = db.readNode(node.hostname());
if (currentNode.isEmpty()) continue;
resultingNodes.add(action.apply(currentNode.get(), lock));
}
}
}
return resultingNodes;
}
public boolean canAllocateTenantNodeTo(Node host) {
return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning());
}
public static boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) {
if ( ! host.type().canRun(NodeType.tenant)) return false;
if (host.status().wantToRetire()) return false;
if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false;
if (dynamicProvisioning)
return EnumSet.of(Node.State.active, Node.State.ready, Node.State.provisioned).contains(host.state());
else
return host.state() == Node.State.active;
}
/** Create a lock which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application) {
return db.lock(application);
}
/** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application, Duration timeout) {
return db.lock(application, timeout);
}
/** Create a lock which provides exclusive rights to modifying unallocated nodes */
public Mutex lockUnallocated() { return db.lockInactive(); }
/** Returns the unallocated/application lock, and the node acquired under that lock. */
public Optional<NodeMutex> lockAndGet(Node node) {
Node staleNode = node;
final int maxRetries = 4;
for (int i = 0; i < maxRetries; ++i) {
Mutex lockToClose = lock(staleNode);
try {
Optional<Node> freshNode = node(staleNode.hostname(), staleNode.state());
if (freshNode.isEmpty()) {
freshNode = node(staleNode.hostname());
if (freshNode.isEmpty()) {
return Optional.empty();
}
}
if (Objects.equals(freshNode.get().allocation().map(Allocation::owner),
staleNode.allocation().map(Allocation::owner))) {
NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose);
lockToClose = null;
return Optional.of(nodeMutex);
}
staleNode = freshNode.get();
} finally {
if (lockToClose != null) lockToClose.close();
}
}
throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " +
"fetching an up to date node under lock: " + node.hostname());
}
/** Returns the unallocated/application lock, and the node acquired under that lock. */
public Optional<NodeMutex> lockAndGet(String hostname) {
return node(hostname).flatMap(this::lockAndGet);
}
/** Returns the unallocated/application lock, and the node acquired under that lock. */
public NodeMutex lockAndGetRequired(Node node) {
return lockAndGet(node).orElseThrow(() -> new IllegalArgumentException("No such node: " + node.hostname()));
}
/** Returns the unallocated/application lock, and the node acquired under that lock. */
public NodeMutex lockAndGetRequired(String hostname) {
return lockAndGet(hostname).orElseThrow(() -> new IllegalArgumentException("No such node: " + hostname));
}
private Mutex lock(Node node) {
return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
}
private void illegal(String message) {
throw new IllegalArgumentException(message);
}
} | class Nodes {
private final Zone zone;
private final Clock clock;
private final CuratorDatabaseClient db;
public Nodes(CuratorDatabaseClient db, Zone zone, Clock clock) {
this.zone = zone;
this.clock = clock;
this.db = db;
}
/**
* Finds and returns the node with the hostname in any of the given states, or empty if not found
*
* @param hostname the full host name of the node
* @param inState the states the node may be in. If no states are given, it will be returned from any state
* @return the node, or empty if it was not found in any of the given states
*/
public Optional<Node> node(String hostname, Node.State... inState) {
return db.readNode(hostname, inState);
}
/**
* Returns a list of nodes in this repository in any of the given states
*
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
*/
public NodeList list(Node.State... inState) {
return NodeList.copyOf(db.readNodes(inState));
}
/** Returns a locked list of all nodes in this repository */
public LockedNodeList list(Mutex lock) {
return new LockedNodeList(list().asList(), lock);
}
/**
* Returns whether the zone managed by this node repository seems to be working.
* If too many nodes are not responding, there is probably some zone-wide issue
* and we should probably refrain from making changes to it.
*/
public boolean isWorking() {
NodeList activeNodes = list(Node.State.active);
if (activeNodes.size() <= 5) return true;
NodeList downNodes = activeNodes.down();
return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 );
}
/** Adds a list of newly created reserved nodes to the node repository */
public List<Node> addReservedNodes(LockedNodeList nodes) {
for (Node node : nodes) {
if ( ! node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER))
illegal("Cannot add " + node + ": This is not a child node");
if (node.allocation().isEmpty())
illegal("Cannot add " + node + ": Child nodes need to be allocated");
Optional<Node> existing = node(node.hostname());
if (existing.isPresent())
illegal("Cannot add " + node + ": A node with this name already exists (" +
existing.get() + ", " + existing.get().history() + "). Node to be added: " +
node + ", " + node.history());
}
return db.addNodesInState(nodes.asList(), Node.State.reserved, Agent.system);
}
/**
* Adds a list of (newly created) nodes to the node repository as provisioned nodes.
* If any of the nodes already exists in the deprovisioned state, the new node will be merged
* with the history of that node.
*/
public List<Node> addNodes(List<Node> nodes, Agent agent) {
try (Mutex lock = lockUnallocated()) {
List<Node> nodesToAdd = new ArrayList<>();
List<Node> nodesToRemove = new ArrayList<>();
for (int i = 0; i < nodes.size(); i++) {
var node = nodes.get(i);
for (int j = 0; j < i; j++) {
if (node.equals(nodes.get(j)))
illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
}
Optional<Node> existing = node(node.hostname());
if (existing.isPresent()) {
if (existing.get().state() != Node.State.deprovisioned)
illegal("Cannot add " + node + ": A node with this name already exists");
node = node.with(existing.get().history());
node = node.with(existing.get().reports());
node = node.with(node.status().withFailCount(existing.get().status().failCount()));
if (existing.get().status().firmwareVerifiedAt().isPresent())
node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get()));
nodesToRemove.add(existing.get());
}
nodesToAdd.add(node);
}
List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), Node.State.provisioned, agent);
db.removeNodes(nodesToRemove);
return resultingNodes;
}
}
/** Sets a list of nodes ready and returns the nodes in the ready state */
public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
try (Mutex lock = lockUnallocated()) {
List<Node> nodesWithResetFields = nodes.stream()
.map(node -> {
if (node.state() != Node.State.provisioned && node.state() != Node.State.dirty)
illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
if (node.type() == NodeType.host && node.ipConfig().pool().getIpSet().isEmpty())
illegal("Can not set host " + node + " ready. Its IP address pool is empty.");
return node.withWantToRetire(false, false, Agent.system, clock.instant());
})
.collect(Collectors.toList());
return db.writeTo(Node.State.ready, nodesWithResetFields, agent, Optional.of(reason));
}
}
public Node setReady(String hostname, Agent agent, String reason) {
Node nodeToReady = node(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));
if (nodeToReady.state() == Node.State.ready) return nodeToReady;
return setReady(List.of(nodeToReady), agent, reason).get(0);
}
/** Reserve nodes. This method does <b>not</b> lock the node repository */
public List<Node> reserve(List<Node> nodes) {
return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
}
/** Activate nodes. This method does <b>not</b> lock the node repository */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
}
/**
* Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
*
* @param application the application the nodes belong to
* @param nodes the nodes to make removable. These nodes MUST be in the active state.
*/
public void setRemovable(ApplicationId application, List<Node> nodes) {
try (Mutex lock = lock(application)) {
List<Node> removableNodes =
nodes.stream().map(node -> node.with(node.allocation().get().removable(true)))
.collect(Collectors.toList());
write(removableNodes, lock);
}
}
/**
* Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the
* transaction commits.
*/
public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) {
var stateless = NodeList.copyOf(nodes).stateless();
var stateful = NodeList.copyOf(nodes).stateful();
List<Node> written = new ArrayList<>();
written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction.nested()));
written.addAll(db.writeTo(Node.State.inactive, stateful.asList(), Agent.application, Optional.empty(), transaction.nested()));
return written;
}
/** Move nodes to the dirty state */
public List<Node> deallocate(List<Node> nodes, Agent agent, String reason) {
return performOn(NodeListFilter.from(nodes), (node, lock) -> deallocate(node, agent, reason));
}
public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) {
Node nodeToDirty = node(hostname).orElseThrow(() ->
new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));
List<Node> nodesToDirty =
(nodeToDirty.type().isHost() ?
Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
Stream.of(nodeToDirty))
.filter(node -> node.state() != Node.State.dirty)
.collect(Collectors.toList());
List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
.filter(node -> node.state() != Node.State.provisioned)
.filter(node -> node.state() != Node.State.failed)
.filter(node -> node.state() != Node.State.parked)
.filter(node -> node.state() != Node.State.breakfixed)
.map(Node::hostname)
.collect(Collectors.toList());
if ( ! hostnamesNotAllowedToDirty.isEmpty())
illegal("Could not deallocate " + nodeToDirty + ": " +
hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]");
return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).collect(Collectors.toList());
}
/**
* Set a node dirty or parked, allowed if it is in the provisioned, inactive, failed or parked state.
* Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
*/
public Node deallocate(Node node, Agent agent, String reason) {
NestedTransaction transaction = new NestedTransaction();
Node deallocated = deallocate(node, agent, reason, transaction);
transaction.commit();
return deallocated;
}
public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
return nodes.stream().map(node -> deallocate(node, agent, reason, transaction)).collect(Collectors.toList());
}
public Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) {
if (node.state() != Node.State.parked && agent != Agent.operator
&& (node.status().wantToDeprovision() || retiredByOperator(node)))
return park(node.hostname(), false, agent, reason, transaction);
else
return db.writeTo(Node.State.dirty, List.of(node), agent, Optional.of(reason), transaction).get(0);
}
private static boolean retiredByOperator(Node node) {
return node.status().wantToRetire() && node.history().event(History.Event.Type.wantToRetire)
.map(History.Event::agent)
.map(agent -> agent == Agent.operator)
.orElse(false);
}
/**
* Fails this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node fail(String hostname, Agent agent, String reason) {
return fail(hostname, true, agent, reason);
}
/**
* Fails all the nodes that are children of hostname before finally failing the hostname itself.
*
* @return List of all the failed nodes in their new state
*/
public List<Node> failRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, Node.State.failed, agent, Optional.of(reason));
}
/**
* Parks this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
NestedTransaction transaction = new NestedTransaction();
Node parked = park(hostname, keepAllocation, agent, reason, transaction);
transaction.commit();
return parked;
}
public Node park(String hostname, boolean keepAllocation, Agent agent, String reason, NestedTransaction transaction) {
return move(hostname, keepAllocation, Node.State.parked, agent, Optional.of(reason), transaction);
}
/**
* Parks all the nodes that are children of hostname before finally parking the hostname itself.
*
* @return List of all the parked nodes in their new state
*/
public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason));
}
/**
* Moves a previously failed or parked node back to the active state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node reactivate(String hostname, Agent agent, String reason) {
return move(hostname, true, Node.State.active, agent, Optional.of(reason));
}
/**
* Moves a host to breakfixed state, removing any children.
*/
public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) {
Node node = node(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not breakfix " + hostname + ": Node not found"));
try (Mutex lock = lockUnallocated()) {
requireBreakfixable(node);
List<Node> removed = removeChildren(node, false);
removed.add(move(node, Node.State.breakfixed, agent, Optional.of(reason)));
return removed;
}
}
private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
List<Node> moved = list().childrenOf(hostname).asList().stream()
.map(child -> move(child, toState, agent, reason))
.collect(Collectors.toList());
moved.add(move(hostname, true, toState, agent, reason));
return moved;
}
private Node move(String hostname, boolean keepAllocation, Node.State toState, Agent agent, Optional<String> reason) {
NestedTransaction transaction = new NestedTransaction();
Node moved = move(hostname, keepAllocation, toState, agent, reason, transaction);
transaction.commit();
return moved;
}
private Node move(String hostname, boolean keepAllocation, Node.State toState, Agent agent, Optional<String> reason,
NestedTransaction transaction) {
Node node = node(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));
if (!keepAllocation && node.allocation().isPresent()) {
node = node.withoutAllocation();
}
return move(node, toState, agent, reason, transaction);
}
private Node move(Node node, Node.State toState, Agent agent, Optional<String> reason) {
NestedTransaction transaction = new NestedTransaction();
Node moved = move(node, toState, agent, reason, transaction);
transaction.commit();
return moved;
}
private Node move(Node node, Node.State toState, Agent agent, Optional<String> reason, NestedTransaction transaction) {
if (toState == Node.State.active && node.allocation().isEmpty())
illegal("Could not set " + node + " active. It has no allocation.");
try (Mutex lock = lock(node)) {
if (toState == Node.State.active) {
for (Node currentActive : list(Node.State.active).owner(node.allocation().get().owner())) {
if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
&& node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
}
}
return db.writeTo(toState, List.of(node), agent, reason, transaction).get(0);
}
}
/*
* This method is used by the REST API to handle readying nodes for new allocations. For Linux
* containers this will remove the node from node repository, otherwise the node will be moved to state ready.
*/
public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
Node node = node(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
if (node.state() != Node.State.dirty)
illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
return removeRecursively(node, true).get(0);
}
if (node.state() == Node.State.ready) return node;
Node parentHost = node.parentHostname().flatMap(this::node).orElse(node);
List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost);
if ( ! failureReasons.isEmpty())
illegal(node + " cannot be readied because it has hard failures: " + failureReasons);
return setReady(List.of(node), agent, reason).get(0);
}
/**
* Removes all the nodes that are children of hostname before finally removing the hostname itself.
*
* @return a List of all the nodes that have been removed or (for hosts) deprovisioned
*/
public List<Node> removeRecursively(String hostname) {
Node node = node(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
return removeRecursively(node, false);
}
public List<Node> removeRecursively(Node node, boolean force) {
try (Mutex lock = lockUnallocated()) {
requireRemovable(node, false, force);
if (node.type().isHost()) {
List<Node> removed = removeChildren(node, force);
if (zone.getCloud().dynamicProvisioning() || node.type() != NodeType.host)
db.removeNodes(List.of(node));
else {
node = node.with(IP.Config.EMPTY);
move(node, Node.State.deprovisioned, Agent.system, Optional.empty());
}
removed.add(node);
return removed;
}
else {
List<Node> removed = List.of(node);
db.removeNodes(removed);
return removed;
}
}
}
/** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */
public void forget(Node node) {
if (node.state() != Node.State.deprovisioned)
throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten");
db.removeNodes(List.of(node));
}
private List<Node> removeChildren(Node node, boolean force) {
List<Node> children = list().childrenOf(node).asList();
children.forEach(child -> requireRemovable(child, true, force));
db.removeNodes(children);
return new ArrayList<>(children);
}
/**
* Throws if the given node cannot be removed. Removal is allowed if:
* - Tenant node: node is unallocated
* - Host node: iff in state provisioned|failed|parked
* - Child node:
* If only removing the container node: node in state ready
* If also removing the parent node: child is in state provisioned|failed|parked|dirty|ready
*/
private void requireRemovable(Node node, boolean removingAsChild, boolean force) {
if (force) return;
if (node.type() == NodeType.tenant && node.allocation().isPresent())
illegal(node + " is currently allocated and cannot be removed");
if (!node.type().isHost() && !removingAsChild) {
if (node.state() != Node.State.ready)
illegal(node + " can not be removed as it is not in the state " + Node.State.ready);
}
else if (!node.type().isHost()) {
Set<Node.State> legalStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked, Node.State.dirty, Node.State.ready);
if ( ! legalStates.contains(node.state()))
illegal(node + " can not be removed as it is not in the states " + legalStates);
}
else {
Set<Node.State> legalStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked);
if (! legalStates.contains(node.state()))
illegal(node + " can not be removed as it is not in the states " + legalStates);
}
}
/**
* Throws if given node cannot be breakfixed.
* Breakfix is allowed if the following is true:
* - Node is tenant host
* - Node is in zone without dynamic provisioning
* - Node is in parked or failed state
*/
private void requireBreakfixable(Node node) {
if (zone.getCloud().dynamicProvisioning()) {
illegal("Can not breakfix in zone: " + zone);
}
if (node.type() != NodeType.host) {
illegal(node + " can not be breakfixed as it is not a tenant host");
}
Set<Node.State> legalStates = EnumSet.of(Node.State.failed, Node.State.parked);
if (! legalStates.contains(node.state())) {
illegal(node + " can not be removed as it is not in the states " + legalStates);
}
}
/**
* Increases the restart generation of the active nodes matching the filter.
*
* @return the nodes in their new state
*/
public List<Node> restart(NodeFilter filter) {
return performOn(StateFilter.from(Node.State.active, filter),
(node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()),
lock));
}
/**
* Increases the reboot generation of the nodes matching the filter.
*
* @return the nodes in their new state
*/
public List<Node> reboot(NodeFilter filter) {
return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock));
}
/**
* Set target OS version of all nodes matching given filter.
*
* @return the nodes in their new state
*/
public List<Node> upgradeOs(NodeFilter filter, Optional<Version> version) {
return performOn(filter, (node, lock) -> {
var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version));
return write(node.with(newStatus), lock);
});
}
/** Retire nodes matching given filter */
public List<Node> retire(NodeFilter filter, Agent agent, Instant instant) {
return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock));
}
/** Retire and deprovision given host and all of its children */
public List<Node> deprovision(Node host, Agent agent, Instant instant) {
if (!host.type().isHost()) throw new IllegalArgumentException("Cannot deprovision non-host " + host);
Optional<NodeMutex> nodeMutex = lockAndGet(host);
if (nodeMutex.isEmpty()) return List.of();
List<Node> result;
try (NodeMutex lock = nodeMutex.get(); Mutex allocationLock = lockUnallocated()) {
host = lock.node();
NodeList children = list(allocationLock).childrenOf(host);
result = retire(NodeListFilter.from(children.asList()), agent, instant);
result.add(write(host.withWantToRetire(true, true, agent, instant), lock));
}
return result;
}
/**
* Writes this node after it has changed some internal state but NOT changed its state field.
* This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
*
* @param lock already acquired lock
* @return the written node for convenience
*/
public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); }
/**
* Writes these nodes after they have changed some internal state but NOT changed their state field.
* This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
*
* @param lock already acquired lock
* @return the written nodes for convenience
*/
public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
return db.writeTo(nodes, Agent.system, Optional.empty());
}
/**
* Performs an operation requiring locking on all nodes matching some filter.
*
* @param filter the filter determining the set of nodes where the operation will be performed
* @param action the action to perform
* @return the set of nodes on which the action was performed, as they became as a result of the operation
*/
private List<Node> performOn(NodeFilter filter, BiFunction<Node, Mutex, Node> action) {
List<Node> unallocatedNodes = new ArrayList<>();
ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
for (Node node : db.readNodes()) {
if ( ! filter.matches(node)) continue;
if (node.allocation().isPresent())
allocatedNodes.put(node.allocation().get().owner(), node);
else
unallocatedNodes.add(node);
}
List<Node> resultingNodes = new ArrayList<>();
try (Mutex lock = lockUnallocated()) {
for (Node node : unallocatedNodes) {
Optional<Node> currentNode = db.readNode(node.hostname());
if (currentNode.isEmpty()) continue;
resultingNodes.add(action.apply(currentNode.get(), lock));
}
}
for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
try (Mutex lock = lock(applicationNodes.getKey())) {
for (Node node : applicationNodes.getValue()) {
Optional<Node> currentNode = db.readNode(node.hostname());
if (currentNode.isEmpty()) continue;
resultingNodes.add(action.apply(currentNode.get(), lock));
}
}
}
return resultingNodes;
}
public boolean canAllocateTenantNodeTo(Node host) {
return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning());
}
public static boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) {
if ( ! host.type().canRun(NodeType.tenant)) return false;
if (host.status().wantToRetire()) return false;
if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false;
if (dynamicProvisioning)
return EnumSet.of(Node.State.active, Node.State.ready, Node.State.provisioned).contains(host.state());
else
return host.state() == Node.State.active;
}
/** Create a lock which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application) {
return db.lock(application);
}
/** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application, Duration timeout) {
return db.lock(application, timeout);
}
/** Create a lock which provides exclusive rights to modifying unallocated nodes */
public Mutex lockUnallocated() { return db.lockInactive(); }
/** Returns the unallocated/application lock, and the node acquired under that lock. */
public Optional<NodeMutex> lockAndGet(Node node) {
Node staleNode = node;
final int maxRetries = 4;
for (int i = 0; i < maxRetries; ++i) {
Mutex lockToClose = lock(staleNode);
try {
Optional<Node> freshNode = node(staleNode.hostname(), staleNode.state());
if (freshNode.isEmpty()) {
freshNode = node(staleNode.hostname());
if (freshNode.isEmpty()) {
return Optional.empty();
}
}
if (Objects.equals(freshNode.get().allocation().map(Allocation::owner),
staleNode.allocation().map(Allocation::owner))) {
NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose);
lockToClose = null;
return Optional.of(nodeMutex);
}
staleNode = freshNode.get();
} finally {
if (lockToClose != null) lockToClose.close();
}
}
throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " +
"fetching an up to date node under lock: " + node.hostname());
}
/** Returns the unallocated/application lock, and the node acquired under that lock. */
public Optional<NodeMutex> lockAndGet(String hostname) {
return node(hostname).flatMap(this::lockAndGet);
}
/**
 * Returns the unallocated/application lock, and the node acquired under that lock.
 *
 * @throws IllegalArgumentException if the node does not exist
 */
public NodeMutex lockAndGetRequired(Node node) {
    Optional<NodeMutex> locked = lockAndGet(node);
    if (locked.isPresent()) return locked.get();
    throw new IllegalArgumentException("No such node: " + node.hostname());
}
/**
 * Returns the unallocated/application lock, and the node acquired under that lock,
 * looked up by hostname.
 *
 * @throws IllegalArgumentException if no node with the given hostname exists
 */
public NodeMutex lockAndGetRequired(String hostname) {
    Optional<NodeMutex> locked = lockAndGet(hostname);
    if (locked.isPresent()) return locked.get();
    throw new IllegalArgumentException("No such node: " + hostname);
}
/** Takes the application lock when the node is allocated, the unallocated lock otherwise. */
private Mutex lock(Node node) {
    Optional<Allocation> allocation = node.allocation();
    if (allocation.isEmpty()) return lockUnallocated();
    return lock(allocation.get().owner());
}
/** Convenience for signalling a validation failure: always throws IllegalArgumentException. */
private void illegal(String message) {
    throw new IllegalArgumentException(message);
}
} |
What's this used for, and will it be possible to trick it somehow, or is it just that we prefer the `getLocalPort()` over the `listenPort()` because it's more often correctly defined, but both these are always the same if defined? | public static int getConnectorLocalPort(HttpServletRequest request) {
JDiscServerConnector connector = (JDiscServerConnector) getConnection(request).getConnector();
int actualLocalPort = connector.getLocalPort();
int localPortIfConnectorUnopened = -1;
int localPortIfConnectorClosed = -2;
if (actualLocalPort == localPortIfConnectorUnopened || actualLocalPort == localPortIfConnectorClosed) {
int configuredLocalPort = connector.listenPort();
int localPortEphemeralPort = 0;
if (configuredLocalPort == localPortEphemeralPort) {
throw new IllegalStateException("Unable to determine connector's listen port");
}
return configuredLocalPort;
}
return actualLocalPort;
} | JDiscServerConnector connector = (JDiscServerConnector) getConnection(request).getConnector(); | public static int getConnectorLocalPort(HttpServletRequest request) {
JDiscServerConnector connector = (JDiscServerConnector) getConnection(request).getConnector();
int actualLocalPort = connector.getLocalPort();
int localPortIfConnectorUnopened = -1;
int localPortIfConnectorClosed = -2;
if (actualLocalPort == localPortIfConnectorUnopened || actualLocalPort == localPortIfConnectorClosed) {
int configuredLocalPort = connector.listenPort();
int localPortEphemeralPort = 0;
if (configuredLocalPort == localPortEphemeralPort) {
throw new IllegalStateException("Unable to determine connector's listen port");
}
return configuredLocalPort;
}
return actualLocalPort;
} | class HttpServletRequestUtils {
private HttpServletRequestUtils() {}
public static HttpConnection getConnection(HttpServletRequest request) {
return (HttpConnection)request.getAttribute("org.eclipse.jetty.server.HttpConnection");
}
/**
* Note: {@link HttpServletRequest
* @return the actual local port of the underlying Jetty connector
*/
} | class HttpServletRequestUtils {
private HttpServletRequestUtils() {}
public static HttpConnection getConnection(HttpServletRequest request) {
return (HttpConnection)request.getAttribute("org.eclipse.jetty.server.HttpConnection");
}
/**
* Note: {@link HttpServletRequest
* @return the actual local port of the underlying Jetty connector
*/
} |
It's the same unless `listenPort()` is configured to `0` (ephemeral port), where the OS determines the listen port (for unit tests). | public static int getConnectorLocalPort(HttpServletRequest request) {
JDiscServerConnector connector = (JDiscServerConnector) getConnection(request).getConnector();
int actualLocalPort = connector.getLocalPort();
int localPortIfConnectorUnopened = -1;
int localPortIfConnectorClosed = -2;
if (actualLocalPort == localPortIfConnectorUnopened || actualLocalPort == localPortIfConnectorClosed) {
int configuredLocalPort = connector.listenPort();
int localPortEphemeralPort = 0;
if (configuredLocalPort == localPortEphemeralPort) {
throw new IllegalStateException("Unable to determine connector's listen port");
}
return configuredLocalPort;
}
return actualLocalPort;
} | JDiscServerConnector connector = (JDiscServerConnector) getConnection(request).getConnector(); | public static int getConnectorLocalPort(HttpServletRequest request) {
JDiscServerConnector connector = (JDiscServerConnector) getConnection(request).getConnector();
int actualLocalPort = connector.getLocalPort();
int localPortIfConnectorUnopened = -1;
int localPortIfConnectorClosed = -2;
if (actualLocalPort == localPortIfConnectorUnopened || actualLocalPort == localPortIfConnectorClosed) {
int configuredLocalPort = connector.listenPort();
int localPortEphemeralPort = 0;
if (configuredLocalPort == localPortEphemeralPort) {
throw new IllegalStateException("Unable to determine connector's listen port");
}
return configuredLocalPort;
}
return actualLocalPort;
} | class HttpServletRequestUtils {
private HttpServletRequestUtils() {}
public static HttpConnection getConnection(HttpServletRequest request) {
return (HttpConnection)request.getAttribute("org.eclipse.jetty.server.HttpConnection");
}
/**
* Note: {@link HttpServletRequest
* @return the actual local port of the underlying Jetty connector
*/
} | class HttpServletRequestUtils {
private HttpServletRequestUtils() {}
public static HttpConnection getConnection(HttpServletRequest request) {
return (HttpConnection)request.getAttribute("org.eclipse.jetty.server.HttpConnection");
}
/**
* Note: {@link HttpServletRequest
* @return the actual local port of the underlying Jetty connector
*/
} |
Copy-pasted? | public void testUserManagement() {
ContainerTester tester = new ContainerTester(container, responseFiles);
assertEquals(SystemName.Public, tester.controller().system());
Set<Role> operator = Set.of(Role.hostedOperator());
ApplicationId id = ApplicationId.from("my-tenant", "my-app", "default");
tester.assertResponse(request("/application/v4/"),
accessDenied, 403);
tester.assertResponse(request("/application/v4/tenant")
.roles(operator),
"[]");
tester.assertResponse(request("/api/application/v4/tenant")
.roles(operator),
"[]");
tester.assertResponse(request("/application/v4/tenant/my-tenant", POST)
.data("{\"token\":\"hello\"}"),
"{\"error-code\":\"FORBIDDEN\",\"message\":\"You are not currently permitted to create tenants. Please contact the Vespa team to request access.\"}", 403);
tester.assertResponse(request("/application/v4/tenant/my-tenant", POST)
.roles(operator)
.principal("administrator@tenant")
.data("{\"token\":\"hello\"}"),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/user/v1/"),
accessDenied, 403);
tester.assertResponse(request("/user/v1/tenant/my-tenant", POST)
.roles(Set.of(Role.administrator(id.tenant())))
.data("{\"user\":\"evil@evil\",\"roleName\":\"hostedOperator\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Malformed or illegal role name 'hostedOperator'.\"}", 400);
tester.assertResponse(request("/user/v1/tenant/my-tenant", POST)
.roles(Set.of(Role.administrator(id.tenant())))
.data("{\"user\":\"developer@tenant\",\"roles\":[\"developer\",\"reader\"]}"),
"{\"message\":\"user 'developer@tenant' is now a member of role 'developer' of 'my-tenant', role 'reader' of 'my-tenant'\"}");
tester.assertResponse(request("/user/v1/tenant/my-tenant", POST)
.roles(Set.of(Role.developer(id.tenant())))
.data("{\"user\":\"developer@tenant\",\"roleName\":\"administrator\"}"),
accessDenied, 403);
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app", POST)
.roles(Set.of(Role.administrator(TenantName.from("my-tenant"))))
.data("{\"user\":\"headless@app\",\"roleName\":\"headless\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"role 'headless' of 'my-app' owned by 'my-tenant' not found\"}", 400);
tester.assertResponse(request("/application/v4/tenant/my-tenant/application/my-app", POST)
.principal("developer@tenant")
.roles(Set.of(Role.developer(id.tenant()))),
new File("application-created.json"));
tester.assertResponse(request("/application/v4/tenant/other-tenant/application/my-app", POST)
.roles(Set.of(Role.administrator(id.tenant()))),
accessDenied, 403);
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app", POST)
.roles(Set.of(Role.hostedOperator()))
.data("{\"user\":\"developer@app\",\"roleName\":\"developer\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Malformed or illegal role name 'developer'.\"}", 400);
tester.assertResponse(request("/user/v1/tenant/my-tenant")
.roles(Set.of(Role.reader(id.tenant()))),
new File("tenant-roles.json"));
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app")
.roles(Set.of(Role.administrator(id.tenant()))),
new File("application-roles.json"));
tester.assertResponse(request("/api/user/v1/tenant/my-tenant/application/my-app")
.roles(Set.of(Role.administrator(id.tenant()))),
new File("application-roles.json"));
tester.assertResponse(request("/application/v4/tenant/my-tenant/application/my-app/key", POST)
.roles(Set.of(Role.developer(id.tenant())))
.data("{\"key\":\"" + pemPublicKey + "\"}"),
new File("first-deploy-key.json"));
tester.assertResponse(request("/application/v4/tenant/my-tenant/key", POST)
.principal("joe@dev")
.roles(Set.of(Role.developer(id.tenant())))
.data("{\"key\":\"" + pemPublicKey + "\"}"),
new File("first-developer-key.json"));
tester.assertResponse(request("/application/v4/tenant/my-tenant/key", POST)
.principal("operator@tenant")
.roles(Set.of(Role.developer(id.tenant())))
.data("{\"key\":\"" + pemPublicKey + "\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Key "+ quotedPemPublicKey + " is already owned by joe@dev\"}",
400);
tester.assertResponse(request("/application/v4/tenant/my-tenant/key", POST)
.principal("developer@tenant")
.roles(Set.of(Role.developer(id.tenant())))
.data("{\"key\":\"" + otherPemPublicKey + "\"}"),
new File("both-developer-keys.json"));
tester.assertResponse(request("/application/v4/tenant/my-tenant/")
.roles(Set.of(Role.reader(id.tenant()))),
new File("tenant-with-keys.json"));
tester.assertResponse(request("/application/v4/tenant/my-tenant/key", DELETE)
.roles(Set.of(Role.developer(id.tenant())))
.data("{\"key\":\"" + pemPublicKey + "\"}"),
new File("second-developer-key.json"));
tester.assertResponse(request("/application/v4/tenant/my-tenant/secret-store/", PUT)
.principal("developer@tenant")
.roles(Set.of(Role.administrator(id.tenant())))
.data("{\"name\":\"secret-foo\",\"awsId\":\"123\",\"role\":\"secret-role\",\"externalId\":\"abc\"}"),
"{\"message\":\"Configured secret store: TenantSecretStore{name='secret-foo', awsId='123', role='secret-role'}\"}",
200);
tester.assertResponse(request("/application/v4/tenant/my-tenant")
.principal("developer@tenant")
.roles(Set.of(Role.reader(id.tenant())))
.data("{\"name\":\"secret-foo\",\"awsId\":\"123\",\"role\":\"secret-role\",\"externalId\":\"abc\"}"),
new File("tenant-with-secrets.json"));
tester.assertResponse(request("/application/v4/tenant/my-tenant/application/my-app", DELETE)
.roles(Set.of(Role.developer(id.tenant()))),
"{\"message\":\"Deleted application my-tenant.my-app\"}");
tester.assertResponse(request("/user/v1/tenant/my-tenant", DELETE)
.roles(Set.of(Role.administrator(id.tenant())))
.data("{\"user\":\"developer@tenant\",\"roles\":[\"developer\",\"reader\"]}"),
"{\"message\":\"user 'developer@tenant' is no longer a member of role 'developer' of 'my-tenant', role 'reader' of 'my-tenant'\"}");
tester.assertResponse(request("/user/v1/tenant/my-tenant", DELETE)
.roles(operator)
.data("{\"user\":\"administrator@tenant\",\"roleName\":\"administrator\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Can't remove the last administrator of a tenant.\"}", 400);
tester.assertResponse(request("/application/v4/tenant/my-tenant", DELETE)
.roles(Set.of(Role.developer(id.tenant()))),
"{\n" +
" \"code\" : 403,\n" +
" \"message\" : \"Access denied\"\n" +
"}", 403);
} | .data("{\"name\":\"secret-foo\",\"awsId\":\"123\",\"role\":\"secret-role\",\"externalId\":\"abc\"}"), | public void testUserManagement() {
ContainerTester tester = new ContainerTester(container, responseFiles);
assertEquals(SystemName.Public, tester.controller().system());
Set<Role> operator = Set.of(Role.hostedOperator());
ApplicationId id = ApplicationId.from("my-tenant", "my-app", "default");
tester.assertResponse(request("/application/v4/"),
accessDenied, 403);
tester.assertResponse(request("/application/v4/tenant")
.roles(operator),
"[]");
tester.assertResponse(request("/api/application/v4/tenant")
.roles(operator),
"[]");
tester.assertResponse(request("/application/v4/tenant/my-tenant", POST)
.data("{\"token\":\"hello\"}"),
"{\"error-code\":\"FORBIDDEN\",\"message\":\"You are not currently permitted to create tenants. Please contact the Vespa team to request access.\"}", 403);
tester.assertResponse(request("/application/v4/tenant/my-tenant", POST)
.roles(operator)
.principal("administrator@tenant")
.data("{\"token\":\"hello\"}"),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/user/v1/"),
accessDenied, 403);
tester.assertResponse(request("/user/v1/tenant/my-tenant", POST)
.roles(Set.of(Role.administrator(id.tenant())))
.data("{\"user\":\"evil@evil\",\"roleName\":\"hostedOperator\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Malformed or illegal role name 'hostedOperator'.\"}", 400);
tester.assertResponse(request("/user/v1/tenant/my-tenant", POST)
.roles(Set.of(Role.administrator(id.tenant())))
.data("{\"user\":\"developer@tenant\",\"roles\":[\"developer\",\"reader\"]}"),
"{\"message\":\"user 'developer@tenant' is now a member of role 'developer' of 'my-tenant', role 'reader' of 'my-tenant'\"}");
tester.assertResponse(request("/user/v1/tenant/my-tenant", POST)
.roles(Set.of(Role.developer(id.tenant())))
.data("{\"user\":\"developer@tenant\",\"roleName\":\"administrator\"}"),
accessDenied, 403);
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app", POST)
.roles(Set.of(Role.administrator(TenantName.from("my-tenant"))))
.data("{\"user\":\"headless@app\",\"roleName\":\"headless\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"role 'headless' of 'my-app' owned by 'my-tenant' not found\"}", 400);
tester.assertResponse(request("/application/v4/tenant/my-tenant/application/my-app", POST)
.principal("developer@tenant")
.roles(Set.of(Role.developer(id.tenant()))),
new File("application-created.json"));
tester.assertResponse(request("/application/v4/tenant/other-tenant/application/my-app", POST)
.roles(Set.of(Role.administrator(id.tenant()))),
accessDenied, 403);
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app", POST)
.roles(Set.of(Role.hostedOperator()))
.data("{\"user\":\"developer@app\",\"roleName\":\"developer\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Malformed or illegal role name 'developer'.\"}", 400);
tester.assertResponse(request("/user/v1/tenant/my-tenant")
.roles(Set.of(Role.reader(id.tenant()))),
new File("tenant-roles.json"));
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app")
.roles(Set.of(Role.administrator(id.tenant()))),
new File("application-roles.json"));
tester.assertResponse(request("/api/user/v1/tenant/my-tenant/application/my-app")
.roles(Set.of(Role.administrator(id.tenant()))),
new File("application-roles.json"));
tester.assertResponse(request("/application/v4/tenant/my-tenant/application/my-app/key", POST)
.roles(Set.of(Role.developer(id.tenant())))
.data("{\"key\":\"" + pemPublicKey + "\"}"),
new File("first-deploy-key.json"));
tester.assertResponse(request("/application/v4/tenant/my-tenant/key", POST)
.principal("joe@dev")
.roles(Set.of(Role.developer(id.tenant())))
.data("{\"key\":\"" + pemPublicKey + "\"}"),
new File("first-developer-key.json"));
tester.assertResponse(request("/application/v4/tenant/my-tenant/key", POST)
.principal("operator@tenant")
.roles(Set.of(Role.developer(id.tenant())))
.data("{\"key\":\"" + pemPublicKey + "\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Key "+ quotedPemPublicKey + " is already owned by joe@dev\"}",
400);
tester.assertResponse(request("/application/v4/tenant/my-tenant/key", POST)
.principal("developer@tenant")
.roles(Set.of(Role.developer(id.tenant())))
.data("{\"key\":\"" + otherPemPublicKey + "\"}"),
new File("both-developer-keys.json"));
tester.assertResponse(request("/application/v4/tenant/my-tenant/")
.roles(Set.of(Role.reader(id.tenant()))),
new File("tenant-with-keys.json"));
tester.assertResponse(request("/application/v4/tenant/my-tenant/key", DELETE)
.roles(Set.of(Role.developer(id.tenant())))
.data("{\"key\":\"" + pemPublicKey + "\"}"),
new File("second-developer-key.json"));
tester.assertResponse(request("/application/v4/tenant/my-tenant/secret-store/", PUT)
.principal("admin@tenant")
.roles(Set.of(Role.administrator(id.tenant())))
.data("{\"name\":\"secret-foo\",\"awsId\":\"123\",\"role\":\"secret-role\",\"externalId\":\"abc\"}"),
"{\"message\":\"Configured secret store: TenantSecretStore{name='secret-foo', awsId='123', role='secret-role'}\"}",
200);
tester.assertResponse(request("/application/v4/tenant/my-tenant")
.principal("reader@tenant")
.roles(Set.of(Role.reader(id.tenant()))),
new File("tenant-with-secrets.json"));
tester.assertResponse(request("/application/v4/tenant/my-tenant/application/my-app", DELETE)
.roles(Set.of(Role.developer(id.tenant()))),
"{\"message\":\"Deleted application my-tenant.my-app\"}");
tester.assertResponse(request("/user/v1/tenant/my-tenant", DELETE)
.roles(Set.of(Role.administrator(id.tenant())))
.data("{\"user\":\"developer@tenant\",\"roles\":[\"developer\",\"reader\"]}"),
"{\"message\":\"user 'developer@tenant' is no longer a member of role 'developer' of 'my-tenant', role 'reader' of 'my-tenant'\"}");
tester.assertResponse(request("/user/v1/tenant/my-tenant", DELETE)
.roles(operator)
.data("{\"user\":\"administrator@tenant\",\"roleName\":\"administrator\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Can't remove the last administrator of a tenant.\"}", 400);
tester.assertResponse(request("/application/v4/tenant/my-tenant", DELETE)
.roles(Set.of(Role.developer(id.tenant()))),
"{\n" +
" \"code\" : 403,\n" +
" \"message\" : \"Access denied\"\n" +
"}", 403);
} | class UserApiTest extends ControllerContainerCloudTest {
private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/";
private static final String pemPublicKey = "-----BEGIN PUBLIC KEY-----\n" +
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\n" +
"z/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\n" +
"-----END PUBLIC KEY-----\n";
private static final String otherPemPublicKey = "-----BEGIN PUBLIC KEY-----\n" +
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEFELzPyinTfQ/sZnTmRp5E4Ve/sbE\n" +
"pDhJeqczkyFcT2PysJ5sZwm7rKPEeXDOhzTPCyRvbUqc2SGdWbKUGGa/Yw==\n" +
"-----END PUBLIC KEY-----\n";
private static final String quotedPemPublicKey = pemPublicKey.replaceAll("\\n", "\\\\n");
@Test
@Test
public void userMetadataTest() {
ContainerTester tester = new ContainerTester(container, responseFiles);
((InMemoryFlagSource) tester.controller().flagSource())
.withBooleanFlag(PermanentFlags.ENABLE_PUBLIC_SIGNUP_FLOW.id(), true);
ControllerTester controller = new ControllerTester(tester);
Set<Role> operator = Set.of(Role.hostedOperator(), Role.hostedSupporter(), Role.hostedAccountant());
User user = new User("dev@domail", "Joe Developer", "dev", null);
tester.assertResponse(request("/api/user/v1/user")
.roles(operator)
.user(user),
new File("user-without-applications.json"));
controller.createTenant("tenant1", Tenant.Type.cloud);
controller.createApplication("tenant1", "app1", "default");
controller.createApplication("tenant1", "app2", "default");
controller.createApplication("tenant1", "app2", "myinstance");
controller.createApplication("tenant1", "app3");
controller.createTenant("tenant2", Tenant.Type.cloud);
controller.createApplication("tenant2", "app2", "test");
controller.createTenant("tenant3", Tenant.Type.cloud);
controller.createApplication("tenant3", "app1");
controller.createTenant("sandbox", Tenant.Type.cloud);
controller.createApplication("sandbox", "app1", "default");
controller.createApplication("sandbox", "app2", "default");
controller.createApplication("sandbox", "app2", "dev");
tester.assertResponse(request("/api/user/v1/user")
.roles(operator)
.user(user),
new File("user-without-applications.json"));
tester.assertResponse(request("/api/user/v1/user")
.roles(Set.of(Role.administrator(TenantName.from("tenant1")),
Role.developer(TenantName.from("tenant2")),
Role.developer(TenantName.from("sandbox")),
Role.reader(TenantName.from("sandbox"))))
.user(user),
new File("user-with-applications-cloud.json"));
}
@Test
public void maxTrialTenants() {
ContainerTester tester = new ContainerTester(container, responseFiles);
((InMemoryFlagSource) tester.controller().flagSource())
.withIntFlag(Flags.MAX_TRIAL_TENANTS.id(), 1)
.withBooleanFlag(PermanentFlags.ENABLE_PUBLIC_SIGNUP_FLOW.id(), true);
ControllerTester controller = new ControllerTester(tester);
Set<Role> operator = Set.of(Role.hostedOperator(), Role.hostedSupporter(), Role.hostedAccountant());
User user = new User("dev@domail", "Joe Developer", "dev", null);
controller.createTenant("tenant1", Tenant.Type.cloud);
tester.assertResponse(
request("/api/user/v1/user").user(user),
new File("user-without-trial-capacity-cloud.json"));
}
} | class UserApiTest extends ControllerContainerCloudTest {
private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/";
private static final String pemPublicKey = "-----BEGIN PUBLIC KEY-----\n" +
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\n" +
"z/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\n" +
"-----END PUBLIC KEY-----\n";
private static final String otherPemPublicKey = "-----BEGIN PUBLIC KEY-----\n" +
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEFELzPyinTfQ/sZnTmRp5E4Ve/sbE\n" +
"pDhJeqczkyFcT2PysJ5sZwm7rKPEeXDOhzTPCyRvbUqc2SGdWbKUGGa/Yw==\n" +
"-----END PUBLIC KEY-----\n";
private static final String quotedPemPublicKey = pemPublicKey.replaceAll("\\n", "\\\\n");
@Test
@Test
public void userMetadataTest() {
ContainerTester tester = new ContainerTester(container, responseFiles);
((InMemoryFlagSource) tester.controller().flagSource())
.withBooleanFlag(PermanentFlags.ENABLE_PUBLIC_SIGNUP_FLOW.id(), true);
ControllerTester controller = new ControllerTester(tester);
Set<Role> operator = Set.of(Role.hostedOperator(), Role.hostedSupporter(), Role.hostedAccountant());
User user = new User("dev@domail", "Joe Developer", "dev", null);
tester.assertResponse(request("/api/user/v1/user")
.roles(operator)
.user(user),
new File("user-without-applications.json"));
controller.createTenant("tenant1", Tenant.Type.cloud);
controller.createApplication("tenant1", "app1", "default");
controller.createApplication("tenant1", "app2", "default");
controller.createApplication("tenant1", "app2", "myinstance");
controller.createApplication("tenant1", "app3");
controller.createTenant("tenant2", Tenant.Type.cloud);
controller.createApplication("tenant2", "app2", "test");
controller.createTenant("tenant3", Tenant.Type.cloud);
controller.createApplication("tenant3", "app1");
controller.createTenant("sandbox", Tenant.Type.cloud);
controller.createApplication("sandbox", "app1", "default");
controller.createApplication("sandbox", "app2", "default");
controller.createApplication("sandbox", "app2", "dev");
tester.assertResponse(request("/api/user/v1/user")
.roles(operator)
.user(user),
new File("user-without-applications.json"));
tester.assertResponse(request("/api/user/v1/user")
.roles(Set.of(Role.administrator(TenantName.from("tenant1")),
Role.developer(TenantName.from("tenant2")),
Role.developer(TenantName.from("sandbox")),
Role.reader(TenantName.from("sandbox"))))
.user(user),
new File("user-with-applications-cloud.json"));
}
@Test
public void maxTrialTenants() {
ContainerTester tester = new ContainerTester(container, responseFiles);
((InMemoryFlagSource) tester.controller().flagSource())
.withIntFlag(Flags.MAX_TRIAL_TENANTS.id(), 1)
.withBooleanFlag(PermanentFlags.ENABLE_PUBLIC_SIGNUP_FLOW.id(), true);
ControllerTester controller = new ControllerTester(tester);
Set<Role> operator = Set.of(Role.hostedOperator(), Role.hostedSupporter(), Role.hostedAccountant());
User user = new User("dev@domail", "Joe Developer", "dev", null);
controller.createTenant("tenant1", Tenant.Type.cloud);
tester.assertResponse(
request("/api/user/v1/user").user(user),
new File("user-without-trial-capacity-cloud.json"));
}
} |
More generally, this should be invoked after all database.doNextZooKeeperTask() invocations. There are 6 of these invocations, but only 2 are followed by `systemStateBroadcaster.setLastClusterStateVersionWrittenToZooKeeper()` which seems wrong. But instead of this setter it seems better to pass `database.getLastKnownStateBundleVersionWrittenBySelf()` as an argument to broadcastNewStateBundleIfRequired() ? | public void tick() throws Exception {
synchronized (monitor) {
boolean didWork;
didWork = database.doNextZooKeeperTask(databaseContext);
systemStateBroadcaster.setLastClusterStateVersionWrittenToZooKeeper(
database.getLastKnownStateBundleVersionWrittenBySelf());
didWork |= updateMasterElectionState();
didWork |= handleLeadershipEdgeTransitions();
stateChangeHandler.setMaster(isMaster);
if ( ! isRunning()) { return; }
didWork |= stateGatherer.processResponses(this);
if ( ! isRunning()) { return; }
if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) {
didWork |= resyncLocallyCachedState();
} else {
stepDownAsStateGatherer();
}
if ( ! isRunning()) { return; }
didWork |= systemStateBroadcaster.processResponses();
if ( ! isRunning()) { return; }
if (masterElectionHandler.isMaster()) {
didWork |= broadcastClusterStateToEligibleNodes();
systemStateBroadcaster.checkIfClusterStateIsAckedByAllDistributors(database, databaseContext, this);
}
if ( ! isRunning()) { return; }
didWork |= processAnyPendingStatusPageRequest();
if ( ! isRunning()) { return; }
if (rpcServer != null) {
didWork |= rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this, this);
}
if ( ! isRunning()) { return; }
didWork |= processNextQueuedRemoteTask();
didWork |= completeSatisfiedVersionDependentTasks();
didWork |= maybePublishOldMetrics();
processingCycle = false;
++cycleCount;
long tickStopTime = timer.getCurrentTimeInMillis();
if (tickStopTime >= tickStartTime) {
metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork);
}
if ( ! didWork && ! waitingForCycle) {
monitor.wait(options.cycleWaitTime);
}
if ( ! isRunning()) { return; }
tickStartTime = timer.getCurrentTimeInMillis();
processingCycle = true;
if (nextOptions != null) {
switchToNewConfig();
}
}
if (isRunning()) {
propagateNewStatesToListeners();
}
} | systemStateBroadcaster.setLastClusterStateVersionWrittenToZooKeeper( | public void tick() throws Exception {
synchronized (monitor) {
boolean didWork;
didWork = database.doNextZooKeeperTask(databaseContext);
didWork |= updateMasterElectionState();
didWork |= handleLeadershipEdgeTransitions();
stateChangeHandler.setMaster(isMaster);
if ( ! isRunning()) { return; }
didWork |= stateGatherer.processResponses(this);
if ( ! isRunning()) { return; }
if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) {
didWork |= resyncLocallyCachedState();
} else {
stepDownAsStateGatherer();
}
if ( ! isRunning()) { return; }
didWork |= systemStateBroadcaster.processResponses();
if ( ! isRunning()) { return; }
if (masterElectionHandler.isMaster()) {
didWork |= broadcastClusterStateToEligibleNodes();
systemStateBroadcaster.checkIfClusterStateIsAckedByAllDistributors(database, databaseContext, this);
}
if ( ! isRunning()) { return; }
didWork |= processAnyPendingStatusPageRequest();
if ( ! isRunning()) { return; }
if (rpcServer != null) {
didWork |= rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this, this);
}
if ( ! isRunning()) { return; }
didWork |= processNextQueuedRemoteTask();
didWork |= completeSatisfiedVersionDependentTasks();
didWork |= maybePublishOldMetrics();
processingCycle = false;
++cycleCount;
long tickStopTime = timer.getCurrentTimeInMillis();
if (tickStopTime >= tickStartTime) {
metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork);
}
if ( ! didWork && ! waitingForCycle) {
monitor.wait(options.cycleWaitTime);
}
if ( ! isRunning()) { return; }
tickStartTime = timer.getCurrentTimeInMillis();
processingCycle = true;
if (nextOptions != null) {
switchToNewConfig();
}
}
if (isRunning()) {
propagateNewStatesToListeners();
}
} | class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAddedOrRemovedListener, SystemStateListener,
Runnable, RemoteClusterControllerTaskScheduler {
private static final Logger log = Logger.getLogger(FleetController.class.getName());
private final Timer timer;
private final Object monitor;
private final EventLog eventLog;
private final NodeLookup nodeLookup;
private final ContentCluster cluster;
private final Communicator communicator;
private final NodeStateGatherer stateGatherer;
private final StateChangeHandler stateChangeHandler;
private final SystemStateBroadcaster systemStateBroadcaster;
private final StateVersionTracker stateVersionTracker;
private final StatusPageServerInterface statusPageServer;
private final RpcServer rpcServer;
private final DatabaseHandler database;
private final MasterElectionHandler masterElectionHandler;
private Thread runner = null;
private final AtomicBoolean running = new AtomicBoolean(true);
private FleetControllerOptions options;
private FleetControllerOptions nextOptions;
private final List<SystemStateListener> systemStateListeners = new CopyOnWriteArrayList<>();
private boolean processingCycle = false;
private boolean wantedStateChanged = false;
private long cycleCount = 0;
private long lastMetricUpdateCycleCount = 0;
private long nextStateSendTime = 0;
private Long controllerThreadId = null;
private boolean waitingForCycle = false;
private final StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter();
private final List<ClusterStateBundle> newStates = new ArrayList<>();
private final List<ClusterStateBundle> convergedStates = new ArrayList<>();
private long configGeneration = -1;
private long nextConfigGeneration = -1;
private final Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>();
private final MetricUpdater metricUpdater;
private boolean isMaster = false;
private boolean isStateGatherer = false;
private long firstAllowedStateBroadcast = Long.MAX_VALUE;
private long tickStartTime = Long.MAX_VALUE;
private final List<RemoteClusterControllerTask> tasksPendingStateRecompute = new ArrayList<>();
private final Queue<VersionDependentTaskCompletion> taskCompletionQueue = new ArrayDeque<>();
private Set<String> configuredBucketSpaces = Collections.emptySet();
private final RunDataExtractor dataExtractor = new RunDataExtractor() {
@Override
public FleetControllerOptions getOptions() { return options; }
@Override
public long getConfigGeneration() { return configGeneration; }
@Override
public ContentCluster getCluster() { return cluster; }
};
public FleetController(Timer timer,
EventLog eventLog,
ContentCluster cluster,
NodeStateGatherer nodeStateGatherer,
Communicator communicator,
StatusPageServerInterface statusPage,
RpcServer server,
NodeLookup nodeLookup,
DatabaseHandler database,
StateChangeHandler stateChangeHandler,
SystemStateBroadcaster systemStateBroadcaster,
MasterElectionHandler masterElectionHandler,
MetricUpdater metricUpdater,
FleetControllerOptions options) {
log.info("Starting up cluster controller " + options.fleetControllerIndex + " for cluster " + cluster.getName());
this.timer = timer;
this.monitor = timer;
this.eventLog = eventLog;
this.options = options;
this.nodeLookup = nodeLookup;
this.cluster = cluster;
this.communicator = communicator;
this.database = database;
this.stateGatherer = nodeStateGatherer;
this.stateChangeHandler = stateChangeHandler;
this.systemStateBroadcaster = systemStateBroadcaster;
this.stateVersionTracker = new StateVersionTracker(options.minMergeCompletionRatio);
this.metricUpdater = metricUpdater;
this.statusPageServer = statusPage;
this.rpcServer = server;
this.masterElectionHandler = masterElectionHandler;
this.statusRequestRouter.addHandler(
"^/node=([a-z]+)\\.(\\d+)$",
new LegacyNodePageRequestHandler(timer, eventLog, cluster));
this.statusRequestRouter.addHandler(
"^/state.*",
new NodeHealthRequestHandler(dataExtractor));
this.statusRequestRouter.addHandler(
"^/clusterstate",
new ClusterStateRequestHandler(stateVersionTracker));
this.statusRequestRouter.addHandler(
"^/$",
new LegacyIndexPageRequestHandler(
timer, options.showLocalSystemStatesInEventLog, cluster,
masterElectionHandler, stateVersionTracker,
eventLog, timer.getCurrentTimeInMillis(), dataExtractor));
propagateOptions();
}
/**
 * Factory that builds a fully wired, production-configured controller
 * (real timer, RPC communicator, ZooKeeper-backed database, Slobrok lookup)
 * and starts its event thread.
 *
 * @return a running FleetController instance
 * @throws Exception if any subsystem fails to initialize
 */
public static FleetController create(FleetControllerOptions options,
                                     StatusPageServerInterface statusPageServer,
                                     MetricReporter metricReporter) throws Exception {
    Timer timer = new RealTimer();
    MetricUpdater metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex);
    EventLog log = new EventLog(timer, metricUpdater);
    ContentCluster cluster = new ContentCluster(
            options.clusterName,
            options.nodes,
            options.storageDistribution);
    NodeStateGatherer stateGatherer = new NodeStateGatherer(timer, timer, log);
    Communicator communicator = new RPCCommunicator(
            RPCCommunicator.createRealSupervisor(),
            timer,
            options.fleetControllerIndex,
            options.nodeStateRequestTimeoutMS,
            options.nodeStateRequestTimeoutEarliestPercentage,
            options.nodeStateRequestTimeoutLatestPercentage,
            options.nodeStateRequestRoundTripTimeMaxSeconds);
    DatabaseHandler database = new DatabaseHandler(new ZooKeeperDatabaseFactory(), timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer);
    NodeLookup lookUp = new SlobrokClient(timer);
    StateChangeHandler stateGenerator = new StateChangeHandler(timer, log);
    SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer);
    MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer);
    // Note: no RPC server is passed here (null); RPC is wired separately when used.
    FleetController controller = new FleetController(
            timer, log, cluster, stateGatherer, communicator, statusPageServer, null, lookUp, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options);
    controller.start();
    return controller;
}
/** Starts the controller event thread, which runs {@code run()}. */
public void start() {
    runner = new Thread(this);
    runner.start();
}
/** Returns the object all controller state access is synchronized on. */
public Object getMonitor() { return monitor; }
/** Returns whether the event thread is (still) supposed to be running. */
public boolean isRunning() {
    return running.get();
}
/** Returns whether this controller currently holds mastership. */
public boolean isMaster() {
    synchronized (monitor) {
        return masterElectionHandler.isMaster();
    }
}
/** Returns the most recently broadcast baseline cluster state. */
public ClusterState getClusterState() {
    synchronized (monitor) {
        return systemStateBroadcaster.getClusterState();
    }
}
/** Returns the most recently broadcast full cluster state bundle. */
public ClusterStateBundle getClusterStateBundle() {
    synchronized (monitor) {
        return systemStateBroadcaster.getClusterStateBundle();
    }
}
/**
 * Queues a task for execution on the controller thread. The task is picked
 * up by the event loop; completion is signalled via the task itself.
 */
public void schedule(RemoteClusterControllerTask task) {
    synchronized (monitor) {
        log.fine("Scheduled remote task " + task.getClass().getName() + " for execution");
        remoteTasks.add(task);
    }
}
/**
 * Used for unit testing. Registers a listener and immediately replays the
 * current published state (and last converged state, if any) to it so the
 * listener starts out synchronized with the controller's view.
 */
public void addSystemStateListener(SystemStateListener listener) {
    systemStateListeners.add(listener);
    com.yahoo.vdslib.state.ClusterState state = getSystemState();
    if (state == null) {
        throw new NullPointerException("Cluster state should never be null at this point");
    }
    listener.handleNewPublishedState(ClusterStateBundle.ofBaselineOnly(AnnotatedClusterState.withoutAnnotations(state)));
    ClusterStateBundle convergedState = systemStateBroadcaster.getLastClusterStateBundleConverged();
    // May be null before the cluster has converged on any state at all.
    if (convergedState != null) {
        listener.handleStateConvergedInCluster(convergedState);
    }
}
/** Returns a defensive copy of the current options. */
public FleetControllerOptions getOptions() {
    synchronized(monitor) {
        return options.clone();
    }
}
/**
 * Returns the last state reported by the given node.
 *
 * @throws IllegalStateException if the node is not part of this cluster
 */
public NodeState getReportedNodeState(Node n) {
    synchronized(monitor) {
        NodeInfo node = cluster.getNodeInfo(n);
        if (node == null) {
            throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster);
        }
        return node.getReportedState();
    }
}
/**
 * Returns the wanted (administratively set) state for the given node.
 *
 * @throws IllegalStateException if the node is not part of this cluster
 */
public NodeState getWantedNodeState(Node n) {
    synchronized(monitor) {
        NodeInfo node = cluster.getNodeInfo(n);
        // Fail with the same explicit exception as getReportedNodeState()
        // instead of an opaque NullPointerException on unknown nodes.
        if (node == null) {
            throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster);
        }
        return node.getWantedState();
    }
}
/** Returns the most recently versioned (published or about-to-publish) cluster state. */
public com.yahoo.vdslib.state.ClusterState getSystemState() {
    synchronized(monitor) {
        return stateVersionTracker.getVersionedClusterState();
    }
}
// NOTE(review): assumes an RPC server is configured; rpcServer is null-checked
// elsewhere in this class, so this would NPE without one — confirm callers.
public int getRpcPort() { return rpcServer.getPort(); }
/**
 * Stops the event thread (if running), then shuts down the subsystems:
 * database, status page server, RPC server, communicator and node lookup.
 * Intended to be called from a thread other than the event thread.
 *
 * @throws InterruptedException if interrupted while joining the event thread
 * @throws java.io.IOException if a subsystem fails to close cleanly
 */
public void shutdown() throws InterruptedException, java.io.IOException {
    if (runner != null && isRunning()) {
        log.log(Level.INFO, "Joining event thread.");
        running.set(false);
        synchronized(monitor) { monitor.notifyAll(); }
        runner.join();
    }
    log.log(Level.INFO, "Fleetcontroller done shutting down event thread.");
    // The event thread is gone; the calling thread takes over the
    // "controller thread" role for the remaining teardown calls.
    controllerThreadId = Thread.currentThread().getId();
    database.shutdown(this);
    if (statusPageServer != null) {
        statusPageServer.shutdown();
    }
    if (rpcServer != null) {
        rpcServer.shutdown();
    }
    communicator.shutdown();
    nodeLookup.shutdown();
}
/**
 * Stages a new option set for the controller. The options are not applied
 * here; the event thread picks up {@code nextOptions} and applies them via
 * propagateOptions() on its next cycle.
 */
public void updateOptions(FleetControllerOptions options, long configGeneration) {
    synchronized(monitor) {
        // Config must target this controller instance.
        assert(this.options.fleetControllerIndex == options.fleetControllerIndex);
        log.log(Level.INFO, "Fleetcontroller " + options.fleetControllerIndex + " has new options");
        nextOptions = options.clone();
        nextConfigGeneration = configGeneration;
        monitor.notifyAll();
    }
}
/** Asserts the caller is the controller event thread; used to guard non-thread-safe state. */
private void verifyInControllerThread() {
    if (controllerThreadId != null && controllerThreadId != Thread.currentThread().getId()) {
        throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen.");
    }
}
/** Shorthand for the cluster state of the newest candidate state. */
private ClusterState latestCandidateClusterState() {
    return stateVersionTracker.getLatestCandidateState().getClusterState();
}
// --- Listener callbacks below are all invoked on the controller thread only. ---

/** A node has reported a new state; feed it into state change handling. */
@Override
public void handleNewNodeState(NodeInfo node, NodeState newState) {
    verifyInControllerThread();
    stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this);
}
/** An operator has set a new wanted state; remember to persist it (see handleLeadershipEdgeTransitions). */
@Override
public void handleNewWantedNodeState(NodeInfo node, NodeState newState) {
    verifyInControllerThread();
    wantedStateChanged = true;
    stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState);
}
/** A node delivered new host info; may trigger feed-block recomputation. */
@Override
public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) {
    verifyInControllerThread();
    triggerBundleRecomputationIfResourceExhaustionStateChanged(nodeInfo, newHostInfo);
    stateVersionTracker.handleUpdatedHostInfo(nodeInfo, newHostInfo);
}
/**
 * If the set of exhausted resources on a node changed with the new host info,
 * flag the cluster state as changed so the feed-block status is recomputed.
 */
private void triggerBundleRecomputationIfResourceExhaustionStateChanged(NodeInfo nodeInfo, HostInfo newHostInfo) {
    if (!options.clusterFeedBlockEnabled) {
        return;
    }
    var calc = createResourceExhaustionCalculator();
    var previouslyExhausted = calc.enumerateNodeResourceExhaustions(nodeInfo);
    var nowExhausted = calc.resourceExhaustionsFromHostInfo(nodeInfo, newHostInfo);
    if (!previouslyExhausted.equals(nowExhausted)) {
        log.fine(() -> String.format("Triggering state recomputation due to change in cluster feed block: %s -> %s",
                previouslyExhausted, nowExhausted));
        stateChangeHandler.setStateChangedFlag();
    }
}
/** A node was added to the cluster (e.g. appeared in lookup). */
@Override
public void handleNewNode(NodeInfo node) {
    verifyInControllerThread();
    stateChangeHandler.handleNewNode(node);
}
/** A previously known node disappeared from lookup. */
@Override
public void handleMissingNode(NodeInfo node) {
    verifyInControllerThread();
    stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this);
}
/** A node registered a different RPC address. */
@Override
public void handleNewRpcAddress(NodeInfo node) {
    verifyInControllerThread();
    stateChangeHandler.handleNewRpcAddress(node);
}
/** A node re-registered its previously known RPC address. */
@Override
public void handleReturnedRpcAddress(NodeInfo node) {
    verifyInControllerThread();
    stateChangeHandler.handleReturnedRpcAddress(node);
}
/**
 * A new cluster state bundle has been promoted to published. Queues it for
 * listener notification, updates metrics, hands it to the broadcaster and —
 * if we are master — persists its metadata to ZooKeeper.
 */
@Override
public void handleNewPublishedState(ClusterStateBundle stateBundle) {
    verifyInControllerThread();
    ClusterState baselineState = stateBundle.getBaselineClusterState();
    newStates.add(stateBundle);
    metricUpdater.updateClusterStateMetrics(cluster, baselineState,
            ResourceUsageStats.calculateFrom(cluster.getNodeInfo(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock()));
    // Track when metrics were last refreshed so maybePublishOldMetrics() can re-emit them.
    lastMetricUpdateCycleCount = cycleCount;
    systemStateBroadcaster.handleNewClusterStates(stateBundle);
    // Only the master writes cluster state metadata to ZooKeeper.
    if (masterElectionHandler.isMaster()) {
        storeClusterStateMetaDataToZooKeeper(stateBundle);
    }
}
/**
 * Re-emits cluster state metrics if more than 300 cycles have passed since
 * they were last published, so dashboards stay fresh even without state
 * changes. Returns true if metrics were (re)published.
 */
private boolean maybePublishOldMetrics() {
    verifyInControllerThread();
    if (cycleCount <= 300 + lastMetricUpdateCycleCount) {
        return false;
    }
    ClusterStateBundle currentBundle = stateVersionTracker.getVersionedClusterStateBundle();
    ClusterState baseline = currentBundle.getBaselineClusterState();
    metricUpdater.updateClusterStateMetrics(cluster, baseline,
            ResourceUsageStats.calculateFrom(cluster.getNodeInfo(), options.clusterFeedBlockLimit, currentBundle.getFeedBlock()));
    lastMetricUpdateCycleCount = cycleCount;
    return true;
}
/**
 * Persists the published state version and full bundle to ZooKeeper and
 * records the version the broadcaster may rely on as durably written.
 *
 * @throws RuntimeException wrapping InterruptedException if the write is interrupted
 */
private void storeClusterStateMetaDataToZooKeeper(ClusterStateBundle stateBundle) {
    try {
        database.saveLatestSystemStateVersion(databaseContext, stateBundle.getVersion());
        database.saveLatestClusterStateBundle(databaseContext, stateBundle);
        systemStateBroadcaster.setLastClusterStateVersionWrittenToZooKeeper(database.getLastKnownStateBundleVersionWrittenBySelf());
    } catch (InterruptedException e) {
        // Restore the interrupt flag (cleared when the exception was thrown)
        // so callers further up the stack can still observe the interruption.
        Thread.currentThread().interrupt();
        throw new RuntimeException("ZooKeeper write interrupted", e);
    }
}
/**
 * This function gives data of the current state in master election.
 * The keys in the given map are indexes of fleet controllers.
 * The values are what fleetcontroller that fleetcontroller wants to
 * become master.
 *
 * If more than half the fleetcontrollers want a node to be master and
 * that node also wants itself as master, that node is the single master.
 * If this condition is not met, there is currently no master.
 */
public void handleFleetData(Map<Integer, Integer> data) {
    verifyInControllerThread();
    log.log(Level.FINEST, "Sending fleet data event on to master election handler");
    metricUpdater.updateMasterElectionMetrics(data);
    masterElectionHandler.handleFleetData(data);
}
/**
 * Called when we can no longer contact database.
 * Forwarded to the election handler, which treats a lost ZooKeeper session
 * as potentially losing mastership.
 */
public void lostDatabaseConnection() {
    verifyInControllerThread();
    masterElectionHandler.lostDatabaseConnection();
}
/**
 * Fails and completes every task that was waiting for a cluster state
 * version, both those awaiting recomputation and those already queued for
 * version-dependent completion. Invoked when leadership is lost, since a
 * non-master can never satisfy such dependencies.
 */
private void failAllVersionDependentTasks() {
    for (RemoteClusterControllerTask pendingTask : tasksPendingStateRecompute) {
        failTaskWithLostLeadership(pendingTask);
    }
    tasksPendingStateRecompute.clear();
    for (VersionDependentTaskCompletion queuedCompletion : taskCompletionQueue) {
        failTaskWithLostLeadership(queuedCompletion.getTask());
    }
    taskCompletionQueue.clear();
}

/** Marks a single task as failed due to lost leadership and signals its completion. */
private void failTaskWithLostLeadership(RemoteClusterControllerTask task) {
    task.handleFailure(RemoteClusterControllerTask.Failure.of(
            RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST));
    task.notifyCompleted();
}
/** Called when all distributors have acked newest cluster state version. */
public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.Context context) throws InterruptedException {
    Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values());
    var currentBundle = stateVersionTracker.getVersionedClusterStateBundle();
    log.fine(() -> String.format("All distributors have ACKed cluster state version %d", currentBundle.getVersion()));
    stateChangeHandler.handleAllDistributorsInSync(currentBundle.getBaselineClusterState(), nodes, database, context);
    // Queue the bundle so listeners get a "converged" notification on the next cycle.
    convergedStates.add(currentBundle);
}
/**
 * Returns true if the given node set differs from the currently configured
 * one — either a different set of nodes, or the same nodes with a changed
 * retired flag on at least one of them.
 */
private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) {
    if (newNodes.size() != cluster.getConfiguredNodes().size()) return true;
    if (!cluster.getConfiguredNodes().values().containsAll(newNodes)) return true;
    // Same node set; a change is then only a flipped retired flag.
    return newNodes.stream()
            .anyMatch(node -> node.retired() != cluster.getConfiguredNodes().get(node.index()).retired());
}
/** This is called when the options field has been set to a new set of options */
private void propagateOptions() {
    verifyInControllerThread();
    // A changed node set invalidates the Slobrok generation cache.
    if (changesConfiguredNodeSet(options.nodes)) {
        cluster.setSlobrokGenerationCount(0);
    }
    configuredBucketSpaces = Collections.unmodifiableSet(
            Stream.of(FixedBucketSpaces.defaultSpace(), FixedBucketSpaces.globalSpace())
                  .collect(Collectors.toSet()));
    stateVersionTracker.setMinMergeCompletionRatio(options.minMergeCompletionRatio);
    communicator.propagateOptions(options);
    // Only the real Slobrok-backed lookup needs connection specs.
    if (nodeLookup instanceof SlobrokClient) {
        ((SlobrokClient) nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs);
    }
    eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize);
    cluster.setPollingFrequency(options.statePollingFrequency);
    cluster.setDistribution(options.storageDistribution);
    cluster.setNodes(options.nodes);
    database.setZooKeeperAddress(options.zooKeeperServerAddress);
    database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout);
    stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod);
    stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS);
    stateChangeHandler.reconfigureFromOptions(options);
    // Force a state recomputation so the new options take effect immediately.
    stateChangeHandler.setStateChangedFlag();
    masterElectionHandler.setFleetControllerCount(options.fleetControllerCount);
    masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod);
    if (rpcServer != null) {
        rpcServer.setMasterElectionHandler(masterElectionHandler);
        try{
            rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort);
        } catch (ListenFailedException e) {
            // Bind failures are tolerated; another service may legitimately own the port now.
            log.log(Level.WARNING, "Failed to bind RPC server to port " + options.rpcPort +". This may be natural if cluster has altered the services running on this node: " + e.getMessage());
        } catch (Exception e) {
            log.log(Level.WARNING, "Failed to initialize RPC server socket: " + e.getMessage());
        }
    }
    if (statusPageServer != null) {
        try{
            statusPageServer.setPort(options.httpPort);
        } catch (Exception e) {
            log.log(Level.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage());
        }
    }
    long currentTime = timer.getCurrentTimeInMillis();
    // Never push the next allowed broadcast further into the future than it already is.
    nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime);
    configGeneration = nextConfigGeneration;
    nextConfigGeneration = -1;
}
/**
 * Resolves and executes the status page handler for the given HTTP request.
 * On handler errors a small HTML error page is synthesized; internal errors
 * hide the stack trace in an HTML comment rather than the visible body.
 */
public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) {
    verifyInControllerThread();
    StatusPageResponse.ResponseCode responseCode;
    String message;
    String hiddenMessage = "";
    try {
        StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest);
        if (handler == null) {
            throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath());
        }
        return handler.handle(httpRequest);
    } catch (FileNotFoundException e) {
        responseCode = StatusPageResponse.ResponseCode.NOT_FOUND;
        message = e.getMessage();
    } catch (Exception e) {
        responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR;
        message = "Internal Server Error";
        hiddenMessage = ExceptionUtils.getStackTraceAsString(e);
        log.log(Level.FINE, "Unknown exception thrown for request " + httpRequest.getRequest() +
                ": " + hiddenMessage);
    }
    TimeZone tz = TimeZone.getTimeZone("UTC");
    long currentTime = timer.getCurrentTimeInMillis();
    StatusPageResponse response = new StatusPageResponse();
    StringBuilder content = new StringBuilder();
    response.setContentType("text/html");
    response.setResponseCode(responseCode);
    // NOTE(review): the raw request string is echoed into the HTML without
    // escaping — presumably safe for internal-only status pages, but confirm.
    content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n");
    content.append("<p>UTC time when creating this page: ").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>");
    response.writeHtmlHeader(content, message);
    response.writeHtmlFooter(content, hiddenMessage);
    response.writeContent(content.toString());
    return response;
}
/**
 * Polls the master election state from the database.
 *
 * @return true if the election state changed (work was done)
 * @throws InterruptedException if the watch was interrupted; propagated so
 *         the event loop can shut down
 */
private boolean updateMasterElectionState() throws InterruptedException {
    try {
        return masterElectionHandler.watchMasterElection(database, databaseContext);
    } catch (InterruptedException e) {
        // Rethrow directly instead of wrapping in a new exception; this
        // preserves the original stack trace and is declared by this method.
        throw e;
    } catch (Exception e) {
        // Log with the throwable so the full stack trace is recorded.
        log.log(Level.WARNING, "Failed to watch master election", e);
    }
    return false;
}
/**
 * Clears gathered node states and logs an event when this node stops being
 * a state gatherer (i.e. is no longer a master candidate). Idempotent.
 */
private void stepDownAsStateGatherer() {
    if (isStateGatherer) {
        cluster.clearStates();
        eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis()));
    }
    isStateGatherer = false;
}
/**
 * Swaps in the staged options (set by updateOptions()) and applies them.
 * Failures are logged but do not kill the event loop.
 */
private void switchToNewConfig() {
    options = nextOptions;
    nextOptions = null;
    try {
        propagateOptions();
    } catch (Exception e) {
        log.log(Level.SEVERE, "Failed to handle new fleet controller config", e);
    }
}
/**
 * Answers at most one pending status page HTTP request, if any.
 *
 * @return true if a request was answered (work was done)
 */
private boolean processAnyPendingStatusPageRequest() {
    if (statusPageServer == null) {
        return false;
    }
    StatusPageServer.HttpRequest pendingRequest = statusPageServer.getCurrentHttpRequest();
    if (pendingRequest == null) {
        return false;
    }
    statusPageServer.answerCurrentStatusRequest(fetchStatusPage(pendingRequest));
    return true;
}
/**
 * Broadcasts a new cluster state bundle (and/or state activations) to the
 * nodes, subject to rate limiting and the initial-broadcast grace period.
 * Broadcasting is skipped entirely while ZooKeeper writes are still pending,
 * so nodes never see a state that is not durably stored.
 *
 * @return true if anything was sent (work was done)
 */
private boolean broadcastClusterStateToEligibleNodes() {
    if (database.hasPendingClusterStateMetaDataStore()) {
        log.log(Level.FINE, "Can't publish current cluster state as it has one or more pending ZooKeeper stores");
        return false;
    }
    boolean sentAny = false;
    long currentTime = timer.getCurrentTimeInMillis();
    // Broadcast either after the grace period, or earlier once all nodes have reported in.
    if ((currentTime >= firstAllowedStateBroadcast || cluster.allStatesReported())
        && currentTime >= nextStateSendTime)
    {
        if (currentTime < firstAllowedStateBroadcast) {
            log.log(Level.FINE, "Not set to broadcast states just yet, but as we have gotten info from all nodes we can do so safely.");
            firstAllowedStateBroadcast = currentTime;
        }
        sentAny = systemStateBroadcaster.broadcastNewStateBundleIfRequired(databaseContext, communicator);
        if (sentAny) {
            // Rate-limit: earliest time the next bundle may be broadcast.
            nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates;
        }
    }
    // Activations are not rate-limited the same way; always check.
    sentAny |= systemStateBroadcaster.broadcastStateActivationsIfRequired(databaseContext, communicator);
    return sentAny;
}
/**
 * Drains the queues of newly published and newly converged state bundles,
 * notifying all registered listeners of each. Listener iteration happens
 * under the systemStateListeners lock so registration is safe concurrently.
 */
private void propagateNewStatesToListeners() {
    if ( ! newStates.isEmpty()) {
        synchronized (systemStateListeners) {
            for (ClusterStateBundle stateBundle : newStates) {
                for (SystemStateListener listener : systemStateListeners) {
                    listener.handleNewPublishedState(stateBundle);
                }
            }
            newStates.clear();
        }
    }
    if ( ! convergedStates.isEmpty()) {
        synchronized (systemStateListeners) {
            for (ClusterStateBundle stateBundle : convergedStates) {
                for (SystemStateListener listener : systemStateListeners) {
                    listener.handleStateConvergedInCluster(stateBundle);
                }
            }
            convergedStates.clear();
        }
    }
}
/**
 * Executes the next queued remote task, if any. Tasks whose completion
 * depends on a future cluster state version are parked for later completion
 * instead of being completed immediately.
 *
 * @return true if a task was processed (work was done)
 */
private boolean processNextQueuedRemoteTask() {
    if (remoteTasks.isEmpty()) {
        return false;
    }
    RemoteClusterControllerTask.Context taskContext = createRemoteTaskProcessingContext();
    RemoteClusterControllerTask task = remoteTasks.poll();
    log.finest(() -> String.format("Processing remote task of type '%s'", task.getClass().getName()));
    task.doRemoteFleetControllerTask(taskContext);
    if (taskMayBeCompletedImmediately(task)) {
        log.finest(() -> String.format("Done processing remote task of type '%s'", task.getClass().getName()));
        task.notifyCompleted();
    } else {
        log.finest(() -> String.format("Remote task of type '%s' queued until state recomputation", task.getClass().getName()));
        tasksPendingStateRecompute.add(task);
    }
    return true;
}
/**
 * A task may be completed right away unless it depends on version ACKs,
 * has not failed, and we are the master responsible for publishing that
 * version (only then must it wait for distributor acknowledgements).
 */
private boolean taskMayBeCompletedImmediately(RemoteClusterControllerTask task) {
    boolean mustAwaitVersionAck = task.hasVersionAckDependency()
            && !task.isFailed()
            && masterElectionHandler.isMaster();
    return !mustAwaitVersionAck;
}
/** Builds the context a remote task needs: cluster, current states, master info and callbacks. */
private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() {
    final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context();
    context.cluster = cluster;
    context.currentConsolidatedState = consolidatedClusterState();
    context.publishedClusterStateBundle = stateVersionTracker.getVersionedClusterStateBundle();
    context.masterInfo = masterElectionHandler;
    // This controller acts as both state-change and node add/remove listener.
    context.nodeStateOrHostInfoChangeHandler = this;
    context.nodeAddedOrRemovedListener = this;
    return context;
}
/**
 * Returns the state version the node has effectively activated: with
 * two-phase (deferred) activation this is the explicit activation ACK,
 * otherwise the plain bundle acknowledgement version.
 */
private static long effectiveActivatedStateVersion(NodeInfo nodeInfo, ClusterStateBundle bundle) {
    if (bundle.deferredActivation()) {
        return nodeInfo.getClusterStateVersionActivationAcked();
    }
    return nodeInfo.getClusterStateVersionBundleAcknowledged();
}
/**
 * Lists the nodes whose effectively activated state version is still below
 * the given version. Returns an empty list if nothing has been broadcast yet.
 */
private List<Node> enumerateNodesNotYetAckedAtLeastVersion(long version) {
    var broadcastBundle = systemStateBroadcaster.getClusterStateBundle();
    if (broadcastBundle == null) {
        return List.of();
    }
    return cluster.getNodeInfo().stream()
            .filter(nodeInfo -> effectiveActivatedStateVersion(nodeInfo, broadcastBundle) < version)
            .map(NodeInfo::getNode)
            .collect(Collectors.toList());
}
/**
 * Joins the elements of {@code list} with ", ", but shows at most
 * {@code limit} elements; any excess is summarized as "(... and N more)".
 */
private static <E> String stringifyListWithLimits(List<E> list, int limit) {
    int shownCount = Math.min(list.size(), limit);
    String shown = list.stream()
            .limit(shownCount)
            .map(Object::toString)
            .collect(Collectors.joining(", "));
    if (list.size() <= limit) {
        return shown;
    }
    return String.format("%s (... and %d more)", shown, list.size() - shownCount);
}
/**
 * Builds a human-readable summary of nodes that have not yet converged to
 * the given version, truncated per configured limits. Returns an empty
 * string if all nodes have converged.
 */
private String buildNodesNotYetConvergedMessage(long taskConvergeVersion) {
    var nodes = enumerateNodesNotYetAckedAtLeastVersion(taskConvergeVersion);
    if (nodes.isEmpty()) {
        return "";
    }
    return String.format("the following nodes have not converged to at least version %d: %s",
            taskConvergeVersion, stringifyListWithLimits(nodes, options.maxDivergentNodesPrintedInTaskErrorMessages));
}
/**
 * Completes queued version-dependent tasks whose required state version has
 * been ACKed by all nodes, and fails tasks whose wait deadline has expired.
 * The queue is processed in order and processing stops at the first task
 * that is neither satisfied nor expired (queue is version-ordered).
 *
 * @return true if any task was completed or failed (work was done)
 */
private boolean completeSatisfiedVersionDependentTasks() {
    int publishedVersion = systemStateBroadcaster.lastClusterStateVersionInSync();
    long queueSizeBefore = taskCompletionQueue.size();
    final long now = timer.getCurrentTimeInMillis();
    while (!taskCompletionQueue.isEmpty()) {
        VersionDependentTaskCompletion taskCompletion = taskCompletionQueue.peek();
        if (publishedVersion >= taskCompletion.getMinimumVersion()) {
            log.fine(() -> String.format("Deferred task of type '%s' has minimum version %d, published is %d; completing",
                    taskCompletion.getTask().getClass().getName(), taskCompletion.getMinimumVersion(), publishedVersion));
            taskCompletion.getTask().notifyCompleted();
            taskCompletionQueue.remove();
        } else if (taskCompletion.getDeadlineTimePointMs() <= now) {
            var details = buildNodesNotYetConvergedMessage(taskCompletion.getMinimumVersion());
            log.log(Level.WARNING, () -> String.format("Deferred task of type '%s' has exceeded wait deadline; completing with failure (details: %s)",
                    taskCompletion.getTask().getClass().getName(), details));
            taskCompletion.getTask().handleFailure(RemoteClusterControllerTask.Failure.of(
                    RemoteClusterControllerTask.FailureCondition.DEADLINE_EXCEEDED, details));
            taskCompletion.getTask().notifyCompleted();
            taskCompletionQueue.remove();
        } else {
            // Head of queue is still pending and within deadline; later
            // entries require at least as high a version, so stop here.
            break;
        }
    }
    return (taskCompletionQueue.size() != queueSizeBefore);
}
/**
 * A "consolidated" cluster state is guaranteed to have up-to-date information on which nodes are
 * up or down even when the whole cluster is down. The regular, published cluster state is not
 * normally updated to reflect node events when the cluster is down.
 */
ClusterState consolidatedClusterState() {
    final ClusterState publishedState = stateVersionTracker.getVersionedClusterState();
    if (publishedState.getClusterState() == State.UP) {
        return publishedState;
    }
    // Cluster is down: use the freshest candidate state but keep the
    // published version number so callers see a consistent version.
    final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone();
    current.setVersion(publishedState.getVersion());
    return current;
}
/*
System test observations:
- a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling
- long time before content node state convergence (though this seems to be the case for legacy impl as well)
*/
/**
 * One resync pass for a master-candidate: periodically reloads persisted
 * wanted states and start timestamps (while not master), refreshes the node
 * set from lookup, sends state requests, runs timers and recomputes the
 * cluster state if needed. Also promotes this node to state gatherer.
 *
 * @return true if any sub-step did work
 */
private boolean resyncLocallyCachedState() throws InterruptedException {
    boolean didWork = false;
    // Refresh data from the database at regular intervals if we are not the master.
    if ( ! isMaster && cycleCount % 100 == 0) {
        didWork = database.loadWantedStates(databaseContext);
        didWork |= database.loadStartTimestamps(cluster);
    }
    didWork |= nodeLookup.updateCluster(cluster, this);
    didWork |= stateGatherer.sendMessages(cluster, communicator, this);
    didWork |= stateChangeHandler.watchTimers(cluster, stateVersionTracker.getLatestCandidateState().getClusterState(), this);
    didWork |= recomputeClusterStateIfRequired();
    // First pass as state gatherer: pick up the persisted state version and
    // force a recomputation. NOTE(review): the inner !isMaster guard means a
    // sitting master becoming gatherer skips this — presumably intentional
    // since the master already did this on its leadership edge; confirm.
    if ( ! isStateGatherer) {
        if ( ! isMaster) {
            eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis()));
            stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
            stateChangeHandler.setStateChangedFlag();
        }
    }
    isStateGatherer = true;
    return didWork;
}
/** Notifies all listeners of a newly computed (not yet published) candidate state. */
private void invokeCandidateStateListeners(ClusterStateBundle candidateBundle) {
    systemStateListeners.forEach(listener -> listener.handleNewCandidateState(candidateBundle));
}
/** True once the initial broadcast grace period has passed, or earlier if every node has reported its state. */
private boolean hasPassedFirstStateBroadcastTimePoint(long timeNowMs) {
    return timeNowMs >= firstAllowedStateBroadcast || cluster.allStatesReported();
}
/**
 * Recomputes the candidate cluster state if anything may have changed, and
 * promotes it to a new versioned (published) state when it differs enough
 * from the current one. Always re-schedules pending version-dependent tasks
 * against the (possibly unchanged) current version.
 *
 * @return true if a new state version was published
 */
private boolean recomputeClusterStateIfRequired() {
    boolean stateWasChanged = false;
    if (mustRecomputeCandidateClusterState()) {
        stateChangeHandler.unsetStateChangedFlag();
        final AnnotatedClusterState candidate = computeCurrentAnnotatedState();
        // Derive per-bucket-space states and the feed-block status from the baseline candidate.
        final ClusterStateBundle candidateBundle = ClusterStateBundle.builder(candidate)
                .bucketSpaces(configuredBucketSpaces)
                .stateDeriver(createBucketSpaceStateDeriver())
                .deferredActivation(options.enableTwoPhaseClusterStateActivation)
                .feedBlock(createResourceExhaustionCalculator()
                           .inferContentClusterFeedBlockOrNull(cluster.getNodeInfo()))
                .deriveAndBuild();
        stateVersionTracker.updateLatestCandidateStateBundle(candidateBundle);
        invokeCandidateStateListeners(candidateBundle);
        final long timeNowMs = timer.getCurrentTimeInMillis();
        if (hasPassedFirstStateBroadcastTimePoint(timeNowMs)
            && (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish()
                || stateVersionTracker.hasReceivedNewVersionFromZooKeeper()))
        {
            final ClusterStateBundle before = stateVersionTracker.getVersionedClusterStateBundle();
            stateVersionTracker.promoteCandidateToVersionedState(timeNowMs);
            emitEventsForAlteredStateEdges(before, stateVersionTracker.getVersionedClusterStateBundle(), timeNowMs);
            handleNewPublishedState(stateVersionTracker.getVersionedClusterStateBundle());
            stateWasChanged = true;
        }
    }
    /*
     * This works transparently for tasks that end up changing the current cluster state (i.e.
     * requiring a new state to be published) and for those whose changes are no-ops (because
     * the changes they request are already part of the current state). In the former case the
     * tasks will depend on the version that was generated based upon them. In the latter case
     * the tasks will depend on the version that is already published (or in the process of
     * being published).
     */
    scheduleVersionDependentTasksForFutureCompletion(stateVersionTracker.getCurrentVersion());
    return stateWasChanged;
}
/**
 * Picks the strategy for deriving per-bucket-space states: clusters with
 * global document types need maintenance-mode handling while global merges
 * are pending; others just clone the baseline state.
 */
private ClusterStateDeriver createBucketSpaceStateDeriver() {
    if (!options.clusterHasGlobalDocumentTypes) {
        return createIdentityClonedBucketSpaceStateDeriver();
    }
    return new MaintenanceWhenPendingGlobalMerges(stateVersionTracker.createMergePendingChecker(),
                                                 createDefaultSpaceMaintenanceTransitionConstraint());
}
/** Builds a feed-block calculator seeded with the current candidate bundle's feed-block state (for hysteresis). */
private ResourceExhaustionCalculator createResourceExhaustionCalculator() {
    return new ResourceExhaustionCalculator(
            options.clusterFeedBlockEnabled, options.clusterFeedBlockLimit,
            stateVersionTracker.getLatestCandidateStateBundle().getFeedBlockOrNull(),
            options.clusterFeedBlockNoiseLevel);
}
/** Deriver that simply clones the baseline state for every bucket space. */
private static ClusterStateDeriver createIdentityClonedBucketSpaceStateDeriver() {
    return (state, space) -> state.clone();
}
/** Constraint that only allows maintenance transitions on nodes that were Up in the previously published default-space state. */
private MaintenanceTransitionConstraint createDefaultSpaceMaintenanceTransitionConstraint() {
    AnnotatedClusterState currentDefaultSpaceState = stateVersionTracker.getVersionedClusterStateBundle()
            .getDerivedBucketSpaceStates().getOrDefault(FixedBucketSpaces.defaultSpace(), AnnotatedClusterState.emptyState());
    return UpEdgeMaintenanceTransitionConstraint.forPreviouslyPublishedState(currentDefaultSpaceState.getClusterState());
}
/**
 * Move tasks that are dependent on the most recently generated state being published into
 * a completion queue with a dependency on the provided version argument. Once that version
 * has been ACKed by all distributors in the system, those tasks will be marked as completed.
 */
private void scheduleVersionDependentTasksForFutureCompletion(int completeAtVersion) {
    // Every task gets the same absolute deadline, computed from the configured max wait.
    final long maxDeadlineTimePointMs = timer.getCurrentTimeInMillis() + options.getMaxDeferredTaskVersionWaitTime().toMillis();
    for (RemoteClusterControllerTask task : tasksPendingStateRecompute) {
        log.finest(() -> String.format("Adding task of type '%s' to be completed at version %d",
                task.getClass().getName(), completeAtVersion));
        taskCompletionQueue.add(new VersionDependentTaskCompletion(completeAtVersion, task, maxDeadlineTimePointMs));
    }
    tasksPendingStateRecompute.clear();
}
/** Generates a fresh annotated cluster state from the current options, time and node info. */
private AnnotatedClusterState computeCurrentAnnotatedState() {
    ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options);
    // Note: "currentTimeInMilllis" (sic) is the spelling of the external API.
    params.currentTimeInMilllis(timer.getCurrentTimeInMillis())
          .cluster(cluster)
          .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits());
    return ClusterStateGenerator.generatedStateFrom(params);
}
/**
 * Computes the delta between the previous and the new published state
 * bundle and logs one event per observed change, followed by the standard
 * "new state applied" events for the baseline state.
 */
private void emitEventsForAlteredStateEdges(final ClusterStateBundle fromState,
                                            final ClusterStateBundle toState,
                                            final long timeNowMs) {
    EventDiffCalculator.computeEventDiff(
            EventDiffCalculator.params()
                    .cluster(cluster)
                    .fromState(fromState)
                    .toState(toState)
                    .currentTimeMs(timeNowMs)
                    .maxMaintenanceGracePeriodTimeMs(options.storageNodeMaxTransitionTimeMs()))
            .forEach(event -> eventLog.add(event, isMaster));
    emitStateAppliedEvents(timeNowMs, fromState.getBaselineClusterState(), toState.getBaselineClusterState());
}
/**
 * Logs the "new cluster state version" event (with a textual diff against
 * the previous state) and, if applicable, a distribution-bit change event.
 */
private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) {
    eventLog.add(new ClusterEvent(
            ClusterEvent.Type.SYSTEMSTATE,
            "New cluster state version " + toClusterState.getVersion() + ". Change from last: " +
            fromClusterState.getTextualDifference(toClusterState),
            timeNowMs), isMaster);
    if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) {
        eventLog.add(new ClusterEvent(
                ClusterEvent.Type.SYSTEMSTATE,
                "Altering distribution bits in system from "
                + fromClusterState.getDistributionBitCount() + " to " +
                toClusterState.getDistributionBitCount(),
                timeNowMs), isMaster);
    }
}
/**
 * True exactly when this (master) controller is about to perform its very
 * first cluster state broadcast — i.e. it is master, has never broadcast a
 * bundle yet, and the initial broadcast grace period has passed.
 */
private boolean atFirstClusterStateSendTimeEdge() {
    return isMaster
            && !systemStateBroadcaster.hasBroadcastedClusterStateBundle()
            && hasPassedFirstStateBroadcastTimePoint(timer.getCurrentTimeInMillis());
}
/** True if any trigger requires recomputing the candidate state this cycle. */
private boolean mustRecomputeCandidateClusterState() {
    return stateChangeHandler.stateMayHaveChanged()
            || stateVersionTracker.bucketSpaceMergeCompletionStateHasChanged()
            || atFirstClusterStateSendTimeEdge();
}
/**
 * Detects transitions in mastership and performs the one-time work on each
 * edge: on becoming master, reload persisted state from ZooKeeper and set up
 * the initial broadcast grace period; on losing mastership, fail all tasks
 * waiting on state versions. Also persists changed wanted states while master.
 *
 * @return true if a leadership edge was handled (work was done)
 */
private boolean handleLeadershipEdgeTransitions() throws InterruptedException {
    boolean didWork = false;
    if (masterElectionHandler.isMaster()) {
        // Edge: just became master.
        if ( ! isMaster) {
            stateChangeHandler.setStateChangedFlag();
            systemStateBroadcaster.resetBroadcastedClusterStateBundle();
            stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
            ClusterStateBundle previousBundle = database.getLatestClusterStateBundle();
            database.loadStartTimestamps(cluster);
            database.loadWantedStates(databaseContext);
            log.info(() -> String.format("Loaded previous cluster state bundle from ZooKeeper: %s", previousBundle));
            stateVersionTracker.setClusterStateBundleRetrievedFromZooKeeper(previousBundle);
            eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. Bumped version to "
                    + stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis()));
            long currentTime = timer.getCurrentTimeInMillis();
            // Delay the first broadcast so nodes get a chance to report in first.
            firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast;
            log.log(Level.FINE, "At time " + currentTime + " we set first system state broadcast time to be "
                    + options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + ".");
            didWork = true;
        }
        isMaster = true;
        if (wantedStateChanged) {
            database.saveWantedStates(databaseContext);
            wantedStateChanged = false;
        }
    } else {
        // Edge: just lost mastership.
        if (isMaster) {
            eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis()));
            firstAllowedStateBroadcast = Long.MAX_VALUE;
            failAllVersionDependentTasks();
        }
        wantedStateChanged = false;
        isMaster = false;
    }
    metricUpdater.updateMasterState(isMaster);
    return didWork;
}
/**
 * Main event loop of the controller thread: runs {@code tick()} until
 * {@code running} is cleared. On fatal errors the process is terminated.
 * On exit, any tasks still waiting on a state version are failed and all
 * threads waiting on the monitor are woken up.
 */
public void run() {
    controllerThreadId = Thread.currentThread().getId();
    try {
        processingCycle = true;
        while( isRunning() ) {
            tick();
        }
    } catch (InterruptedException e) {
        log.log(Level.FINE, "Event thread stopped by interrupt exception: " + e);
    } catch (Throwable t) {
        // The logger records the full stack trace; an additional
        // printStackTrace() would only bypass the logging configuration.
        log.log(Level.SEVERE, "Fatal error killed fleet controller", t);
        synchronized (monitor) { running.set(false); }
        System.exit(1);
    } finally {
        running.set(false);
        failAllVersionDependentTasks();
        synchronized (monitor) { monitor.notifyAll(); }
    }
}
// Adapter handed to the DatabaseHandler so it can reach the cluster model and
// route node add/remove and state-change notifications back to this controller,
// which implements both listener interfaces itself.
public DatabaseHandler.Context databaseContext = new DatabaseHandler.Context() {
@Override
public ContentCluster getCluster() { return cluster; }
@Override
public FleetController getFleetController() { return FleetController.this; }
@Override
public NodeAddedOrRemovedListener getNodeAddedOrRemovedListener() { return FleetController.this; }
@Override
public NodeStateOrHostInfoChangeHandler getNodeStateUpdateListener() { return FleetController.this; }
};
/**
 * Blocks until the controller completes at least one full processing cycle
 * after this call, or throws IllegalStateException on timeout or shutdown.
 * Test-synchronization utility.
 *
 * @param timeoutMS maximum time to wait before giving up
 */
public void waitForCompleteCycle(long timeoutMS) {
    long endTime = System.currentTimeMillis() + timeoutMS;
    synchronized (monitor) {
        // If a cycle is currently in progress we must see the *next* full one complete.
        long wantedCycle = cycleCount + (processingCycle ? 2 : 1);
        waitingForCycle = true;
        try {
            while (cycleCount < wantedCycle) {
                if (System.currentTimeMillis() > endTime) throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeoutMS + " ms.");
                if ( !isRunning() ) throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles");
                try {
                    monitor.wait(100);
                } catch (InterruptedException e) {
                    // Previously swallowed; with the interrupt flag set, wait() throws
                    // immediately and the loop becomes a busy spin until timeout.
                    // Restore the flag and fail fast instead.
                    Thread.currentThread().interrupt();
                    throw new IllegalStateException("Interrupted while waiting for cycle to complete", e);
                }
            }
        } finally {
            waitingForCycle = false;
        }
    }
}
/**
 * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed.
 * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce
 * live performance to remove a non-problem.
 */
public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, int timeout) throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeout;
    synchronized (monitor) {
        while (true) {
            // Count nodes that have acknowledged a state bundle at or above the wanted version.
            int nodesAtVersion = 0;
            for (NodeInfo info : cluster.getNodeInfo()) {
                if (info.getClusterStateVersionBundleAcknowledged() >= version) {
                    nodesAtVersion++;
                }
            }
            if (nodesAtVersion >= nodeCount) {
                log.log(Level.INFO, nodesAtVersion + " nodes now have acked system state " + version + " or higher.");
                return;
            }
            if (System.currentTimeMillis() >= deadline) {
                throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout + " milliseconds.");
            }
            monitor.wait(10);
        }
    }
}
/**
 * Test utility: blocks until the expected number of distributor and storage nodes
 * are visible (with current RPC addresses) in slobrok, or throws on timeout.
 */
public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, int timeoutMillis) throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMillis;
    synchronized (monitor) {
        while (true) {
            int distCount = 0;
            int storCount = 0;
            for (NodeInfo info : cluster.getNodeInfo()) {
                if (info.isRpcAddressOutdated()) {
                    continue; // Not currently registered in slobrok.
                }
                if (info.isDistributor()) {
                    distCount++;
                } else {
                    storCount++;
                }
            }
            if (distCount == distNodeCount && storCount == storNodeCount) {
                return;
            }
            if (System.currentTimeMillis() >= deadline) {
                throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount
                        + " storage nodes registered in slobrok within timeout of " + timeoutMillis + " ms. (Got "
                        + distCount + " distributors and " + storCount + " storage nodes)");
            }
            monitor.wait(10);
        }
    }
}
// True while the ZooKeeper database connection is (still) open.
public boolean hasZookeeperConnection() { return !database.isClosed(); }
// Test instrumentation: number of slobrok mirror updates observed so far.
public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); }
public ContentCluster getCluster() { return cluster; }
public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); }
public EventLog getEventLog() {
return eventLog;
}
} | class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAddedOrRemovedListener, SystemStateListener,
Runnable, RemoteClusterControllerTaskScheduler {
private static final Logger log = Logger.getLogger(FleetController.class.getName());
// Core collaborators, all injected via the constructor. 'monitor' doubles as the
// lock object guarding most mutable state below (it is the injected timer).
private final Timer timer;
private final Object monitor;
private final EventLog eventLog;
private final NodeLookup nodeLookup;
private final ContentCluster cluster;
private final Communicator communicator;
private final NodeStateGatherer stateGatherer;
private final StateChangeHandler stateChangeHandler;
private final SystemStateBroadcaster systemStateBroadcaster;
private final StateVersionTracker stateVersionTracker;
private final StatusPageServerInterface statusPageServer;
private final RpcServer rpcServer;
private final DatabaseHandler database;
private final MasterElectionHandler masterElectionHandler;
// Event-loop thread; created in start(), loops in run() while 'running' is true.
private Thread runner = null;
private final AtomicBoolean running = new AtomicBoolean(true);
// Active options plus options staged by updateOptions() for pickup by the controller thread.
private FleetControllerOptions options;
private FleetControllerOptions nextOptions;
private final List<SystemStateListener> systemStateListeners = new CopyOnWriteArrayList<>();
private boolean processingCycle = false;
private boolean wantedStateChanged = false;
// Cycle bookkeeping used by waitForCompleteCycle() and metric refresh throttling.
private long cycleCount = 0;
private long lastMetricUpdateCycleCount = 0;
private long nextStateSendTime = 0;
private Long controllerThreadId = null;
private boolean waitingForCycle = false;
private final StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter();
// States published / converged since listeners were last notified (drained each tick).
private final List<ClusterStateBundle> newStates = new ArrayList<>();
private final List<ClusterStateBundle> convergedStates = new ArrayList<>();
private long configGeneration = -1;
private long nextConfigGeneration = -1;
private final Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>();
private final MetricUpdater metricUpdater;
// Role flags; this node may be the elected master and/or a passive state gatherer.
private boolean isMaster = false;
private boolean isStateGatherer = false;
private long firstAllowedStateBroadcast = Long.MAX_VALUE;
private long tickStartTime = Long.MAX_VALUE;
// Tasks waiting on a state recomputation and on specific published versions, respectively.
private final List<RemoteClusterControllerTask> tasksPendingStateRecompute = new ArrayList<>();
private final Queue<VersionDependentTaskCompletion> taskCompletionQueue = new ArrayDeque<>();
private Set<String> configuredBucketSpaces = Collections.emptySet();
// Read-only view of controller data for status/diagnostic pages.
private final RunDataExtractor dataExtractor = new RunDataExtractor() {
@Override
public FleetControllerOptions getOptions() { return options; }
@Override
public long getConfigGeneration() { return configGeneration; }
@Override
public ContentCluster getCluster() { return cluster; }
};
/**
 * Wires together all collaborators, registers the status page request handlers and
 * applies the initial options via propagateOptions(). Note that the injected timer
 * also serves as the shared monitor lock.
 */
public FleetController(Timer timer,
EventLog eventLog,
ContentCluster cluster,
NodeStateGatherer nodeStateGatherer,
Communicator communicator,
StatusPageServerInterface statusPage,
RpcServer server,
NodeLookup nodeLookup,
DatabaseHandler database,
StateChangeHandler stateChangeHandler,
SystemStateBroadcaster systemStateBroadcaster,
MasterElectionHandler masterElectionHandler,
MetricUpdater metricUpdater,
FleetControllerOptions options) {
log.info("Starting up cluster controller " + options.fleetControllerIndex + " for cluster " + cluster.getName());
this.timer = timer;
// The timer doubles as the monitor object all waits/notifies synchronize on.
this.monitor = timer;
this.eventLog = eventLog;
this.options = options;
this.nodeLookup = nodeLookup;
this.cluster = cluster;
this.communicator = communicator;
this.database = database;
this.stateGatherer = nodeStateGatherer;
this.stateChangeHandler = stateChangeHandler;
this.systemStateBroadcaster = systemStateBroadcaster;
this.stateVersionTracker = new StateVersionTracker(options.minMergeCompletionRatio);
this.metricUpdater = metricUpdater;
this.statusPageServer = statusPage;
this.rpcServer = server;
this.masterElectionHandler = masterElectionHandler;
// Status page routing: per-node page, health state, raw cluster state, and index page.
this.statusRequestRouter.addHandler(
"^/node=([a-z]+)\\.(\\d+)$",
new LegacyNodePageRequestHandler(timer, eventLog, cluster));
this.statusRequestRouter.addHandler(
"^/state.*",
new NodeHealthRequestHandler(dataExtractor));
this.statusRequestRouter.addHandler(
"^/clusterstate",
new ClusterStateRequestHandler(stateVersionTracker));
this.statusRequestRouter.addHandler(
"^/$",
new LegacyIndexPageRequestHandler(
timer, options.showLocalSystemStatesInEventLog, cluster,
masterElectionHandler, stateVersionTracker,
eventLog, timer.getCurrentTimeInMillis(), dataExtractor));
// Apply the initial option set (distribution, ZK address, timeouts, ...).
propagateOptions();
}
/**
 * Factory creating a fully wired, started FleetController with real (production)
 * collaborators: real timer, RPC communicator, ZooKeeper-backed database and
 * slobrok-based node lookup.
 */
public static FleetController create(FleetControllerOptions options,
StatusPageServerInterface statusPageServer,
MetricReporter metricReporter) throws Exception {
Timer timer = new RealTimer();
MetricUpdater metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex);
EventLog log = new EventLog(timer, metricUpdater);
ContentCluster cluster = new ContentCluster(
options.clusterName,
options.nodes,
options.storageDistribution);
NodeStateGatherer stateGatherer = new NodeStateGatherer(timer, timer, log);
Communicator communicator = new RPCCommunicator(
RPCCommunicator.createRealSupervisor(),
timer,
options.fleetControllerIndex,
options.nodeStateRequestTimeoutMS,
options.nodeStateRequestTimeoutEarliestPercentage,
options.nodeStateRequestTimeoutLatestPercentage,
options.nodeStateRequestRoundTripTimeMaxSeconds);
DatabaseHandler database = new DatabaseHandler(new ZooKeeperDatabaseFactory(), timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer);
NodeLookup lookUp = new SlobrokClient(timer);
StateChangeHandler stateGenerator = new StateChangeHandler(timer, log);
SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer);
MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer);
// Note: no RPC server is passed here (null); the RPC server is optional.
FleetController controller = new FleetController(
timer, log, cluster, stateGatherer, communicator, statusPageServer, null, lookUp, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options);
controller.start();
return controller;
}
/** Starts the controller event loop ({@link #run()}) on a dedicated, named thread. */
public void start() {
    // Naming the thread makes it identifiable in thread dumps and debuggers.
    runner = new Thread(this, "fleetcontroller-" + options.fleetControllerIndex);
    runner.start();
}
// The shared lock object (the timer instance) external callers may synchronize on.
public Object getMonitor() { return monitor; }
public boolean isRunning() {
return running.get();
}
// Whether this controller currently holds mastership, per the election handler.
public boolean isMaster() {
synchronized (monitor) {
return masterElectionHandler.isMaster();
}
}
// Baseline cluster state most recently handed to the broadcaster.
public ClusterState getClusterState() {
synchronized (monitor) {
return systemStateBroadcaster.getClusterState();
}
}
// Full per-bucket-space state bundle most recently handed to the broadcaster.
public ClusterStateBundle getClusterStateBundle() {
synchronized (monitor) {
return systemStateBroadcaster.getClusterStateBundle();
}
}
/** Queues a remote task for execution by the controller thread in a later tick. */
public void schedule(RemoteClusterControllerTask task) {
    synchronized (monitor) {
        String taskType = task.getClass().getName();
        log.fine("Scheduled remote task " + taskType + " for execution");
        remoteTasks.add(task);
    }
}
/** Used for unit testing. */
public void addSystemStateListener(SystemStateListener listener) {
systemStateListeners.add(listener);
// Immediately replay the current published state so the listener does not miss it.
com.yahoo.vdslib.state.ClusterState state = getSystemState();
if (state == null) {
throw new NullPointerException("Cluster state should never be null at this point");
}
listener.handleNewPublishedState(ClusterStateBundle.ofBaselineOnly(AnnotatedClusterState.withoutAnnotations(state)));
// Also replay the last converged state, if any exists yet.
ClusterStateBundle convergedState = systemStateBroadcaster.getLastClusterStateBundleConverged();
if (convergedState != null) {
listener.handleStateConvergedInCluster(convergedState);
}
}
// Returns a defensive copy so callers cannot mutate live configuration.
public FleetControllerOptions getOptions() {
synchronized(monitor) {
return options.clone();
}
}
// Last state the node itself reported; throws if the node is unknown to the cluster.
public NodeState getReportedNodeState(Node n) {
synchronized(monitor) {
NodeInfo node = cluster.getNodeInfo(n);
if (node == null) {
throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster);
}
return node.getReportedState();
}
}
// Administratively wanted state for the node.
public NodeState getWantedNodeState(Node n) {
synchronized(monitor) {
return cluster.getNodeInfo(n).getWantedState();
}
}
// Current versioned (published) cluster state.
public com.yahoo.vdslib.state.ClusterState getSystemState() {
synchronized(monitor) {
return stateVersionTracker.getVersionedClusterState();
}
}
public int getRpcPort() { return rpcServer.getPort(); }
/**
 * Stops the event thread (if running) and shuts down all owned services.
 * Shutdown order is deliberate: event thread first, then database, status page,
 * RPC server, communicator and node lookup.
 */
public void shutdown() throws InterruptedException, java.io.IOException {
if (runner != null && isRunning()) {
log.log(Level.INFO, "Joining event thread.");
running.set(false);
// Wake the event thread out of any monitor.wait() so it can observe the stop flag.
synchronized(monitor) { monitor.notifyAll(); }
runner.join();
}
log.log(Level.INFO, "Fleetcontroller done shutting down event thread.");
// From here on, this (shutdown) thread is allowed to act as the controller thread.
controllerThreadId = Thread.currentThread().getId();
database.shutdown(this);
if (statusPageServer != null) {
statusPageServer.shutdown();
}
if (rpcServer != null) {
rpcServer.shutdown();
}
communicator.shutdown();
nodeLookup.shutdown();
}
/**
 * Stages a new option set and config generation; the controller thread applies
 * them on its next cycle (see switchToNewConfig / propagateOptions).
 */
public void updateOptions(FleetControllerOptions options, long configGeneration) {
    synchronized(monitor) {
        assert(this.options.fleetControllerIndex == options.fleetControllerIndex);
        log.log(Level.INFO, "Fleetcontroller " + options.fleetControllerIndex + " has new options");
        // Store a copy so the caller cannot mutate the staged configuration afterwards.
        nextConfigGeneration = configGeneration;
        nextOptions = options.clone();
        monitor.notifyAll();
    }
}
// Sanity check: asserts the caller is the controller thread (once one is registered).
private void verifyInControllerThread() {
    Long ownerThread = controllerThreadId;
    if (ownerThread == null || ownerThread == Thread.currentThread().getId()) {
        return;
    }
    throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen.");
}
// Convenience accessor for the current candidate (not yet published) state.
private ClusterState latestCandidateClusterState() {
return stateVersionTracker.getLatestCandidateState().getClusterState();
}
// A node reported a new state of its own.
@Override
public void handleNewNodeState(NodeInfo node, NodeState newState) {
verifyInControllerThread();
stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this);
}
// An administratively wanted state was proposed; flag it for persistence to ZK.
@Override
public void handleNewWantedNodeState(NodeInfo node, NodeState newState) {
verifyInControllerThread();
wantedStateChanged = true;
stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState);
}
// A node delivered updated host info (resource usage etc.).
@Override
public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) {
verifyInControllerThread();
triggerBundleRecomputationIfResourceExhaustionStateChanged(nodeInfo, newHostInfo);
stateVersionTracker.handleUpdatedHostInfo(nodeInfo, newHostInfo);
}
// If feed blocking is enabled and the node's resource exhaustion set changed,
// force a cluster state recomputation on the next cycle.
private void triggerBundleRecomputationIfResourceExhaustionStateChanged(NodeInfo nodeInfo, HostInfo newHostInfo) {
if (!options.clusterFeedBlockEnabled) {
return;
}
var calc = createResourceExhaustionCalculator();
var previouslyExhausted = calc.enumerateNodeResourceExhaustions(nodeInfo);
var nowExhausted = calc.resourceExhaustionsFromHostInfo(nodeInfo, newHostInfo);
if (!previouslyExhausted.equals(nowExhausted)) {
log.fine(() -> String.format("Triggering state recomputation due to change in cluster feed block: %s -> %s",
previouslyExhausted, nowExhausted));
stateChangeHandler.setStateChangedFlag();
}
}
// Node discovery callbacks from slobrok; all simply delegate to the state change handler.
@Override
public void handleNewNode(NodeInfo node) {
verifyInControllerThread();
stateChangeHandler.handleNewNode(node);
}
@Override
public void handleMissingNode(NodeInfo node) {
verifyInControllerThread();
stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this);
}
@Override
public void handleNewRpcAddress(NodeInfo node) {
verifyInControllerThread();
stateChangeHandler.handleNewRpcAddress(node);
}
@Override
public void handleReturnedRpcAddress(NodeInfo node) {
verifyInControllerThread();
stateChangeHandler.handleReturnedRpcAddress(node);
}
// A new cluster state bundle has been promoted to published: record it for
// listener propagation, update metrics, hand it to the broadcaster, and (as
// master) persist its metadata to ZooKeeper.
@Override
public void handleNewPublishedState(ClusterStateBundle stateBundle) {
verifyInControllerThread();
ClusterState baselineState = stateBundle.getBaselineClusterState();
newStates.add(stateBundle);
metricUpdater.updateClusterStateMetrics(cluster, baselineState,
ResourceUsageStats.calculateFrom(cluster.getNodeInfo(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock()));
lastMetricUpdateCycleCount = cycleCount;
systemStateBroadcaster.handleNewClusterStates(stateBundle);
// Only the master is allowed to write state metadata to ZooKeeper.
if (masterElectionHandler.isMaster()) {
storeClusterStateMetaDataToZooKeeper(stateBundle);
}
}
// Periodically refreshes cluster-state metrics even when no new state has been
// published, so dashboards keep seeing fresh resource-usage numbers.
// Returns true if metrics were refreshed this cycle.
private boolean maybePublishOldMetrics() {
    verifyInControllerThread();
    // Only refresh once more than 300 cycles have passed since the last update.
    if (cycleCount <= 300 + lastMetricUpdateCycleCount) {
        return false;
    }
    ClusterStateBundle stateBundle = stateVersionTracker.getVersionedClusterStateBundle();
    ClusterState baselineState = stateBundle.getBaselineClusterState();
    metricUpdater.updateClusterStateMetrics(cluster, baselineState,
            ResourceUsageStats.calculateFrom(cluster.getNodeInfo(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock()));
    lastMetricUpdateCycleCount = cycleCount;
    return true;
}
/**
 * Persists the published state's version and full bundle to ZooKeeper.
 * Interruption during the write is surfaced as a RuntimeException.
 */
private void storeClusterStateMetaDataToZooKeeper(ClusterStateBundle stateBundle) {
    try {
        database.saveLatestSystemStateVersion(databaseContext, stateBundle.getVersion());
        database.saveLatestClusterStateBundle(databaseContext, stateBundle);
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers up the stack can still observe it,
        // since we translate the checked exception into an unchecked one here.
        Thread.currentThread().interrupt();
        throw new RuntimeException("ZooKeeper write interrupted", e);
    }
}
/**
 * This function gives data of the current state in master election.
 * The keys in the given map are indexes of fleet controllers.
 * The values are what fleetcontroller that fleetcontroller wants to
 * become master.
 *
 * If more than half the fleetcontrollers want a node to be master and
 * that node also wants itself as master, that node is the single master.
 * If this condition is not met, there is currently no master.
 */
public void handleFleetData(Map<Integer, Integer> data) {
verifyInControllerThread();
log.log(Level.FINEST, "Sending fleet data event on to master election handler");
metricUpdater.updateMasterElectionMetrics(data);
masterElectionHandler.handleFleetData(data);
}
/**
 * Called when we can no longer contact database.
 */
public void lostDatabaseConnection() {
verifyInControllerThread();
masterElectionHandler.lostDatabaseConnection();
}
// Fails every queued version-dependent task with LEADERSHIP_LOST and notifies
// completion. Called when mastership is lost or the controller shuts down, so no
// client is left waiting on a state version that will never be published by us.
private void failAllVersionDependentTasks() {
    for (RemoteClusterControllerTask task : tasksPendingStateRecompute) {
        task.handleFailure(RemoteClusterControllerTask.Failure.of(
                RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST));
        task.notifyCompleted();
    }
    tasksPendingStateRecompute.clear();
    for (VersionDependentTaskCompletion completion : taskCompletionQueue) {
        completion.getTask().handleFailure(RemoteClusterControllerTask.Failure.of(
                RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST));
        completion.getTask().notifyCompleted();
    }
    taskCompletionQueue.clear();
}
/** Called when all distributors have acked newest cluster state version. */
public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.Context context) throws InterruptedException {
Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values());
var currentBundle = stateVersionTracker.getVersionedClusterStateBundle();
log.fine(() -> String.format("All distributors have ACKed cluster state version %d", currentBundle.getVersion()));
stateChangeHandler.handleAllDistributorsInSync(currentBundle.getBaselineClusterState(), nodes, database, context);
// Record the bundle so listeners are told about convergence in the next tick.
convergedStates.add(currentBundle);
}
// Returns true if the new node collection differs from the currently configured
// one: different size, different membership, or a changed retired flag.
// NOTE(review): the explicit retired-flag loop suggests ConfiguredNode.equals()
// may not cover 'retired' — confirm before simplifying.
private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) {
if (newNodes.size() != cluster.getConfiguredNodes().size()) return true;
if (! cluster.getConfiguredNodes().values().containsAll(newNodes)) return true;
for (ConfiguredNode node : newNodes) {
if (node.retired() != cluster.getConfiguredNodes().get(node.index()).retired()) {
return true;
}
}
return false;
}
/** This is called when the options field has been set to a new set of options */
private void propagateOptions() {
verifyInControllerThread();
// A changed node set invalidates the cached slobrok generation.
if (changesConfiguredNodeSet(options.nodes)) {
cluster.setSlobrokGenerationCount(0);
}
configuredBucketSpaces = Collections.unmodifiableSet(
Stream.of(FixedBucketSpaces.defaultSpace(), FixedBucketSpaces.globalSpace())
.collect(Collectors.toSet()));
stateVersionTracker.setMinMergeCompletionRatio(options.minMergeCompletionRatio);
communicator.propagateOptions(options);
if (nodeLookup instanceof SlobrokClient) {
((SlobrokClient) nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs);
}
eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize);
cluster.setPollingFrequency(options.statePollingFrequency);
cluster.setDistribution(options.storageDistribution);
cluster.setNodes(options.nodes);
database.setZooKeeperAddress(options.zooKeeperServerAddress);
database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout);
stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod);
stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS);
stateChangeHandler.reconfigureFromOptions(options);
// Force a state recomputation since any option may affect the generated state.
stateChangeHandler.setStateChangedFlag();
masterElectionHandler.setFleetControllerCount(options.fleetControllerCount);
masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod);
if (rpcServer != null) {
rpcServer.setMasterElectionHandler(masterElectionHandler);
try{
rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort);
} catch (ListenFailedException e) {
log.log(Level.WARNING, "Failed to bind RPC server to port " + options.rpcPort +". This may be natural if cluster has altered the services running on this node: " + e.getMessage());
} catch (Exception e) {
log.log(Level.WARNING, "Failed to initialize RPC server socket: " + e.getMessage());
}
}
if (statusPageServer != null) {
try{
statusPageServer.setPort(options.httpPort);
} catch (Exception e) {
log.log(Level.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage());
}
}
long currentTime = timer.getCurrentTimeInMillis();
// Never push the next state-send time further out than it already is.
nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime);
configGeneration = nextConfigGeneration;
nextConfigGeneration = -1;
}
/**
 * Resolves and serves a status page request. On success the routed handler's
 * response is returned directly; on failure a small HTML error page is built,
 * with 404 for unknown paths and 500 (stack trace hidden in the page source)
 * for unexpected errors.
 */
public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) {
verifyInControllerThread();
StatusPageResponse.ResponseCode responseCode;
String message;
String hiddenMessage = "";
try {
StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest);
if (handler == null) {
throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath());
}
return handler.handle(httpRequest);
} catch (FileNotFoundException e) {
responseCode = StatusPageResponse.ResponseCode.NOT_FOUND;
message = e.getMessage();
} catch (Exception e) {
responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR;
message = "Internal Server Error";
hiddenMessage = ExceptionUtils.getStackTraceAsString(e);
log.log(Level.FINE, "Unknown exception thrown for request " + httpRequest.getRequest() +
": " + hiddenMessage);
}
// Error path: build a minimal HTML page describing the failure.
TimeZone tz = TimeZone.getTimeZone("UTC");
long currentTime = timer.getCurrentTimeInMillis();
StatusPageResponse response = new StatusPageResponse();
StringBuilder content = new StringBuilder();
response.setContentType("text/html");
response.setResponseCode(responseCode);
content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n");
content.append("<p>UTC time when creating this page: ").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>");
response.writeHtmlHeader(content, message);
response.writeHtmlFooter(content, hiddenMessage);
response.writeContent(content.toString());
return response;
}
/**
 * Polls the master election state from ZooKeeper.
 *
 * @return true if the election state changed (work was done); false on no change
 *         or on a non-interrupt failure (which is logged and swallowed).
 * @throws InterruptedException if the watch was interrupted
 */
private boolean updateMasterElectionState() throws InterruptedException {
    try {
        return masterElectionHandler.watchMasterElection(database, databaseContext);
    } catch (InterruptedException e) {
        // Rethrow the original exception instead of wrapping it in a fresh
        // InterruptedException via initCause(); this preserves the real stack trace.
        throw e;
    } catch (Exception e) {
        log.log(Level.WARNING, "Failed to watch master election: " + e.toString());
    }
    return false;
}
// Leaves the state-gatherer role: clears cached node states (only if we actually
// held the role, to avoid spurious events) and records the transition.
private void stepDownAsStateGatherer() {
if (isStateGatherer) {
cluster.clearStates();
eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis()));
}
isStateGatherer = false;
}
// Promotes the staged options (set by updateOptions) to active and applies them.
// Failures are logged but do not kill the event loop.
private void switchToNewConfig() {
options = nextOptions;
nextOptions = null;
try {
propagateOptions();
} catch (Exception e) {
log.log(Level.SEVERE, "Failed to handle new fleet controller config", e);
}
}
// Answers one pending status page request, if a status server exists and a
// request is waiting. Returns true if a request was served.
private boolean processAnyPendingStatusPageRequest() {
    if (statusPageServer == null) {
        return false;
    }
    StatusPageServer.HttpRequest statusRequest = statusPageServer.getCurrentHttpRequest();
    if (statusRequest == null) {
        return false;
    }
    statusPageServer.answerCurrentStatusRequest(fetchStatusPage(statusRequest));
    return true;
}
// Broadcasts the current state bundle (and any pending state activations) to
// nodes, honoring the initial grace period and the minimum interval between new
// system states. Returns true if anything was sent.
private boolean broadcastClusterStateToEligibleNodes() {
// ZooKeeper writes for the bundle must complete before we may publish it.
if (database.hasPendingClusterStateMetaDataStore()) {
log.log(Level.FINE, "Can't publish current cluster state as it has one or more pending ZooKeeper stores");
return false;
}
boolean sentAny = false;
long currentTime = timer.getCurrentTimeInMillis();
// Broadcast either after the initial grace period, or earlier if every node has reported in.
if ((currentTime >= firstAllowedStateBroadcast || cluster.allStatesReported())
&& currentTime >= nextStateSendTime)
{
if (currentTime < firstAllowedStateBroadcast) {
log.log(Level.FINE, "Not set to broadcast states just yet, but as we have gotten info from all nodes we can do so safely.");
firstAllowedStateBroadcast = currentTime;
}
sentAny = systemStateBroadcaster.broadcastNewStateBundleIfRequired(
databaseContext, communicator, database.getLastKnownStateBundleVersionWrittenBySelf());
if (sentAny) {
// Rate-limit: schedule the earliest time a newer state may be sent.
nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates;
}
}
sentAny |= systemStateBroadcaster.broadcastStateActivationsIfRequired(databaseContext, communicator);
return sentAny;
}
// Drains the newStates and convergedStates queues, notifying every registered
// listener of each bundle in order. Synchronizes on the listener list so
// registration (addSystemStateListener) cannot interleave with notification.
private void propagateNewStatesToListeners() {
if ( ! newStates.isEmpty()) {
synchronized (systemStateListeners) {
for (ClusterStateBundle stateBundle : newStates) {
for (SystemStateListener listener : systemStateListeners) {
listener.handleNewPublishedState(stateBundle);
}
}
newStates.clear();
}
}
if ( ! convergedStates.isEmpty()) {
synchronized (systemStateListeners) {
for (ClusterStateBundle stateBundle : convergedStates) {
for (SystemStateListener listener : systemStateListeners) {
listener.handleStateConvergedInCluster(stateBundle);
}
}
convergedStates.clear();
}
}
}
// Executes at most one queued remote task per call. Tasks that depend on a state
// version being acked are parked in tasksPendingStateRecompute instead of being
// completed immediately. Returns true if a task was processed.
private boolean processNextQueuedRemoteTask() {
    if (remoteTasks.isEmpty()) {
        return false;
    }
    RemoteClusterControllerTask.Context context = createRemoteTaskProcessingContext();
    RemoteClusterControllerTask task = remoteTasks.poll();
    log.finest(() -> String.format("Processing remote task of type '%s'", task.getClass().getName()));
    task.doRemoteFleetControllerTask(context);
    if (taskMayBeCompletedImmediately(task)) {
        log.finest(() -> String.format("Done processing remote task of type '%s'", task.getClass().getName()));
        task.notifyCompleted();
    } else {
        log.finest(() -> String.format("Remote task of type '%s' queued until state recomputation", task.getClass().getName()));
        tasksPendingStateRecompute.add(task);
    }
    return true;
}
// A task must wait for version acks only when it declares the dependency, has not
// already failed, and we are actually the master (De Morgan of the original form;
// same evaluation order).
private boolean taskMayBeCompletedImmediately(RemoteClusterControllerTask task) {
    return !(task.hasVersionAckDependency() && !task.isFailed() && masterElectionHandler.isMaster());
}
// Builds the context snapshot a remote task operates on: cluster model, current
// consolidated and published states, master info, and the controller itself as
// both change handlers.
private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() {
    var context = new RemoteClusterControllerTask.Context();
    context.cluster = cluster;
    context.currentConsolidatedState = consolidatedClusterState();
    context.publishedClusterStateBundle = stateVersionTracker.getVersionedClusterStateBundle();
    context.masterInfo = masterElectionHandler;
    context.nodeStateOrHostInfoChangeHandler = this;
    context.nodeAddedOrRemovedListener = this;
    return context;
}
// With two-phase (deferred) activation enabled, the activation ack is what counts;
// otherwise the plain bundle ack is the node's effective version.
private static long effectiveActivatedStateVersion(NodeInfo nodeInfo, ClusterStateBundle bundle) {
    if (bundle.deferredActivation()) {
        return nodeInfo.getClusterStateVersionActivationAcked();
    }
    return nodeInfo.getClusterStateVersionBundleAcknowledged();
}
// Lists the nodes whose effective acked version is still below the given version.
// Returns an empty list when no bundle has been broadcast yet.
private List<Node> enumerateNodesNotYetAckedAtLeastVersion(long version) {
    var currentBundle = systemStateBroadcaster.getClusterStateBundle();
    if (currentBundle == null) {
        return List.of();
    }
    return cluster.getNodeInfo().stream()
            .filter(info -> effectiveActivatedStateVersion(info, currentBundle) < version)
            .map(NodeInfo::getNode)
            .collect(Collectors.toList());
}
// Renders a list as "a, b, c", truncating to 'limit' elements with a
// "(... and N more)" suffix when the list is longer than the limit.
private static <E> String stringifyListWithLimits(List<E> list, int limit) {
    if (list.size() <= limit) {
        return list.stream().map(E::toString).collect(Collectors.joining(", "));
    }
    String shown = list.subList(0, limit).stream().map(E::toString).collect(Collectors.joining(", "));
    return String.format("%s (... and %d more)", shown, list.size() - limit);
}
// Builds a human-readable error detail naming (a bounded number of) nodes that
// have not yet converged to the given version; empty string when all have.
private String buildNodesNotYetConvergedMessage(long taskConvergeVersion) {
    var divergentNodes = enumerateNodesNotYetAckedAtLeastVersion(taskConvergeVersion);
    if (divergentNodes.isEmpty()) {
        return "";
    }
    return String.format("the following nodes have not converged to at least version %d: %s",
            taskConvergeVersion,
            stringifyListWithLimits(divergentNodes, options.maxDivergentNodesPrintedInTaskErrorMessages));
}
// Completes (or fails, on deadline) queued version-dependent tasks whose required
// state version has been acked cluster-wide. The queue is ordered, so processing
// stops at the first task that is neither satisfied nor expired.
// Returns true if any task left the queue.
private boolean completeSatisfiedVersionDependentTasks() {
int publishedVersion = systemStateBroadcaster.lastClusterStateVersionInSync();
long queueSizeBefore = taskCompletionQueue.size();
final long now = timer.getCurrentTimeInMillis();
while (!taskCompletionQueue.isEmpty()) {
VersionDependentTaskCompletion taskCompletion = taskCompletionQueue.peek();
if (publishedVersion >= taskCompletion.getMinimumVersion()) {
// Cluster has caught up with this task's version; complete it.
log.fine(() -> String.format("Deferred task of type '%s' has minimum version %d, published is %d; completing",
taskCompletion.getTask().getClass().getName(), taskCompletion.getMinimumVersion(), publishedVersion));
taskCompletion.getTask().notifyCompleted();
taskCompletionQueue.remove();
} else if (taskCompletion.getDeadlineTimePointMs() <= now) {
// Waited too long; fail with a message naming the lagging nodes.
var details = buildNodesNotYetConvergedMessage(taskCompletion.getMinimumVersion());
log.log(Level.WARNING, () -> String.format("Deferred task of type '%s' has exceeded wait deadline; completing with failure (details: %s)",
taskCompletion.getTask().getClass().getName(), details));
taskCompletion.getTask().handleFailure(RemoteClusterControllerTask.Failure.of(
RemoteClusterControllerTask.FailureCondition.DEADLINE_EXCEEDED, details));
taskCompletion.getTask().notifyCompleted();
taskCompletionQueue.remove();
} else {
break;
}
}
return (taskCompletionQueue.size() != queueSizeBefore);
}
/**
 * A "consolidated" cluster state is guaranteed to have up-to-date information on which nodes are
 * up or down even when the whole cluster is down. The regular, published cluster state is not
 * normally updated to reflect node events when the cluster is down.
 */
ClusterState consolidatedClusterState() {
final ClusterState publishedState = stateVersionTracker.getVersionedClusterState();
if (publishedState.getClusterState() == State.UP) {
return publishedState;
}
// Cluster not UP: fall back to the candidate state's node info, but keep the
// published version number so the result is still versioned consistently.
final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone();
current.setVersion(publishedState.getVersion());
return current;
}
/*
 System test observations:
 - a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling
 - long time before content node state convergence (though this seems to be the case for legacy impl as well)
 */
// One resync pass: refresh ZK-backed data (periodically while non-master), poll
// slobrok and node states, run timer-driven state changes, and recompute the
// cluster state if needed. Also transitions this node into the state-gatherer
// role on first pass. Returns true if any work was done.
private boolean resyncLocallyCachedState() throws InterruptedException {
boolean didWork = false;
// Non-masters periodically reload wanted states/timestamps written by the master.
if ( ! isMaster && cycleCount % 100 == 0) {
didWork = database.loadWantedStates(databaseContext);
didWork |= database.loadStartTimestamps(cluster);
}
didWork |= nodeLookup.updateCluster(cluster, this);
didWork |= stateGatherer.sendMessages(cluster, communicator, this);
didWork |= stateChangeHandler.watchTimers(cluster, stateVersionTracker.getLatestCandidateState().getClusterState(), this);
didWork |= recomputeClusterStateIfRequired();
if ( ! isStateGatherer) {
if ( ! isMaster) {
// Becoming a master candidate: start gathering, pick up the latest ZK version.
eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis()));
stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
stateChangeHandler.setStateChangedFlag();
}
}
isStateGatherer = true;
return didWork;
}
// Notifies all registered listeners of a newly computed candidate state bundle.
private void invokeCandidateStateListeners(ClusterStateBundle candidateBundle) {
    for (SystemStateListener listener : systemStateListeners) {
        listener.handleNewCandidateState(candidateBundle);
    }
}
// Broadcasting is allowed once the initial grace period has elapsed, or earlier
// if every node has already reported its state (same short-circuit order as before).
private boolean hasPassedFirstStateBroadcastTimePoint(long timeNowMs) {
    if (timeNowMs >= firstAllowedStateBroadcast) {
        return true;
    }
    return cluster.allStatesReported();
}
// Recomputes the candidate cluster state when something indicates it may have changed,
// and promotes it to a new published, versioned state when it differs enough from the
// current one (or ZooKeeper handed us a new version). Returns true iff a new state
// version was published by this call.
private boolean recomputeClusterStateIfRequired() {
boolean stateWasChanged = false;
if (mustRecomputeCandidateClusterState()) {
stateChangeHandler.unsetStateChangedFlag();
final AnnotatedClusterState candidate = computeCurrentAnnotatedState();
// Build the full bundle: per-bucket-space derived states, optional two-phase
// activation and an inferred cluster-wide feed block (may be null).
final ClusterStateBundle candidateBundle = ClusterStateBundle.builder(candidate)
.bucketSpaces(configuredBucketSpaces)
.stateDeriver(createBucketSpaceStateDeriver())
.deferredActivation(options.enableTwoPhaseClusterStateActivation)
.feedBlock(createResourceExhaustionCalculator()
.inferContentClusterFeedBlockOrNull(cluster.getNodeInfo()))
.deriveAndBuild();
stateVersionTracker.updateLatestCandidateStateBundle(candidateBundle);
invokeCandidateStateListeners(candidateBundle);
final long timeNowMs = timer.getCurrentTimeInMillis();
// Publish only after the initial broadcast time point, and only when the candidate
// differs enough from the published state or ZooKeeper forced a version bump.
if (hasPassedFirstStateBroadcastTimePoint(timeNowMs)
&& (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish()
|| stateVersionTracker.hasReceivedNewVersionFromZooKeeper()))
{
final ClusterStateBundle before = stateVersionTracker.getVersionedClusterStateBundle();
stateVersionTracker.promoteCandidateToVersionedState(timeNowMs);
emitEventsForAlteredStateEdges(before, stateVersionTracker.getVersionedClusterStateBundle(), timeNowMs);
handleNewPublishedState(stateVersionTracker.getVersionedClusterStateBundle());
stateWasChanged = true;
}
}
/*
* This works transparently for tasks that end up changing the current cluster state (i.e.
* requiring a new state to be published) and for those whose changes are no-ops (because
* the changes they request are already part of the current state). In the former case the
* tasks will depend on the version that was generated based upon them. In the latter case
* the tasks will depend on the version that is already published (or in the process of
* being published).
*/
scheduleVersionDependentTasksForFutureCompletion(stateVersionTracker.getCurrentVersion());
return stateWasChanged;
}
// Chooses how per-bucket-space states are derived from the baseline state: plain clone
// when the cluster has no global document types, otherwise a deriver that keeps nodes in
// maintenance while global merges are pending.
private ClusterStateDeriver createBucketSpaceStateDeriver() {
    if (!options.clusterHasGlobalDocumentTypes) {
        return createIdentityClonedBucketSpaceStateDeriver();
    }
    return new MaintenanceWhenPendingGlobalMerges(stateVersionTracker.createMergePendingChecker(),
                                                  createDefaultSpaceMaintenanceTransitionConstraint());
}
// Builds a feed-block calculator seeded with any feed block already present in the
// candidate bundle, so the noise level (hysteresis) can be applied across recomputations.
private ResourceExhaustionCalculator createResourceExhaustionCalculator() {
    var previousFeedBlock = stateVersionTracker.getLatestCandidateStateBundle().getFeedBlockOrNull();
    return new ResourceExhaustionCalculator(options.clusterFeedBlockEnabled,
                                            options.clusterFeedBlockLimit,
                                            previousFeedBlock,
                                            options.clusterFeedBlockNoiseLevel);
}
// A deriver that ignores the bucket space entirely and just clones the baseline state.
private static ClusterStateDeriver createIdentityClonedBucketSpaceStateDeriver() {
    return (baselineState, bucketSpace) -> baselineState.clone();
}
// Builds the maintenance-transition constraint from what was last published for the
// default bucket space, falling back to an empty state if nothing has been derived yet.
private MaintenanceTransitionConstraint createDefaultSpaceMaintenanceTransitionConstraint() {
    var derivedStates = stateVersionTracker.getVersionedClusterStateBundle().getDerivedBucketSpaceStates();
    var defaultSpaceState = derivedStates.getOrDefault(FixedBucketSpaces.defaultSpace(), AnnotatedClusterState.emptyState());
    return UpEdgeMaintenanceTransitionConstraint.forPreviouslyPublishedState(defaultSpaceState.getClusterState());
}
/**
* Move tasks that are dependent on the most recently generated state being published into
* a completion queue with a dependency on the provided version argument. Once that version
* has been ACKed by all distributors in the system, those tasks will be marked as completed.
*/
private void scheduleVersionDependentTasksForFutureCompletion(int completeAtVersion) {
    // All tasks moved in this call share the same completion deadline.
    long deadlineTimePointMs = timer.getCurrentTimeInMillis() + options.getMaxDeferredTaskVersionWaitTime().toMillis();
    tasksPendingStateRecompute.forEach(task -> {
        log.finest(() -> String.format("Adding task of type '%s' to be completed at version %d",
                task.getClass().getName(), completeAtVersion));
        taskCompletionQueue.add(new VersionDependentTaskCompletion(completeAtVersion, task, deadlineTimePointMs));
    });
    tasksPendingStateRecompute.clear();
}
// Generates a fresh annotated cluster state from the current options, node info and the
// lowest distribution bit count observed so far.
private AnnotatedClusterState computeCurrentAnnotatedState() {
    ClusterStateGenerator.Params generatorParams = ClusterStateGenerator.Params.fromOptions(options);
    generatorParams.cluster(cluster)
            .currentTimeInMilllis(timer.getCurrentTimeInMillis())
            .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits());
    return ClusterStateGenerator.generatedStateFrom(generatorParams);
}
// Computes the event delta between the previously published and the new state bundle,
// logs each resulting event, then logs the generic "state applied" events.
private void emitEventsForAlteredStateEdges(final ClusterStateBundle fromState,
                                            final ClusterStateBundle toState,
                                            final long timeNowMs) {
    EventDiffCalculator.computeEventDiff(
            EventDiffCalculator.params()
                    .cluster(cluster)
                    .fromState(fromState)
                    .toState(toState)
                    .currentTimeMs(timeNowMs)
                    .maxMaintenanceGracePeriodTimeMs(options.storageNodeMaxTransitionTimeMs()))
            .forEach(event -> eventLog.add(event, isMaster));
    emitStateAppliedEvents(timeNowMs, fromState.getBaselineClusterState(), toState.getBaselineClusterState());
}
// Logs a SYSTEMSTATE event with the new version and textual state diff, plus an extra
// event when the distribution bit count changed between the two states.
private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) {
    String stateDiffMessage = "New cluster state version " + toClusterState.getVersion() + ". Change from last: " +
            fromClusterState.getTextualDifference(toClusterState);
    eventLog.add(new ClusterEvent(ClusterEvent.Type.SYSTEMSTATE, stateDiffMessage, timeNowMs), isMaster);
    int fromBits = fromClusterState.getDistributionBitCount();
    int toBits = toClusterState.getDistributionBitCount();
    if (toBits != fromBits) {
        String bitChangeMessage = "Altering distribution bits in system from " + fromBits + " to " + toBits;
        eventLog.add(new ClusterEvent(ClusterEvent.Type.SYSTEMSTATE, bitChangeMessage, timeNowMs), isMaster);
    }
}
// True exactly on the edge where this master may broadcast its very first cluster
// state bundle: master, nothing broadcast yet, and the first-broadcast time reached.
private boolean atFirstClusterStateSendTimeEdge() {
    if (!isMaster) {
        return false;
    }
    if (systemStateBroadcaster.hasBroadcastedClusterStateBundle()) {
        return false;
    }
    return hasPassedFirstStateBroadcastTimePoint(timer.getCurrentTimeInMillis());
}
// A new candidate state must be computed when node/config state may have changed, the
// per-bucket-space merge completion state differs, or we are at the first broadcast edge.
private boolean mustRecomputeCandidateClusterState() {
    if (stateChangeHandler.stateMayHaveChanged()) {
        return true;
    }
    if (stateVersionTracker.bucketSpaceMergeCompletionStateHasChanged()) {
        return true;
    }
    return atFirstClusterStateSendTimeEdge();
}
// Handles the edges where this node gains or loses masterhood. On promotion it reloads
// persisted version/bundle/wanted-state data from ZooKeeper and arms the first-broadcast
// grace period; on demotion it fails all pending version-dependent tasks.
// Returns true iff promotion work was performed during this call.
private boolean handleLeadershipEdgeTransitions() throws InterruptedException {
boolean didWork = false;
if (masterElectionHandler.isMaster()) {
// Rising edge: we just won the master election.
if ( ! isMaster) {
stateChangeHandler.setStateChangedFlag();
systemStateBroadcaster.resetBroadcastedClusterStateBundle();
stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
ClusterStateBundle previousBundle = database.getLatestClusterStateBundle();
database.loadStartTimestamps(cluster);
database.loadWantedStates(databaseContext);
log.info(() -> String.format("Loaded previous cluster state bundle from ZooKeeper: %s", previousBundle));
stateVersionTracker.setClusterStateBundleRetrievedFromZooKeeper(previousBundle);
eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. Bumped version to "
+ stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis()));
// Delay the first broadcast so node states can settle before we publish.
long currentTime = timer.getCurrentTimeInMillis();
firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast;
log.log(Level.FINE, "At time " + currentTime + " we set first system state broadcast time to be "
+ options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + ".");
didWork = true;
}
isMaster = true;
if (wantedStateChanged) {
database.saveWantedStates(databaseContext);
wantedStateChanged = false;
}
} else {
// Falling edge: we just lost masterhood.
if (isMaster) {
eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis()));
firstAllowedStateBroadcast = Long.MAX_VALUE;
failAllVersionDependentTasks();
}
wantedStateChanged = false;
isMaster = false;
}
metricUpdater.updateMasterState(isMaster);
return didWork;
}
/**
 * Event loop for the controller thread: ticks until stopped. An interrupt terminates
 * the loop quietly, while any other throwable is treated as fatal: it is logged and
 * the JVM is terminated.
 */
public void run() {
    controllerThreadId = Thread.currentThread().getId();
    try {
        processingCycle = true;
        while (isRunning()) {
            tick();
        }
    } catch (InterruptedException e) {
        log.log(Level.FINE, "Event thread stopped by interrupt exception: " + e);
    } catch (Throwable t) {
        // The SEVERE log record below already carries the full stack trace through the
        // logging framework; a separate printStackTrace() to stderr would only duplicate
        // it and bypass the configured log handlers.
        log.log(Level.SEVERE, "Fatal error killed fleet controller", t);
        synchronized (monitor) { running.set(false); }
        System.exit(1);
    } finally {
        running.set(false);
        failAllVersionDependentTasks();
        // Wake up anyone blocked in waitForCompleteCycle()/shutdown().
        synchronized (monitor) { monitor.notifyAll(); }
    }
}
// Adapter handing the DatabaseHandler access to this controller's cluster and its
// node-event/state-change callback interfaces.
public DatabaseHandler.Context databaseContext = new DatabaseHandler.Context() {
@Override
public ContentCluster getCluster() { return cluster; }
@Override
public FleetController getFleetController() { return FleetController.this; }
@Override
public NodeAddedOrRemovedListener getNodeAddedOrRemovedListener() { return FleetController.this; }
@Override
public NodeStateOrHostInfoChangeHandler getNodeStateUpdateListener() { return FleetController.this; }
};
/**
 * Blocks the calling thread until the controller has completed at least one full tick
 * cycle after this call. Intended for tests and external synchronization.
 *
 * @throws IllegalStateException if the cycle does not complete within timeoutMS, or
 *         if the controller is no longer running.
 */
public void waitForCompleteCycle(long timeoutMS) {
    long endTime = System.currentTimeMillis() + timeoutMS;
    synchronized (monitor) {
        // If a tick is in progress we must see it finish plus one complete extra cycle.
        long wantedCycle = cycleCount + (processingCycle ? 2 : 1);
        waitingForCycle = true;
        boolean interrupted = false;
        try {
            while (cycleCount < wantedCycle) {
                if (System.currentTimeMillis() > endTime) throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeoutMS + " ms.");
                if ( !isRunning() ) throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles");
                try {
                    monitor.wait(100);
                } catch (InterruptedException e) {
                    // Don't silently swallow the interrupt: remember it and restore the
                    // thread's interrupt status on exit so callers can observe it.
                    interrupted = true;
                }
            }
        } finally {
            waitingForCycle = false;
            if (interrupted) {
                Thread.currentThread().interrupt();
            }
        }
    }
}
/**
* This function might not be 100% threadsafe, as in theory cluster can be changing while accessed.
* But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce
* live performance to remove a non-problem.
*/
public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, int timeout) throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeout;
    synchronized (monitor) {
        while (true) {
            // Count nodes that have acknowledged at least the requested state version.
            int nodesAtVersion = 0;
            for (NodeInfo info : cluster.getNodeInfo()) {
                if (info.getClusterStateVersionBundleAcknowledged() >= version) {
                    nodesAtVersion++;
                }
            }
            if (nodesAtVersion >= nodeCount) {
                log.log(Level.INFO, nodesAtVersion + " nodes now have acked system state " + version + " or higher.");
                return;
            }
            if (System.currentTimeMillis() >= deadline) {
                throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout + " milliseconds.");
            }
            monitor.wait(10);
        }
    }
}
public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, int timeoutMillis) throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMillis;
    synchronized (monitor) {
        while (true) {
            // Count nodes whose RPC address is currently registered (not outdated) in slobrok.
            int distributorsSeen = 0;
            int storageSeen = 0;
            for (NodeInfo info : cluster.getNodeInfo()) {
                if (info.isRpcAddressOutdated()) continue;
                if (info.isDistributor()) {
                    distributorsSeen++;
                } else {
                    storageSeen++;
                }
            }
            if (distributorsSeen == distNodeCount && storageSeen == storNodeCount) return;
            if (System.currentTimeMillis() >= deadline) {
                throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount
                        + " storage nodes registered in slobrok within timeout of " + timeoutMillis + " ms. (Got "
                        + distributorsSeen + " distributors and " + storageSeen + " storage nodes)");
            }
            monitor.wait(10);
        }
    }
}
// True while the connection to the ZooKeeper database is open.
public boolean hasZookeeperConnection() { return !database.isClosed(); }
// Test helper: number of slobrok mirror updates observed. Assumes nodeLookup is a SlobrokClient — TODO confirm for non-slobrok wiring.
public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); }
// Returns the content cluster managed by this controller.
public ContentCluster getCluster() { return cluster; }
// Returns the events logged for the given node.
public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); }
// Returns the shared event log used by this controller.
public EventLog getEventLog() {
return eventLog;
}
} |
Agreed, will change to explicitly pass the version instead. | public void tick() throws Exception {
synchronized (monitor) {
boolean didWork;
didWork = database.doNextZooKeeperTask(databaseContext);
systemStateBroadcaster.setLastClusterStateVersionWrittenToZooKeeper(
database.getLastKnownStateBundleVersionWrittenBySelf());
didWork |= updateMasterElectionState();
didWork |= handleLeadershipEdgeTransitions();
stateChangeHandler.setMaster(isMaster);
if ( ! isRunning()) { return; }
didWork |= stateGatherer.processResponses(this);
if ( ! isRunning()) { return; }
if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) {
didWork |= resyncLocallyCachedState();
} else {
stepDownAsStateGatherer();
}
if ( ! isRunning()) { return; }
didWork |= systemStateBroadcaster.processResponses();
if ( ! isRunning()) { return; }
if (masterElectionHandler.isMaster()) {
didWork |= broadcastClusterStateToEligibleNodes();
systemStateBroadcaster.checkIfClusterStateIsAckedByAllDistributors(database, databaseContext, this);
}
if ( ! isRunning()) { return; }
didWork |= processAnyPendingStatusPageRequest();
if ( ! isRunning()) { return; }
if (rpcServer != null) {
didWork |= rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this, this);
}
if ( ! isRunning()) { return; }
didWork |= processNextQueuedRemoteTask();
didWork |= completeSatisfiedVersionDependentTasks();
didWork |= maybePublishOldMetrics();
processingCycle = false;
++cycleCount;
long tickStopTime = timer.getCurrentTimeInMillis();
if (tickStopTime >= tickStartTime) {
metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork);
}
if ( ! didWork && ! waitingForCycle) {
monitor.wait(options.cycleWaitTime);
}
if ( ! isRunning()) { return; }
tickStartTime = timer.getCurrentTimeInMillis();
processingCycle = true;
if (nextOptions != null) {
switchToNewConfig();
}
}
if (isRunning()) {
propagateNewStatesToListeners();
}
} | systemStateBroadcaster.setLastClusterStateVersionWrittenToZooKeeper( | public void tick() throws Exception {
synchronized (monitor) {
boolean didWork;
didWork = database.doNextZooKeeperTask(databaseContext);
didWork |= updateMasterElectionState();
didWork |= handleLeadershipEdgeTransitions();
stateChangeHandler.setMaster(isMaster);
if ( ! isRunning()) { return; }
didWork |= stateGatherer.processResponses(this);
if ( ! isRunning()) { return; }
if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) {
didWork |= resyncLocallyCachedState();
} else {
stepDownAsStateGatherer();
}
if ( ! isRunning()) { return; }
didWork |= systemStateBroadcaster.processResponses();
if ( ! isRunning()) { return; }
if (masterElectionHandler.isMaster()) {
didWork |= broadcastClusterStateToEligibleNodes();
systemStateBroadcaster.checkIfClusterStateIsAckedByAllDistributors(database, databaseContext, this);
}
if ( ! isRunning()) { return; }
didWork |= processAnyPendingStatusPageRequest();
if ( ! isRunning()) { return; }
if (rpcServer != null) {
didWork |= rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this, this);
}
if ( ! isRunning()) { return; }
didWork |= processNextQueuedRemoteTask();
didWork |= completeSatisfiedVersionDependentTasks();
didWork |= maybePublishOldMetrics();
processingCycle = false;
++cycleCount;
long tickStopTime = timer.getCurrentTimeInMillis();
if (tickStopTime >= tickStartTime) {
metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork);
}
if ( ! didWork && ! waitingForCycle) {
monitor.wait(options.cycleWaitTime);
}
if ( ! isRunning()) { return; }
tickStartTime = timer.getCurrentTimeInMillis();
processingCycle = true;
if (nextOptions != null) {
switchToNewConfig();
}
}
if (isRunning()) {
propagateNewStatesToListeners();
}
} | class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAddedOrRemovedListener, SystemStateListener,
Runnable, RemoteClusterControllerTaskScheduler {
private static final Logger log = Logger.getLogger(FleetController.class.getName());
private final Timer timer;
private final Object monitor;
private final EventLog eventLog;
private final NodeLookup nodeLookup;
private final ContentCluster cluster;
private final Communicator communicator;
private final NodeStateGatherer stateGatherer;
private final StateChangeHandler stateChangeHandler;
private final SystemStateBroadcaster systemStateBroadcaster;
private final StateVersionTracker stateVersionTracker;
private final StatusPageServerInterface statusPageServer;
private final RpcServer rpcServer;
private final DatabaseHandler database;
private final MasterElectionHandler masterElectionHandler;
private Thread runner = null;
private final AtomicBoolean running = new AtomicBoolean(true);
private FleetControllerOptions options;
private FleetControllerOptions nextOptions;
private final List<SystemStateListener> systemStateListeners = new CopyOnWriteArrayList<>();
private boolean processingCycle = false;
private boolean wantedStateChanged = false;
private long cycleCount = 0;
private long lastMetricUpdateCycleCount = 0;
private long nextStateSendTime = 0;
private Long controllerThreadId = null;
private boolean waitingForCycle = false;
private final StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter();
private final List<ClusterStateBundle> newStates = new ArrayList<>();
private final List<ClusterStateBundle> convergedStates = new ArrayList<>();
private long configGeneration = -1;
private long nextConfigGeneration = -1;
private final Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>();
private final MetricUpdater metricUpdater;
private boolean isMaster = false;
private boolean isStateGatherer = false;
private long firstAllowedStateBroadcast = Long.MAX_VALUE;
private long tickStartTime = Long.MAX_VALUE;
private final List<RemoteClusterControllerTask> tasksPendingStateRecompute = new ArrayList<>();
private final Queue<VersionDependentTaskCompletion> taskCompletionQueue = new ArrayDeque<>();
private Set<String> configuredBucketSpaces = Collections.emptySet();
private final RunDataExtractor dataExtractor = new RunDataExtractor() {
@Override
public FleetControllerOptions getOptions() { return options; }
@Override
public long getConfigGeneration() { return configGeneration; }
@Override
public ContentCluster getCluster() { return cluster; }
};
public FleetController(Timer timer,
EventLog eventLog,
ContentCluster cluster,
NodeStateGatherer nodeStateGatherer,
Communicator communicator,
StatusPageServerInterface statusPage,
RpcServer server,
NodeLookup nodeLookup,
DatabaseHandler database,
StateChangeHandler stateChangeHandler,
SystemStateBroadcaster systemStateBroadcaster,
MasterElectionHandler masterElectionHandler,
MetricUpdater metricUpdater,
FleetControllerOptions options) {
log.info("Starting up cluster controller " + options.fleetControllerIndex + " for cluster " + cluster.getName());
this.timer = timer;
this.monitor = timer;
this.eventLog = eventLog;
this.options = options;
this.nodeLookup = nodeLookup;
this.cluster = cluster;
this.communicator = communicator;
this.database = database;
this.stateGatherer = nodeStateGatherer;
this.stateChangeHandler = stateChangeHandler;
this.systemStateBroadcaster = systemStateBroadcaster;
this.stateVersionTracker = new StateVersionTracker(options.minMergeCompletionRatio);
this.metricUpdater = metricUpdater;
this.statusPageServer = statusPage;
this.rpcServer = server;
this.masterElectionHandler = masterElectionHandler;
this.statusRequestRouter.addHandler(
"^/node=([a-z]+)\\.(\\d+)$",
new LegacyNodePageRequestHandler(timer, eventLog, cluster));
this.statusRequestRouter.addHandler(
"^/state.*",
new NodeHealthRequestHandler(dataExtractor));
this.statusRequestRouter.addHandler(
"^/clusterstate",
new ClusterStateRequestHandler(stateVersionTracker));
this.statusRequestRouter.addHandler(
"^/$",
new LegacyIndexPageRequestHandler(
timer, options.showLocalSystemStatesInEventLog, cluster,
masterElectionHandler, stateVersionTracker,
eventLog, timer.getCurrentTimeInMillis(), dataExtractor));
propagateOptions();
}
public static FleetController create(FleetControllerOptions options,
StatusPageServerInterface statusPageServer,
MetricReporter metricReporter) throws Exception {
Timer timer = new RealTimer();
MetricUpdater metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex);
EventLog log = new EventLog(timer, metricUpdater);
ContentCluster cluster = new ContentCluster(
options.clusterName,
options.nodes,
options.storageDistribution);
NodeStateGatherer stateGatherer = new NodeStateGatherer(timer, timer, log);
Communicator communicator = new RPCCommunicator(
RPCCommunicator.createRealSupervisor(),
timer,
options.fleetControllerIndex,
options.nodeStateRequestTimeoutMS,
options.nodeStateRequestTimeoutEarliestPercentage,
options.nodeStateRequestTimeoutLatestPercentage,
options.nodeStateRequestRoundTripTimeMaxSeconds);
DatabaseHandler database = new DatabaseHandler(new ZooKeeperDatabaseFactory(), timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer);
NodeLookup lookUp = new SlobrokClient(timer);
StateChangeHandler stateGenerator = new StateChangeHandler(timer, log);
SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer);
MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer);
FleetController controller = new FleetController(
timer, log, cluster, stateGatherer, communicator, statusPageServer, null, lookUp, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options);
controller.start();
return controller;
}
public void start() {
runner = new Thread(this);
runner.start();
}
public Object getMonitor() { return monitor; }
public boolean isRunning() {
return running.get();
}
public boolean isMaster() {
synchronized (monitor) {
return masterElectionHandler.isMaster();
}
}
public ClusterState getClusterState() {
synchronized (monitor) {
return systemStateBroadcaster.getClusterState();
}
}
public ClusterStateBundle getClusterStateBundle() {
synchronized (monitor) {
return systemStateBroadcaster.getClusterStateBundle();
}
}
/** Queues a remote task for execution by the controller thread during a later tick. */
public void schedule(RemoteClusterControllerTask task) {
    synchronized (monitor) {
        // Lazy supplier: only build the message when FINE logging is actually enabled,
        // consistent with the other log.fine(() -> ...) call sites in this class.
        log.fine(() -> "Scheduled remote task " + task.getClass().getName() + " for execution");
        remoteTasks.add(task);
    }
}
/** Used for unit testing. */
public void addSystemStateListener(SystemStateListener listener) {
systemStateListeners.add(listener);
com.yahoo.vdslib.state.ClusterState state = getSystemState();
if (state == null) {
throw new NullPointerException("Cluster state should never be null at this point");
}
listener.handleNewPublishedState(ClusterStateBundle.ofBaselineOnly(AnnotatedClusterState.withoutAnnotations(state)));
ClusterStateBundle convergedState = systemStateBroadcaster.getLastClusterStateBundleConverged();
if (convergedState != null) {
listener.handleStateConvergedInCluster(convergedState);
}
}
public FleetControllerOptions getOptions() {
synchronized(monitor) {
return options.clone();
}
}
public NodeState getReportedNodeState(Node n) {
synchronized(monitor) {
NodeInfo node = cluster.getNodeInfo(n);
if (node == null) {
throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster);
}
return node.getReportedState();
}
}
public NodeState getWantedNodeState(Node n) {
synchronized(monitor) {
return cluster.getNodeInfo(n).getWantedState();
}
}
public com.yahoo.vdslib.state.ClusterState getSystemState() {
synchronized(monitor) {
return stateVersionTracker.getVersionedClusterState();
}
}
public int getRpcPort() { return rpcServer.getPort(); }
public void shutdown() throws InterruptedException, java.io.IOException {
if (runner != null && isRunning()) {
log.log(Level.INFO, "Joining event thread.");
running.set(false);
synchronized(monitor) { monitor.notifyAll(); }
runner.join();
}
log.log(Level.INFO, "Fleetcontroller done shutting down event thread.");
controllerThreadId = Thread.currentThread().getId();
database.shutdown(this);
if (statusPageServer != null) {
statusPageServer.shutdown();
}
if (rpcServer != null) {
rpcServer.shutdown();
}
communicator.shutdown();
nodeLookup.shutdown();
}
public void updateOptions(FleetControllerOptions options, long configGeneration) {
synchronized(monitor) {
assert(this.options.fleetControllerIndex == options.fleetControllerIndex);
log.log(Level.INFO, "Fleetcontroller " + options.fleetControllerIndex + " has new options");
nextOptions = options.clone();
nextConfigGeneration = configGeneration;
monitor.notifyAll();
}
}
// Guards methods that must only run on the controller thread. Before the controller
// thread has started (id not yet recorded) this is a no-op.
private void verifyInControllerThread() {
    Long ownerThreadId = controllerThreadId;
    if (ownerThreadId == null) {
        return;
    }
    if (ownerThreadId != Thread.currentThread().getId()) {
        throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen.");
    }
}
// Shorthand for the cluster state of the most recent (not yet published) candidate.
private ClusterState latestCandidateClusterState() {
return stateVersionTracker.getLatestCandidateState().getClusterState();
}
@Override
public void handleNewNodeState(NodeInfo node, NodeState newState) {
verifyInControllerThread();
stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this);
}
@Override
public void handleNewWantedNodeState(NodeInfo node, NodeState newState) {
verifyInControllerThread();
wantedStateChanged = true;
stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState);
}
@Override
public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) {
verifyInControllerThread();
triggerBundleRecomputationIfResourceExhaustionStateChanged(nodeInfo, newHostInfo);
stateVersionTracker.handleUpdatedHostInfo(nodeInfo, newHostInfo);
}
private void triggerBundleRecomputationIfResourceExhaustionStateChanged(NodeInfo nodeInfo, HostInfo newHostInfo) {
if (!options.clusterFeedBlockEnabled) {
return;
}
var calc = createResourceExhaustionCalculator();
var previouslyExhausted = calc.enumerateNodeResourceExhaustions(nodeInfo);
var nowExhausted = calc.resourceExhaustionsFromHostInfo(nodeInfo, newHostInfo);
if (!previouslyExhausted.equals(nowExhausted)) {
log.fine(() -> String.format("Triggering state recomputation due to change in cluster feed block: %s -> %s",
previouslyExhausted, nowExhausted));
stateChangeHandler.setStateChangedFlag();
}
}
@Override
public void handleNewNode(NodeInfo node) {
verifyInControllerThread();
stateChangeHandler.handleNewNode(node);
}
@Override
public void handleMissingNode(NodeInfo node) {
verifyInControllerThread();
stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this);
}
@Override
public void handleNewRpcAddress(NodeInfo node) {
verifyInControllerThread();
stateChangeHandler.handleNewRpcAddress(node);
}
@Override
public void handleReturnedRpcAddress(NodeInfo node) {
verifyInControllerThread();
stateChangeHandler.handleReturnedRpcAddress(node);
}
@Override
public void handleNewPublishedState(ClusterStateBundle stateBundle) {
verifyInControllerThread();
ClusterState baselineState = stateBundle.getBaselineClusterState();
newStates.add(stateBundle);
metricUpdater.updateClusterStateMetrics(cluster, baselineState,
ResourceUsageStats.calculateFrom(cluster.getNodeInfo(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock()));
lastMetricUpdateCycleCount = cycleCount;
systemStateBroadcaster.handleNewClusterStates(stateBundle);
if (masterElectionHandler.isMaster()) {
storeClusterStateMetaDataToZooKeeper(stateBundle);
}
}
// Re-publishes cluster state metrics when no state publish has refreshed them for more
// than 300 cycles, so externally visible metrics do not go stale. Returns true iff
// metrics were re-published.
private boolean maybePublishOldMetrics() {
    verifyInControllerThread();
    if (cycleCount <= 300 + lastMetricUpdateCycleCount) {
        return false;
    }
    ClusterStateBundle stateBundle = stateVersionTracker.getVersionedClusterStateBundle();
    ClusterState baselineState = stateBundle.getBaselineClusterState();
    metricUpdater.updateClusterStateMetrics(cluster, baselineState,
            ResourceUsageStats.calculateFrom(cluster.getNodeInfo(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock()));
    lastMetricUpdateCycleCount = cycleCount;
    return true;
}
/**
 * Persists the new cluster state version and bundle to ZooKeeper, then records in the
 * broadcaster which version has actually been written by this controller.
 *
 * @throws RuntimeException wrapping an InterruptedException if the write is interrupted;
 *         the thread's interrupt status is restored before rethrowing.
 */
private void storeClusterStateMetaDataToZooKeeper(ClusterStateBundle stateBundle) {
    try {
        database.saveLatestSystemStateVersion(databaseContext, stateBundle.getVersion());
        database.saveLatestClusterStateBundle(databaseContext, stateBundle);
        systemStateBroadcaster.setLastClusterStateVersionWrittenToZooKeeper(database.getLastKnownStateBundleVersionWrittenBySelf());
    } catch (InterruptedException e) {
        // Restore the interrupt status so code further up the stack can still observe it.
        Thread.currentThread().interrupt();
        throw new RuntimeException("ZooKeeper write interrupted", e);
    }
}
/**
* This function gives data of the current state in master election.
* The keys in the given map are indexes of fleet controllers.
* The values are what fleetcontroller that fleetcontroller wants to
* become master.
*
* If more than half the fleetcontrollers want a node to be master and
* that node also wants itself as master, that node is the single master.
* If this condition is not met, there is currently no master.
*/
public void handleFleetData(Map<Integer, Integer> data) {
verifyInControllerThread();
log.log(Level.FINEST, "Sending fleet data event on to master election handler");
metricUpdater.updateMasterElectionMetrics(data);
masterElectionHandler.handleFleetData(data);
}
/**
* Called when we can no longer contact database.
*/
public void lostDatabaseConnection() {
verifyInControllerThread();
masterElectionHandler.lostDatabaseConnection();
}
// Fails and completes every task waiting on a state recompute or on version completion.
// Invoked when leadership is lost and when the controller shuts down.
private void failAllVersionDependentTasks() {
    for (RemoteClusterControllerTask task : tasksPendingStateRecompute) {
        task.handleFailure(RemoteClusterControllerTask.Failure.of(
                RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST));
        task.notifyCompleted();
    }
    tasksPendingStateRecompute.clear();
    for (VersionDependentTaskCompletion completion : taskCompletionQueue) {
        completion.getTask().handleFailure(RemoteClusterControllerTask.Failure.of(
                RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST));
        completion.getTask().notifyCompleted();
    }
    taskCompletionQueue.clear();
}
/** Called when all distributors have acked newest cluster state version. */
public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.Context context) throws InterruptedException {
Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values());
var currentBundle = stateVersionTracker.getVersionedClusterStateBundle();
log.fine(() -> String.format("All distributors have ACKed cluster state version %d", currentBundle.getVersion()));
stateChangeHandler.handleAllDistributorsInSync(currentBundle.getBaselineClusterState(), nodes, database, context);
convergedStates.add(currentBundle);
}
// Returns true iff the given node collection differs from the currently configured set,
// either in membership or in any node's retired flag.
private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) {
    var currentNodes = cluster.getConfiguredNodes();
    if (newNodes.size() != currentNodes.size()) return true;
    if (!currentNodes.values().containsAll(newNodes)) return true;
    for (ConfiguredNode candidate : newNodes) {
        if (candidate.retired() != currentNodes.get(candidate.index()).retired()) {
            return true;
        }
    }
    return false;
}
/** This is called when the options field has been set to a new set of options */
private void propagateOptions() {
verifyInControllerThread();
if (changesConfiguredNodeSet(options.nodes)) {
cluster.setSlobrokGenerationCount(0);
}
configuredBucketSpaces = Collections.unmodifiableSet(
Stream.of(FixedBucketSpaces.defaultSpace(), FixedBucketSpaces.globalSpace())
.collect(Collectors.toSet()));
stateVersionTracker.setMinMergeCompletionRatio(options.minMergeCompletionRatio);
communicator.propagateOptions(options);
if (nodeLookup instanceof SlobrokClient) {
((SlobrokClient) nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs);
}
eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize);
cluster.setPollingFrequency(options.statePollingFrequency);
cluster.setDistribution(options.storageDistribution);
cluster.setNodes(options.nodes);
database.setZooKeeperAddress(options.zooKeeperServerAddress);
database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout);
stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod);
stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS);
stateChangeHandler.reconfigureFromOptions(options);
stateChangeHandler.setStateChangedFlag();
masterElectionHandler.setFleetControllerCount(options.fleetControllerCount);
masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod);
if (rpcServer != null) {
rpcServer.setMasterElectionHandler(masterElectionHandler);
try{
rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort);
} catch (ListenFailedException e) {
log.log(Level.WARNING, "Failed to bind RPC server to port " + options.rpcPort +". This may be natural if cluster has altered the services running on this node: " + e.getMessage());
} catch (Exception e) {
log.log(Level.WARNING, "Failed to initialize RPC server socket: " + e.getMessage());
}
}
if (statusPageServer != null) {
try{
statusPageServer.setPort(options.httpPort);
} catch (Exception e) {
log.log(Level.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage());
}
}
long currentTime = timer.getCurrentTimeInMillis();
nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime);
configGeneration = nextConfigGeneration;
nextConfigGeneration = -1;
}
    /**
     * Handles an HTTP status page request by routing it to a registered handler.
     * On failure (no handler, or a handler throwing) an HTML error page is built instead.
     * Must be called from the controller thread.
     */
    public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) {
        verifyInControllerThread();
        StatusPageResponse.ResponseCode responseCode;
        String message;
        // Extra detail (e.g. stack trace) embedded in the page footer, not shown prominently.
        String hiddenMessage = "";
        try {
            StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest);
            if (handler == null) {
                throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath());
            }
            return handler.handle(httpRequest);
        } catch (FileNotFoundException e) {
            responseCode = StatusPageResponse.ResponseCode.NOT_FOUND;
            message = e.getMessage();
        } catch (Exception e) {
            // Any other handler failure becomes a 500; the stack trace is kept for the footer only.
            responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR;
            message = "Internal Server Error";
            hiddenMessage = ExceptionUtils.getStackTraceAsString(e);
            log.log(Level.FINE, "Unknown exception thrown for request " + httpRequest.getRequest() +
                    ": " + hiddenMessage);
        }
        // Fall-through: build the error page.
        TimeZone tz = TimeZone.getTimeZone("UTC");
        long currentTime = timer.getCurrentTimeInMillis();
        StatusPageResponse response = new StatusPageResponse();
        StringBuilder content = new StringBuilder();
        response.setContentType("text/html");
        response.setResponseCode(responseCode);
        content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n");
        content.append("<p>UTC time when creating this page: ").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>");
        response.writeHtmlHeader(content, message);
        response.writeHtmlFooter(content, hiddenMessage);
        response.writeContent(content.toString());
        return response;
    }
private boolean updateMasterElectionState() throws InterruptedException {
try {
return masterElectionHandler.watchMasterElection(database, databaseContext);
} catch (InterruptedException e) {
throw (InterruptedException) new InterruptedException("Interrupted").initCause(e);
} catch (Exception e) {
log.log(Level.WARNING, "Failed to watch master election: " + e.toString());
}
return false;
}
private void stepDownAsStateGatherer() {
if (isStateGatherer) {
cluster.clearStates();
eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis()));
}
isStateGatherer = false;
}
private void switchToNewConfig() {
options = nextOptions;
nextOptions = null;
try {
propagateOptions();
} catch (Exception e) {
log.log(Level.SEVERE, "Failed to handle new fleet controller config", e);
}
}
private boolean processAnyPendingStatusPageRequest() {
if (statusPageServer != null) {
StatusPageServer.HttpRequest statusRequest = statusPageServer.getCurrentHttpRequest();
if (statusRequest != null) {
statusPageServer.answerCurrentStatusRequest(fetchStatusPage(statusRequest));
return true;
}
}
return false;
}
    /**
     * Broadcasts the current cluster state bundle (and any required state activations) to nodes,
     * subject to rate limiting and the initial broadcast grace period.
     *
     * @return true if anything was sent
     */
    private boolean broadcastClusterStateToEligibleNodes() {
        // Defer broadcast until ZooKeeper metadata writes have completed.
        if (database.hasPendingClusterStateMetaDataStore()) {
            log.log(Level.FINE, "Can't publish current cluster state as it has one or more pending ZooKeeper stores");
            return false;
        }
        boolean sentAny = false;
        long currentTime = timer.getCurrentTimeInMillis();
        // Broadcast only after the grace period (or once all nodes have reported), and
        // no more often than the configured minimum interval between states.
        if ((currentTime >= firstAllowedStateBroadcast || cluster.allStatesReported())
            && currentTime >= nextStateSendTime)
        {
            if (currentTime < firstAllowedStateBroadcast) {
                log.log(Level.FINE, "Not set to broadcast states just yet, but as we have gotten info from all nodes we can do so safely.");
                // All nodes reported; shorten the grace period to now.
                firstAllowedStateBroadcast = currentTime;
            }
            sentAny = systemStateBroadcaster.broadcastNewStateBundleIfRequired(databaseContext, communicator);
            if (sentAny) {
                nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates;
            }
        }
        // Activations are not rate limited the same way; always give them a chance.
        sentAny |= systemStateBroadcaster.broadcastStateActivationsIfRequired(databaseContext, communicator);
        return sentAny;
    }
private void propagateNewStatesToListeners() {
if ( ! newStates.isEmpty()) {
synchronized (systemStateListeners) {
for (ClusterStateBundle stateBundle : newStates) {
for (SystemStateListener listener : systemStateListeners) {
listener.handleNewPublishedState(stateBundle);
}
}
newStates.clear();
}
}
if ( ! convergedStates.isEmpty()) {
synchronized (systemStateListeners) {
for (ClusterStateBundle stateBundle : convergedStates) {
for (SystemStateListener listener : systemStateListeners) {
listener.handleStateConvergedInCluster(stateBundle);
}
}
convergedStates.clear();
}
}
}
private boolean processNextQueuedRemoteTask() {
if ( ! remoteTasks.isEmpty()) {
final RemoteClusterControllerTask.Context context = createRemoteTaskProcessingContext();
final RemoteClusterControllerTask task = remoteTasks.poll();
log.finest(() -> String.format("Processing remote task of type '%s'", task.getClass().getName()));
task.doRemoteFleetControllerTask(context);
if (taskMayBeCompletedImmediately(task)) {
log.finest(() -> String.format("Done processing remote task of type '%s'", task.getClass().getName()));
task.notifyCompleted();
} else {
log.finest(() -> String.format("Remote task of type '%s' queued until state recomputation", task.getClass().getName()));
tasksPendingStateRecompute.add(task);
}
return true;
}
return false;
}
private boolean taskMayBeCompletedImmediately(RemoteClusterControllerTask task) {
return (!task.hasVersionAckDependency() || task.isFailed() || !masterElectionHandler.isMaster());
}
private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() {
final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context();
context.cluster = cluster;
context.currentConsolidatedState = consolidatedClusterState();
context.publishedClusterStateBundle = stateVersionTracker.getVersionedClusterStateBundle();
context.masterInfo = masterElectionHandler;
context.nodeStateOrHostInfoChangeHandler = this;
context.nodeAddedOrRemovedListener = this;
return context;
}
private static long effectiveActivatedStateVersion(NodeInfo nodeInfo, ClusterStateBundle bundle) {
return bundle.deferredActivation()
? nodeInfo.getClusterStateVersionActivationAcked()
: nodeInfo.getClusterStateVersionBundleAcknowledged();
}
private List<Node> enumerateNodesNotYetAckedAtLeastVersion(long version) {
var bundle = systemStateBroadcaster.getClusterStateBundle();
if (bundle == null) {
return List.of();
}
return cluster.getNodeInfo().stream().
filter(n -> effectiveActivatedStateVersion(n, bundle) < version).
map(NodeInfo::getNode).
collect(Collectors.toList());
}
private static <E> String stringifyListWithLimits(List<E> list, int limit) {
if (list.size() > limit) {
var sub = list.subList(0, limit);
return String.format("%s (... and %d more)",
sub.stream().map(E::toString).collect(Collectors.joining(", ")),
list.size() - limit);
} else {
return list.stream().map(E::toString).collect(Collectors.joining(", "));
}
}
private String buildNodesNotYetConvergedMessage(long taskConvergeVersion) {
var nodes = enumerateNodesNotYetAckedAtLeastVersion(taskConvergeVersion);
if (nodes.isEmpty()) {
return "";
}
return String.format("the following nodes have not converged to at least version %d: %s",
taskConvergeVersion, stringifyListWithLimits(nodes, options.maxDivergentNodesPrintedInTaskErrorMessages));
}
    /**
     * Completes queued version-dependent tasks whose minimum version is now in sync across the
     * cluster, and fails those whose wait deadline has passed. The queue is ordered, so we stop
     * at the first task that can neither complete nor time out yet.
     *
     * @return true if any task was completed or failed
     */
    private boolean completeSatisfiedVersionDependentTasks() {
        int publishedVersion = systemStateBroadcaster.lastClusterStateVersionInSync();
        long queueSizeBefore = taskCompletionQueue.size();
        final long now = timer.getCurrentTimeInMillis();
        while (!taskCompletionQueue.isEmpty()) {
            VersionDependentTaskCompletion taskCompletion = taskCompletionQueue.peek();
            // TODO expose and use monotonic clock instead of system clock
            if (publishedVersion >= taskCompletion.getMinimumVersion()) {
                // Version requirement satisfied cluster-wide; complete the task.
                log.fine(() -> String.format("Deferred task of type '%s' has minimum version %d, published is %d; completing",
                        taskCompletion.getTask().getClass().getName(), taskCompletion.getMinimumVersion(), publishedVersion));
                taskCompletion.getTask().notifyCompleted();
                taskCompletionQueue.remove();
            } else if (taskCompletion.getDeadlineTimePointMs() <= now) {
                // Deadline exceeded; fail the task with a message naming the lagging nodes.
                var details = buildNodesNotYetConvergedMessage(taskCompletion.getMinimumVersion());
                log.log(Level.WARNING, () -> String.format("Deferred task of type '%s' has exceeded wait deadline; completing with failure (details: %s)",
                        taskCompletion.getTask().getClass().getName(), details));
                taskCompletion.getTask().handleFailure(RemoteClusterControllerTask.Failure.of(
                        RemoteClusterControllerTask.FailureCondition.DEADLINE_EXCEEDED, details));
                taskCompletion.getTask().notifyCompleted();
                taskCompletionQueue.remove();
            } else {
                // Head of queue cannot make progress yet; later entries won't either.
                break;
            }
        }
        return (taskCompletionQueue.size() != queueSizeBefore);
    }
/**
* A "consolidated" cluster state is guaranteed to have up-to-date information on which nodes are
* up or down even when the whole cluster is down. The regular, published cluster state is not
* normally updated to reflect node events when the cluster is down.
*/
ClusterState consolidatedClusterState() {
final ClusterState publishedState = stateVersionTracker.getVersionedClusterState();
if (publishedState.getClusterState() == State.UP) {
return publishedState;
}
final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone();
current.setVersion(publishedState.getVersion());
return current;
}
/*
System test observations:
- a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling
- long time before content node state convergence (though this seems to be the case for legacy impl as well)
*/
    /**
     * One tick's worth of state resync: refreshes cached ZooKeeper state (periodically, when not
     * master), updates cluster membership from slobrok, sends node state requests, checks timers
     * and recomputes the cluster state if needed. Also transitions this node into the state
     * gatherer role on first call after not having been one.
     *
     * @return true if any work was performed this tick
     */
    private boolean resyncLocallyCachedState() throws InterruptedException {
        boolean didWork = false;
        // When not master, periodically (every 100 cycles) refresh wanted states and start
        // timestamps from ZooKeeper so we stay reasonably in sync with the actual master.
        if ( ! isMaster && cycleCount % 100 == 0) {
            didWork = database.loadWantedStates(databaseContext);
            didWork |= database.loadStartTimestamps(cluster);
        }
        didWork |= nodeLookup.updateCluster(cluster, this);
        didWork |= stateGatherer.sendMessages(cluster, communicator, this);
        didWork |= stateChangeHandler.watchTimers(cluster, stateVersionTracker.getLatestCandidateState().getClusterState(), this);
        didWork |= recomputeClusterStateIfRequired();
        // Edge: becoming a state gatherer. The event/version refresh only applies when we are
        // a master candidate (not yet master); masters handle this in the leadership edge code.
        if ( ! isStateGatherer) {
            if ( ! isMaster) {
                eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis()));
                stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
                stateChangeHandler.setStateChangedFlag();
            }
        }
        // Unconditionally: every node that reaches this point acts as a state gatherer.
        isStateGatherer = true;
        return didWork;
    }
private void invokeCandidateStateListeners(ClusterStateBundle candidateBundle) {
systemStateListeners.forEach(listener -> listener.handleNewCandidateState(candidateBundle));
}
private boolean hasPassedFirstStateBroadcastTimePoint(long timeNowMs) {
return timeNowMs >= firstAllowedStateBroadcast || cluster.allStatesReported();
}
    /**
     * Recomputes the candidate cluster state bundle if anything may have changed, and promotes
     * it to the published versioned state when the change is significant enough (or a new
     * version arrived via ZooKeeper). Always (re)schedules pending version-dependent tasks
     * against the current version.
     *
     * @return true if a new versioned state was published
     */
    private boolean recomputeClusterStateIfRequired() {
        boolean stateWasChanged = false;
        if (mustRecomputeCandidateClusterState()) {
            stateChangeHandler.unsetStateChangedFlag();
            final AnnotatedClusterState candidate = computeCurrentAnnotatedState();
            // Derive per-bucket-space states plus feed block status from the baseline candidate.
            final ClusterStateBundle candidateBundle = ClusterStateBundle.builder(candidate)
                    .bucketSpaces(configuredBucketSpaces)
                    .stateDeriver(createBucketSpaceStateDeriver())
                    .deferredActivation(options.enableTwoPhaseClusterStateActivation)
                    .feedBlock(createResourceExhaustionCalculator()
                               .inferContentClusterFeedBlockOrNull(cluster.getNodeInfo()))
                    .deriveAndBuild();
            stateVersionTracker.updateLatestCandidateStateBundle(candidateBundle);
            invokeCandidateStateListeners(candidateBundle);
            final long timeNowMs = timer.getCurrentTimeInMillis();
            // Publish only after the initial grace period, and only if the candidate differs
            // enough from the current state or ZooKeeper handed us a new version.
            if (hasPassedFirstStateBroadcastTimePoint(timeNowMs)
                && (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish()
                    || stateVersionTracker.hasReceivedNewVersionFromZooKeeper()))
            {
                final ClusterStateBundle before = stateVersionTracker.getVersionedClusterStateBundle();
                stateVersionTracker.promoteCandidateToVersionedState(timeNowMs);
                emitEventsForAlteredStateEdges(before, stateVersionTracker.getVersionedClusterStateBundle(), timeNowMs);
                handleNewPublishedState(stateVersionTracker.getVersionedClusterStateBundle());
                stateWasChanged = true;
            }
        }
        /*
         * This works transparently for tasks that end up changing the current cluster state (i.e.
         * requiring a new state to be published) and for those whose changes are no-ops (because
         * the changes they request are already part of the current state). In the former case the
         * tasks will depend on the version that was generated based upon them. In the latter case
         * the tasks will depend on the version that is already published (or in the process of
         * being published).
         */
        scheduleVersionDependentTasksForFutureCompletion(stateVersionTracker.getCurrentVersion());
        return stateWasChanged;
    }
private ClusterStateDeriver createBucketSpaceStateDeriver() {
if (options.clusterHasGlobalDocumentTypes) {
return new MaintenanceWhenPendingGlobalMerges(stateVersionTracker.createMergePendingChecker(),
createDefaultSpaceMaintenanceTransitionConstraint());
} else {
return createIdentityClonedBucketSpaceStateDeriver();
}
}
private ResourceExhaustionCalculator createResourceExhaustionCalculator() {
return new ResourceExhaustionCalculator(
options.clusterFeedBlockEnabled, options.clusterFeedBlockLimit,
stateVersionTracker.getLatestCandidateStateBundle().getFeedBlockOrNull(),
options.clusterFeedBlockNoiseLevel);
}
private static ClusterStateDeriver createIdentityClonedBucketSpaceStateDeriver() {
return (state, space) -> state.clone();
}
private MaintenanceTransitionConstraint createDefaultSpaceMaintenanceTransitionConstraint() {
AnnotatedClusterState currentDefaultSpaceState = stateVersionTracker.getVersionedClusterStateBundle()
.getDerivedBucketSpaceStates().getOrDefault(FixedBucketSpaces.defaultSpace(), AnnotatedClusterState.emptyState());
return UpEdgeMaintenanceTransitionConstraint.forPreviouslyPublishedState(currentDefaultSpaceState.getClusterState());
}
/**
* Move tasks that are dependent on the most recently generated state being published into
* a completion queue with a dependency on the provided version argument. Once that version
* has been ACKed by all distributors in the system, those tasks will be marked as completed.
*/
private void scheduleVersionDependentTasksForFutureCompletion(int completeAtVersion) {
final long maxDeadlineTimePointMs = timer.getCurrentTimeInMillis() + options.getMaxDeferredTaskVersionWaitTime().toMillis();
for (RemoteClusterControllerTask task : tasksPendingStateRecompute) {
log.finest(() -> String.format("Adding task of type '%s' to be completed at version %d",
task.getClass().getName(), completeAtVersion));
taskCompletionQueue.add(new VersionDependentTaskCompletion(completeAtVersion, task, maxDeadlineTimePointMs));
}
tasksPendingStateRecompute.clear();
}
private AnnotatedClusterState computeCurrentAnnotatedState() {
ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options);
params.currentTimeInMilllis(timer.getCurrentTimeInMillis())
.cluster(cluster)
.lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits());
return ClusterStateGenerator.generatedStateFrom(params);
}
private void emitEventsForAlteredStateEdges(final ClusterStateBundle fromState,
final ClusterStateBundle toState,
final long timeNowMs) {
final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff(
EventDiffCalculator.params()
.cluster(cluster)
.fromState(fromState)
.toState(toState)
.currentTimeMs(timeNowMs)
.maxMaintenanceGracePeriodTimeMs(options.storageNodeMaxTransitionTimeMs()));
for (Event event : deltaEvents) {
eventLog.add(event, isMaster);
}
emitStateAppliedEvents(timeNowMs, fromState.getBaselineClusterState(), toState.getBaselineClusterState());
}
private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) {
eventLog.add(new ClusterEvent(
ClusterEvent.Type.SYSTEMSTATE,
"New cluster state version " + toClusterState.getVersion() + ". Change from last: " +
fromClusterState.getTextualDifference(toClusterState),
timeNowMs), isMaster);
if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) {
eventLog.add(new ClusterEvent(
ClusterEvent.Type.SYSTEMSTATE,
"Altering distribution bits in system from "
+ fromClusterState.getDistributionBitCount() + " to " +
toClusterState.getDistributionBitCount(),
timeNowMs), isMaster);
}
}
private boolean atFirstClusterStateSendTimeEdge() {
if (!isMaster || systemStateBroadcaster.hasBroadcastedClusterStateBundle()) {
return false;
}
return hasPassedFirstStateBroadcastTimePoint(timer.getCurrentTimeInMillis());
}
private boolean mustRecomputeCandidateClusterState() {
return stateChangeHandler.stateMayHaveChanged()
|| stateVersionTracker.bucketSpaceMergeCompletionStateHasChanged()
|| atFirstClusterStateSendTimeEdge();
}
    /**
     * Detects and handles master/non-master edge transitions: on becoming master, reload state
     * from ZooKeeper and start the initial broadcast grace period; on losing mastership, fail
     * all version-dependent tasks and reset broadcast gating.
     *
     * @return true if the became-master edge was handled this call
     */
    private boolean handleLeadershipEdgeTransitions() throws InterruptedException {
        boolean didWork = false;
        if (masterElectionHandler.isMaster()) {
            if ( ! isMaster) {
                // Edge: we just became master. Rebuild our view from ZooKeeper before acting.
                stateChangeHandler.setStateChangedFlag();
                systemStateBroadcaster.resetBroadcastedClusterStateBundle();
                stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
                ClusterStateBundle previousBundle = database.getLatestClusterStateBundle();
                database.loadStartTimestamps(cluster);
                database.loadWantedStates(databaseContext);
                log.info(() -> String.format("Loaded previous cluster state bundle from ZooKeeper: %s", previousBundle));
                stateVersionTracker.setClusterStateBundleRetrievedFromZooKeeper(previousBundle);
                eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. Bumped version to "
                        + stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis()));
                // Hold off broadcasting for the configured grace period after taking over.
                long currentTime = timer.getCurrentTimeInMillis();
                firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast;
                log.log(Level.FINE, "At time " + currentTime + " we set first system state broadcast time to be "
                        + options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + ".");
                didWork = true;
            }
            isMaster = true;
            if (wantedStateChanged) {
                // Only the master persists wanted states to ZooKeeper.
                database.saveWantedStates(databaseContext);
                wantedStateChanged = false;
            }
        } else {
            if (isMaster) {
                // Edge: we just lost mastership.
                eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis()));
                firstAllowedStateBroadcast = Long.MAX_VALUE;
                failAllVersionDependentTasks();
            }
            wantedStateChanged = false;
            isMaster = false;
        }
        metricUpdater.updateMasterState(isMaster);
        return didWork;
    }
public void run() {
controllerThreadId = Thread.currentThread().getId();
try {
processingCycle = true;
while( isRunning() ) {
tick();
}
} catch (InterruptedException e) {
log.log(Level.FINE, "Event thread stopped by interrupt exception: " + e);
} catch (Throwable t) {
t.printStackTrace();
log.log(Level.SEVERE, "Fatal error killed fleet controller", t);
synchronized (monitor) { running.set(false); }
System.exit(1);
} finally {
running.set(false);
failAllVersionDependentTasks();
synchronized (monitor) { monitor.notifyAll(); }
}
}
    // Context bridging the database layer back to this controller: exposes the cluster and
    // routes node add/remove and state-change callbacks to this instance.
    public DatabaseHandler.Context databaseContext = new DatabaseHandler.Context() {
        @Override
        public ContentCluster getCluster() { return cluster; }
        @Override
        public FleetController getFleetController() { return FleetController.this; }
        @Override
        public NodeAddedOrRemovedListener getNodeAddedOrRemovedListener() { return FleetController.this; }
        @Override
        public NodeStateOrHostInfoChangeHandler getNodeStateUpdateListener() { return FleetController.this; }
    };
public void waitForCompleteCycle(long timeoutMS) {
long endTime = System.currentTimeMillis() + timeoutMS;
synchronized (monitor) {
long wantedCycle = cycleCount + (processingCycle ? 2 : 1);
waitingForCycle = true;
try{
while (cycleCount < wantedCycle) {
if (System.currentTimeMillis() > endTime) throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeoutMS + " ms.");
if ( !isRunning() ) throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles");
try{ monitor.wait(100); } catch (InterruptedException e) {}
}
} finally {
waitingForCycle = false;
}
}
}
/**
* This function might not be 100% threadsafe, as in theory cluster can be changing while accessed.
* But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce
* live performance to remove a non-problem.
*/
public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, int timeout) throws InterruptedException {
long maxTime = System.currentTimeMillis() + timeout;
synchronized (monitor) {
while (true) {
int ackedNodes = 0;
for (NodeInfo node : cluster.getNodeInfo()) {
if (node.getClusterStateVersionBundleAcknowledged() >= version) {
++ackedNodes;
}
}
if (ackedNodes >= nodeCount) {
log.log(Level.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher.");
return;
}
long remainingTime = maxTime - System.currentTimeMillis();
if (remainingTime <= 0) {
throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout + " milliseconds.");
}
monitor.wait(10);
}
}
}
public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, int timeoutMillis) throws InterruptedException {
long maxTime = System.currentTimeMillis() + timeoutMillis;
synchronized (monitor) {
while (true) {
int distCount = 0, storCount = 0;
for (NodeInfo info : cluster.getNodeInfo()) {
if (!info.isRpcAddressOutdated()) {
if (info.isDistributor()) ++distCount;
else ++storCount;
}
}
if (distCount == distNodeCount && storCount == storNodeCount) return;
long remainingTime = maxTime - System.currentTimeMillis();
if (remainingTime <= 0) {
throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount
+ " storage nodes registered in slobrok within timeout of " + timeoutMillis + " ms. (Got "
+ distCount + " distributors and " + storCount + " storage nodes)");
}
monitor.wait(10);
}
}
}
    /** True while the ZooKeeper database connection is open. */
    public boolean hasZookeeperConnection() { return !database.isClosed(); }
    // Used in unit tests to ensure that slobrok information is correctly provided
    public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); }
    public ContentCluster getCluster() { return cluster; }
    public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); }
    public EventLog getEventLog() {
        return eventLog;
    }
} | class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAddedOrRemovedListener, SystemStateListener,
Runnable, RemoteClusterControllerTaskScheduler {
    private static final Logger log = Logger.getLogger(FleetController.class.getName());

    // Core collaborators, wired in via the constructor.
    private final Timer timer;
    private final Object monitor;
    private final EventLog eventLog;
    private final NodeLookup nodeLookup;
    private final ContentCluster cluster;
    private final Communicator communicator;
    private final NodeStateGatherer stateGatherer;
    private final StateChangeHandler stateChangeHandler;
    private final SystemStateBroadcaster systemStateBroadcaster;
    private final StateVersionTracker stateVersionTracker;
    private final StatusPageServerInterface statusPageServer;
    private final RpcServer rpcServer;
    private final DatabaseHandler database;
    private final MasterElectionHandler masterElectionHandler;

    // Event-loop thread state. Mutable fields below are guarded by 'monitor' or are only
    // touched from the controller thread (see verifyInControllerThread).
    private Thread runner = null;
    private final AtomicBoolean running = new AtomicBoolean(true);
    private FleetControllerOptions options;
    private FleetControllerOptions nextOptions; // Staged config, applied on next cycle.
    private final List<SystemStateListener> systemStateListeners = new CopyOnWriteArrayList<>();
    private boolean processingCycle = false;
    private boolean wantedStateChanged = false;
    private long cycleCount = 0;
    private long lastMetricUpdateCycleCount = 0;
    private long nextStateSendTime = 0; // Earliest time a new system state may be broadcast.
    private Long controllerThreadId = null;

    private boolean waitingForCycle = false;
    private final StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter();
    private final List<ClusterStateBundle> newStates = new ArrayList<>();
    private final List<ClusterStateBundle> convergedStates = new ArrayList<>();
    private long configGeneration = -1;
    private long nextConfigGeneration = -1;
    private final Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>();
    private final MetricUpdater metricUpdater;

    private boolean isMaster = false;
    private boolean isStateGatherer = false;
    private long firstAllowedStateBroadcast = Long.MAX_VALUE; // Broadcast grace period end.
    private long tickStartTime = Long.MAX_VALUE;

    // Tasks awaiting a state recompute, then a version ACK, before completion.
    private final List<RemoteClusterControllerTask> tasksPendingStateRecompute = new ArrayList<>();
    // Invariant: queued task versions are monotonically increasing
    private final Queue<VersionDependentTaskCompletion> taskCompletionQueue = new ArrayDeque<>();

    private Set<String> configuredBucketSpaces = Collections.emptySet();

    // Snapshot accessor used by status page handlers.
    private final RunDataExtractor dataExtractor = new RunDataExtractor() {
        @Override
        public FleetControllerOptions getOptions() { return options; }
        @Override
        public long getConfigGeneration() { return configGeneration; }
        @Override
        public ContentCluster getCluster() { return cluster; }
    };
    /**
     * Wires together all collaborators and registers the status page request handlers,
     * then applies the initial options via propagateOptions().
     */
    public FleetController(Timer timer,
                           EventLog eventLog,
                           ContentCluster cluster,
                           NodeStateGatherer nodeStateGatherer,
                           Communicator communicator,
                           StatusPageServerInterface statusPage,
                           RpcServer server,
                           NodeLookup nodeLookup,
                           DatabaseHandler database,
                           StateChangeHandler stateChangeHandler,
                           SystemStateBroadcaster systemStateBroadcaster,
                           MasterElectionHandler masterElectionHandler,
                           MetricUpdater metricUpdater,
                           FleetControllerOptions options) {
        log.info("Starting up cluster controller " + options.fleetControllerIndex + " for cluster " + cluster.getName());
        this.timer = timer;
        // The timer instance doubles as the shared monitor object.
        this.monitor = timer;
        this.eventLog = eventLog;
        this.options = options;
        this.nodeLookup = nodeLookup;
        this.cluster = cluster;
        this.communicator = communicator;
        this.database = database;
        this.stateGatherer = nodeStateGatherer;
        this.stateChangeHandler = stateChangeHandler;
        this.systemStateBroadcaster = systemStateBroadcaster;
        this.stateVersionTracker = new StateVersionTracker(options.minMergeCompletionRatio);
        this.metricUpdater = metricUpdater;
        this.statusPageServer = statusPage;
        this.rpcServer = server;
        this.masterElectionHandler = masterElectionHandler;
        // Status page routes: per-node page, health state, raw cluster state, and index.
        this.statusRequestRouter.addHandler(
                "^/node=([a-z]+)\\.(\\d+)$",
                new LegacyNodePageRequestHandler(timer, eventLog, cluster));
        this.statusRequestRouter.addHandler(
                "^/state.*",
                new NodeHealthRequestHandler(dataExtractor));
        this.statusRequestRouter.addHandler(
                "^/clusterstate",
                new ClusterStateRequestHandler(stateVersionTracker));
        this.statusRequestRouter.addHandler(
                "^/$",
                new LegacyIndexPageRequestHandler(
                    timer, options.showLocalSystemStatesInEventLog, cluster,
                    masterElectionHandler, stateVersionTracker,
                    eventLog, timer.getCurrentTimeInMillis(), dataExtractor));

        propagateOptions();
    }
    /**
     * Factory for a fully wired, started FleetController using real (production) collaborators.
     *
     * @throws Exception if any collaborator fails to initialize
     */
    public static FleetController create(FleetControllerOptions options,
                                         StatusPageServerInterface statusPageServer,
                                         MetricReporter metricReporter) throws Exception {
        Timer timer = new RealTimer();
        MetricUpdater metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex);
        EventLog log = new EventLog(timer, metricUpdater);
        ContentCluster cluster = new ContentCluster(
                options.clusterName,
                options.nodes,
                options.storageDistribution);
        NodeStateGatherer stateGatherer = new NodeStateGatherer(timer, timer, log);
        Communicator communicator = new RPCCommunicator(
                RPCCommunicator.createRealSupervisor(),
                timer,
                options.fleetControllerIndex,
                options.nodeStateRequestTimeoutMS,
                options.nodeStateRequestTimeoutEarliestPercentage,
                options.nodeStateRequestTimeoutLatestPercentage,
                options.nodeStateRequestRoundTripTimeMaxSeconds);
        DatabaseHandler database = new DatabaseHandler(new ZooKeeperDatabaseFactory(), timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer);
        NodeLookup lookUp = new SlobrokClient(timer);
        StateChangeHandler stateGenerator = new StateChangeHandler(timer, log);
        SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer);
        MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer);
        // No RPC server (null) in this factory; the RPC port is configured separately.
        FleetController controller = new FleetController(
                timer, log, cluster, stateGatherer, communicator, statusPageServer, null, lookUp, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options);
        controller.start();
        return controller;
    }
public void start() {
runner = new Thread(this);
runner.start();
}
    /** Returns the shared monitor object used to guard controller state. */
    public Object getMonitor() { return monitor; }

    public boolean isRunning() {
        return running.get();
    }

    public boolean isMaster() {
        synchronized (monitor) {
            return masterElectionHandler.isMaster();
        }
    }

    /** Returns the last broadcast baseline cluster state. */
    public ClusterState getClusterState() {
        synchronized (monitor) {
            return systemStateBroadcaster.getClusterState();
        }
    }

    /** Returns the last broadcast cluster state bundle (baseline + derived spaces). */
    public ClusterStateBundle getClusterStateBundle() {
        synchronized (monitor) {
            return systemStateBroadcaster.getClusterStateBundle();
        }
    }

    /** Queues a remote task for execution on the controller thread on a later tick. */
    public void schedule(RemoteClusterControllerTask task) {
        synchronized (monitor) {
            log.fine("Scheduled remote task " + task.getClass().getName() + " for execution");
            remoteTasks.add(task);
        }
    }
    /** Used for unit testing. */
    public void addSystemStateListener(SystemStateListener listener) {
        systemStateListeners.add(listener);
        // Immediately replay the current published state to the new listener.
        com.yahoo.vdslib.state.ClusterState state = getSystemState();
        if (state == null) {
            throw new NullPointerException("Cluster state should never be null at this point");
        }
        listener.handleNewPublishedState(ClusterStateBundle.ofBaselineOnly(AnnotatedClusterState.withoutAnnotations(state)));
        // Also replay the last converged bundle, if one exists yet.
        ClusterStateBundle convergedState = systemStateBroadcaster.getLastClusterStateBundleConverged();
        if (convergedState != null) {
            listener.handleStateConvergedInCluster(convergedState);
        }
    }
    /** Returns a defensive copy of the current options. */
    public FleetControllerOptions getOptions() {
        synchronized(monitor) {
            return options.clone();
        }
    }

    /**
     * Returns the node's last reported state.
     *
     * @throws IllegalStateException if the node is unknown to the cluster
     */
    public NodeState getReportedNodeState(Node n) {
        synchronized(monitor) {
            NodeInfo node = cluster.getNodeInfo(n);
            if (node == null) {
                throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster);
            }
            return node.getReportedState();
        }
    }

    public NodeState getWantedNodeState(Node n) {
        synchronized(monitor) {
            return cluster.getNodeInfo(n).getWantedState();
        }
    }

    /** Returns the current versioned (published) cluster state. */
    public com.yahoo.vdslib.state.ClusterState getSystemState() {
        synchronized(monitor) {
            return stateVersionTracker.getVersionedClusterState();
        }
    }

    public int getRpcPort() { return rpcServer.getPort(); }
    /**
     * Stops the event thread and shuts down all collaborators, in dependency order:
     * event thread first, then database, status/RPC servers, communicator and node lookup.
     */
    public void shutdown() throws InterruptedException, java.io.IOException {
        if (runner != null && isRunning()) {
            log.log(Level.INFO, "Joining event thread.");
            running.set(false);
            synchronized(monitor) { monitor.notifyAll(); }
            runner.join();
        }
        log.log(Level.INFO, "Fleetcontroller done shutting down event thread.");
        // Take over the controller-thread id so remaining teardown passes thread checks.
        controllerThreadId = Thread.currentThread().getId();
        database.shutdown(this);

        if (statusPageServer != null) {
            statusPageServer.shutdown();
        }
        if (rpcServer != null) {
            rpcServer.shutdown();
        }
        communicator.shutdown();
        nodeLookup.shutdown();
    }
    /** Stages new options; they take effect on the controller thread's next config switch. */
    public void updateOptions(FleetControllerOptions options, long configGeneration) {
        synchronized(monitor) {
            assert(this.options.fleetControllerIndex == options.fleetControllerIndex);
            log.log(Level.INFO, "Fleetcontroller " + options.fleetControllerIndex + " has new options");
            nextOptions = options.clone();
            nextConfigGeneration = configGeneration;
            monitor.notifyAll();
        }
    }

    /** Asserts we are on the controller thread (no-op before the thread is registered). */
    private void verifyInControllerThread() {
        if (controllerThreadId != null && controllerThreadId != Thread.currentThread().getId()) {
            throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen.");
        }
    }
    /** Returns the cluster state of the latest (not yet published) candidate. */
    private ClusterState latestCandidateClusterState() {
        return stateVersionTracker.getLatestCandidateState().getClusterState();
    }

    // A node has reported a new state; feed it into the state change handler.
    @Override
    public void handleNewNodeState(NodeInfo node, NodeState newState) {
        verifyInControllerThread();
        stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this);
    }

    // An operator has set a new wanted state; mark it for persistence to ZooKeeper.
    @Override
    public void handleNewWantedNodeState(NodeInfo node, NodeState newState) {
        verifyInControllerThread();
        wantedStateChanged = true;
        stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState);
    }

    // A node reported new host info; may trigger a feed-block driven state recomputation.
    @Override
    public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) {
        verifyInControllerThread();
        triggerBundleRecomputationIfResourceExhaustionStateChanged(nodeInfo, newHostInfo);
        stateVersionTracker.handleUpdatedHostInfo(nodeInfo, newHostInfo);
    }
/**
 * If cluster feed block is enabled and this node's set of resource exhaustions changes with the
 * new host info, flags the state change handler so a new cluster state bundle gets computed.
 */
private void triggerBundleRecomputationIfResourceExhaustionStateChanged(NodeInfo nodeInfo, HostInfo newHostInfo) {
    if (!options.clusterFeedBlockEnabled) {
        return;
    }
    var calc = createResourceExhaustionCalculator();
    // Compare exhaustions from the currently known host info with those derived from the new host info.
    var previouslyExhausted = calc.enumerateNodeResourceExhaustions(nodeInfo);
    var nowExhausted = calc.resourceExhaustionsFromHostInfo(nodeInfo, newHostInfo);
    if (!previouslyExhausted.equals(nowExhausted)) {
        log.fine(() -> String.format("Triggering state recomputation due to change in cluster feed block: %s -> %s",
                previouslyExhausted, nowExhausted));
        stateChangeHandler.setStateChangedFlag();
    }
}
/** Forwards discovery of a new node to the state change handler. Controller thread only. */
@Override
public void handleNewNode(NodeInfo node) {
    verifyInControllerThread();
    stateChangeHandler.handleNewNode(node);
}
/** Forwards disappearance of a node to the state change handler. Controller thread only. */
@Override
public void handleMissingNode(NodeInfo node) {
    verifyInControllerThread();
    stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this);
}
/** Forwards a node's new RPC address to the state change handler. Controller thread only. */
@Override
public void handleNewRpcAddress(NodeInfo node) {
    verifyInControllerThread();
    stateChangeHandler.handleNewRpcAddress(node);
}
/** Forwards a node's returned (previously seen) RPC address to the state change handler. */
@Override
public void handleReturnedRpcAddress(NodeInfo node) {
    verifyInControllerThread();
    stateChangeHandler.handleReturnedRpcAddress(node);
}
/**
 * Invoked when a new cluster state bundle has been published: updates metrics, queues the bundle
 * for listener propagation, hands it to the broadcaster, and (when master) persists it to ZooKeeper.
 */
@Override
public void handleNewPublishedState(ClusterStateBundle stateBundle) {
    verifyInControllerThread();
    ClusterState baselineState = stateBundle.getBaselineClusterState();
    newStates.add(stateBundle); // Drained by propagateNewStatesToListeners() later in the tick.
    metricUpdater.updateClusterStateMetrics(cluster, baselineState,
            ResourceUsageStats.calculateFrom(cluster.getNodeInfo(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock()));
    lastMetricUpdateCycleCount = cycleCount;
    systemStateBroadcaster.handleNewClusterStates(stateBundle);
    // Only the master may write cluster state meta data to ZooKeeper.
    if (masterElectionHandler.isMaster()) {
        storeClusterStateMetaDataToZooKeeper(stateBundle);
    }
}
/**
 * Re-publishes cluster state metrics if no metric update has happened for more than 300 cycles,
 * keeping metrics fresh in a quiescent cluster. Returns whether metrics were updated.
 */
private boolean maybePublishOldMetrics() {
    verifyInControllerThread();
    // 300 cycles without a published state is the staleness threshold for re-emitting metrics.
    if (cycleCount > 300 + lastMetricUpdateCycleCount) {
        ClusterStateBundle stateBundle = stateVersionTracker.getVersionedClusterStateBundle();
        ClusterState baselineState = stateBundle.getBaselineClusterState();
        metricUpdater.updateClusterStateMetrics(cluster, baselineState,
                ResourceUsageStats.calculateFrom(cluster.getNodeInfo(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock()));
        lastMetricUpdateCycleCount = cycleCount;
        return true;
    } else {
        return false;
    }
}
/**
 * Persists the latest cluster state version and bundle to ZooKeeper.
 * Interruption during the write is converted to an unchecked exception; the thread's
 * interrupt status is restored first so callers further up the stack can still observe it.
 */
private void storeClusterStateMetaDataToZooKeeper(ClusterStateBundle stateBundle) {
    try {
        database.saveLatestSystemStateVersion(databaseContext, stateBundle.getVersion());
        database.saveLatestClusterStateBundle(databaseContext, stateBundle);
    } catch (InterruptedException e) {
        // Restore the interrupt flag; wrapping alone would silently swallow the interruption signal.
        Thread.currentThread().interrupt();
        throw new RuntimeException("ZooKeeper write interrupted", e);
    }
}
/**
 * This function gives data of the current state in master election.
 * The keys in the given map are indexes of fleet controllers.
 * The values are which fleet controller each of them wants to
 * become master.
 *
 * If more than half the fleet controllers want a node to be master and
 * that node also wants itself as master, that node is the single master.
 * If this condition is not met, there is currently no master.
 */
public void handleFleetData(Map<Integer, Integer> data) {
    verifyInControllerThread();
    log.log(Level.FINEST, "Sending fleet data event on to master election handler");
    metricUpdater.updateMasterElectionMetrics(data);
    masterElectionHandler.handleFleetData(data);
}
/**
 * Called when we can no longer contact the database; forwarded to the master election
 * handler so it can invalidate any mastership derived from the lost session.
 */
public void lostDatabaseConnection() {
    verifyInControllerThread();
    masterElectionHandler.lostDatabaseConnection();
}
/**
 * Fails and completes every task waiting on a cluster state version: both tasks not yet
 * scheduled for completion ({@code tasksPendingStateRecompute}) and tasks already queued
 * ({@code taskCompletionQueue}). Used when leadership is lost, since version convergence
 * can no longer be guaranteed by this controller.
 */
private void failAllVersionDependentTasks() {
    for (RemoteClusterControllerTask task : tasksPendingStateRecompute) {
        task.handleFailure(RemoteClusterControllerTask.Failure.of(
                RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST));
        task.notifyCompleted();
    }
    tasksPendingStateRecompute.clear();
    for (var queuedCompletion : taskCompletionQueue) {
        queuedCompletion.getTask().handleFailure(RemoteClusterControllerTask.Failure.of(
                RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST));
        queuedCompletion.getTask().notifyCompleted();
    }
    taskCompletionQueue.clear();
}
/** Called when all distributors have acked the newest cluster state version. */
public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.Context context) throws InterruptedException {
    Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values());
    var currentBundle = stateVersionTracker.getVersionedClusterStateBundle();
    log.fine(() -> String.format("All distributors have ACKed cluster state version %d", currentBundle.getVersion()));
    stateChangeHandler.handleAllDistributorsInSync(currentBundle.getBaselineClusterState(), nodes, database, context);
    convergedStates.add(currentBundle); // Listeners are notified of convergence later in the tick.
}
/**
 * Returns true if the given node collection differs from the currently configured one,
 * either in membership or in any node's retired flag.
 */
private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) {
    var configuredNodes = cluster.getConfiguredNodes();
    if (newNodes.size() != configuredNodes.size()) return true;
    if (!configuredNodes.values().containsAll(newNodes)) return true;
    // Membership is identical; detect any change in retired status.
    return newNodes.stream()
            .anyMatch(node -> node.retired() != configuredNodes.get(node.index()).retired());
}
/**
 * This is called when the options field has been set to a new set of options.
 * Pushes the new configuration out to every subsystem owned by the controller.
 * Controller thread only.
 */
private void propagateOptions() {
    verifyInControllerThread();
    if (changesConfiguredNodeSet(options.nodes)) {
        // Node set changed; reset slobrok generation count (presumably to force a re-sync — TODO confirm).
        cluster.setSlobrokGenerationCount(0);
    }
    configuredBucketSpaces = Collections.unmodifiableSet(
            Stream.of(FixedBucketSpaces.defaultSpace(), FixedBucketSpaces.globalSpace())
                    .collect(Collectors.toSet()));
    stateVersionTracker.setMinMergeCompletionRatio(options.minMergeCompletionRatio);
    communicator.propagateOptions(options);
    if (nodeLookup instanceof SlobrokClient) {
        ((SlobrokClient) nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs);
    }
    eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize);
    cluster.setPollingFrequency(options.statePollingFrequency);
    cluster.setDistribution(options.storageDistribution);
    cluster.setNodes(options.nodes);
    database.setZooKeeperAddress(options.zooKeeperServerAddress);
    database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout);
    stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod);
    stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS);
    stateChangeHandler.reconfigureFromOptions(options);
    stateChangeHandler.setStateChangedFlag(); // Always recompute cluster state after reconfiguration.
    masterElectionHandler.setFleetControllerCount(options.fleetControllerCount);
    masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod);
    if (rpcServer != null) {
        rpcServer.setMasterElectionHandler(masterElectionHandler);
        try{
            rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort);
        } catch (ListenFailedException e) {
            log.log(Level.WARNING, "Failed to bind RPC server to port " + options.rpcPort +". This may be natural if cluster has altered the services running on this node: " + e.getMessage());
        } catch (Exception e) {
            log.log(Level.WARNING, "Failed to initialize RPC server socket: " + e.getMessage());
        }
    }
    if (statusPageServer != null) {
        try{
            statusPageServer.setPort(options.httpPort);
        } catch (Exception e) {
            log.log(Level.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage());
        }
    }
    long currentTime = timer.getCurrentTimeInMillis();
    // Never push the next allowed broadcast time further out than it already is.
    nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime);
    configGeneration = nextConfigGeneration;
    nextConfigGeneration = -1;
}
/**
 * Resolves and serves a status page request. If no handler matches, or a handler throws,
 * an HTML error page is produced instead (stack traces are kept in a hidden section).
 * Controller thread only.
 */
public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) {
    verifyInControllerThread();
    StatusPageResponse.ResponseCode responseCode;
    String message;
    String hiddenMessage = "";
    try {
        StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest);
        if (handler == null) {
            throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath());
        }
        return handler.handle(httpRequest);
    } catch (FileNotFoundException e) {
        responseCode = StatusPageResponse.ResponseCode.NOT_FOUND;
        message = e.getMessage();
    } catch (Exception e) {
        responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR;
        message = "Internal Server Error";
        hiddenMessage = ExceptionUtils.getStackTraceAsString(e);
        log.log(Level.FINE, "Unknown exception thrown for request " + httpRequest.getRequest() +
                ": " + hiddenMessage);
    }
    TimeZone tz = TimeZone.getTimeZone("UTC");
    long currentTime = timer.getCurrentTimeInMillis();
    StatusPageResponse response = new StatusPageResponse();
    StringBuilder content = new StringBuilder();
    response.setContentType("text/html");
    response.setResponseCode(responseCode);
    // NOTE(review): the raw request string is embedded into the HTML without escaping —
    // confirm upstream sanitization before trusting this with untrusted input.
    content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n");
    content.append("<p>UTC time when creating this page: ").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>");
    response.writeHtmlHeader(content, message);
    response.writeHtmlFooter(content, hiddenMessage);
    response.writeContent(content.toString());
    return response;
}
/**
 * Polls the master election state. Returns true if any work was done.
 * InterruptedException is propagated to the caller; all other failures are
 * logged as warnings and treated as "no work done".
 *
 * @throws InterruptedException if interrupted while watching the election
 */
private boolean updateMasterElectionState() throws InterruptedException {
    try {
        return masterElectionHandler.watchMasterElection(database, databaseContext);
    } catch (InterruptedException e) {
        // Rethrow directly: wrapping in a fresh InterruptedException only obscured the original stack.
        throw e;
    } catch (Exception e) {
        // Pass the throwable to the logger so the stack trace is preserved, not just e.toString().
        log.log(Level.WARNING, "Failed to watch master election: " + e, e);
    }
    return false;
}
/** Clears gathered node states and logs the event when this node stops being a state gatherer. */
private void stepDownAsStateGatherer() {
    if (isStateGatherer) {
        cluster.clearStates(); // Drop cached states; we are no longer keeping them up to date.
        eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis()));
    }
    isStateGatherer = false;
}
/** Promotes {@code nextOptions} to the active options and propagates them; failures are logged, not rethrown. */
private void switchToNewConfig() {
    options = nextOptions;
    nextOptions = null;
    try {
        propagateOptions();
    } catch (Exception e) {
        log.log(Level.SEVERE, "Failed to handle new fleet controller config", e);
    }
}
/** Answers a pending HTTP status page request, if any. Returns whether a request was processed. */
private boolean processAnyPendingStatusPageRequest() {
    if (statusPageServer == null) {
        return false;
    }
    StatusPageServer.HttpRequest statusRequest = statusPageServer.getCurrentHttpRequest();
    if (statusRequest == null) {
        return false;
    }
    statusPageServer.answerCurrentStatusRequest(fetchStatusPage(statusRequest));
    return true;
}
/**
 * Broadcasts the current cluster state bundle and/or state activations to nodes, honoring the
 * first-allowed-broadcast time point and the minimum interval between new system states.
 * Skipped entirely while ZooKeeper meta data stores are still pending. Returns whether
 * anything was sent.
 */
private boolean broadcastClusterStateToEligibleNodes() {
    // Avoid publishing a state whose meta data is not yet durably stored.
    if (database.hasPendingClusterStateMetaDataStore()) {
        log.log(Level.FINE, "Can't publish current cluster state as it has one or more pending ZooKeeper stores");
        return false;
    }
    boolean sentAny = false;
    long currentTime = timer.getCurrentTimeInMillis();
    if ((currentTime >= firstAllowedStateBroadcast || cluster.allStatesReported())
        && currentTime >= nextStateSendTime)
    {
        if (currentTime < firstAllowedStateBroadcast) {
            // All nodes have reported, so broadcasting early is safe.
            log.log(Level.FINE, "Not set to broadcast states just yet, but as we have gotten info from all nodes we can do so safely.");
            firstAllowedStateBroadcast = currentTime;
        }
        sentAny = systemStateBroadcaster.broadcastNewStateBundleIfRequired(
                databaseContext, communicator, database.getLastKnownStateBundleVersionWrittenBySelf());
        if (sentAny) {
            // Rate-limit subsequent broadcasts.
            nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates;
        }
    }
    // Activations are not rate-limited by the state send interval.
    sentAny |= systemStateBroadcaster.broadcastStateActivationsIfRequired(databaseContext, communicator);
    return sentAny;
}
/**
 * Drains the queued new and converged state bundles, notifying all registered system state
 * listeners. Each queue is drained under the systemStateListeners lock.
 */
private void propagateNewStatesToListeners() {
    if ( ! newStates.isEmpty()) {
        synchronized (systemStateListeners) {
            for (ClusterStateBundle stateBundle : newStates) {
                for (SystemStateListener listener : systemStateListeners) {
                    listener.handleNewPublishedState(stateBundle);
                }
            }
            newStates.clear();
        }
    }
    if ( ! convergedStates.isEmpty()) {
        synchronized (systemStateListeners) {
            for (ClusterStateBundle stateBundle : convergedStates) {
                for (SystemStateListener listener : systemStateListeners) {
                    listener.handleStateConvergedInCluster(stateBundle);
                }
            }
            convergedStates.clear();
        }
    }
}
/**
 * Processes at most one queued remote task. Tasks that depend on a cluster state version ACK
 * are deferred to {@code tasksPendingStateRecompute}; others are completed immediately.
 * Returns whether a task was processed.
 */
private boolean processNextQueuedRemoteTask() {
    if ( ! remoteTasks.isEmpty()) {
        final RemoteClusterControllerTask.Context context = createRemoteTaskProcessingContext();
        final RemoteClusterControllerTask task = remoteTasks.poll();
        log.finest(() -> String.format("Processing remote task of type '%s'", task.getClass().getName()));
        task.doRemoteFleetControllerTask(context);
        if (taskMayBeCompletedImmediately(task)) {
            log.finest(() -> String.format("Done processing remote task of type '%s'", task.getClass().getName()));
            task.notifyCompleted();
        } else {
            log.finest(() -> String.format("Remote task of type '%s' queued until state recomputation", task.getClass().getName()));
            tasksPendingStateRecompute.add(task);
        }
        return true;
    }
    return false;
}
/**
 * A task may complete right away unless it depends on a version ACK, has not failed, and we
 * are the master — in which case it must wait for distributors to converge on the version.
 */
private boolean taskMayBeCompletedImmediately(RemoteClusterControllerTask task) {
    boolean mustAwaitVersionAck = task.hasVersionAckDependency()
            && !task.isFailed()
            && masterElectionHandler.isMaster();
    return !mustAwaitVersionAck;
}
/** Builds the context object handed to remote tasks, exposing current cluster and state snapshots. */
private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() {
    final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context();
    context.cluster = cluster;
    context.currentConsolidatedState = consolidatedClusterState();
    context.publishedClusterStateBundle = stateVersionTracker.getVersionedClusterStateBundle();
    context.masterInfo = masterElectionHandler;
    context.nodeStateOrHostInfoChangeHandler = this;
    context.nodeAddedOrRemovedListener = this;
    return context;
}
/**
 * Returns the state version a node has effectively taken into use: the activation-acked version
 * when the bundle uses deferred (two-phase) activation, otherwise the bundle-acked version.
 */
private static long effectiveActivatedStateVersion(NodeInfo nodeInfo, ClusterStateBundle bundle) {
    return bundle.deferredActivation()
            ? nodeInfo.getClusterStateVersionActivationAcked()
            : nodeInfo.getClusterStateVersionBundleAcknowledged();
}
/**
 * Returns the nodes whose effective activated state version is below {@code version},
 * or an empty list if no bundle has been broadcast yet.
 */
private List<Node> enumerateNodesNotYetAckedAtLeastVersion(long version) {
    var bundle = systemStateBroadcaster.getClusterStateBundle();
    if (bundle == null) {
        return List.of();
    }
    return cluster.getNodeInfo().stream().
            filter(n -> effectiveActivatedStateVersion(n, bundle) < version).
            map(NodeInfo::getNode).
            collect(Collectors.toList());
}
/**
 * Joins the list elements with ", ", truncating to the first {@code limit} elements and
 * appending "(... and N more)" when the list is longer than the limit.
 */
private static <E> String stringifyListWithLimits(List<E> list, int limit) {
    if (list.size() <= limit) {
        return list.stream().map(E::toString).collect(Collectors.joining(", "));
    }
    String shownElements = list.subList(0, limit).stream()
            .map(E::toString)
            .collect(Collectors.joining(", "));
    return String.format("%s (... and %d more)", shownElements, list.size() - limit);
}
/**
 * Builds a human-readable summary of nodes that have not converged to at least the given
 * version, or an empty string if all have. The node list is truncated per configured limit.
 */
private String buildNodesNotYetConvergedMessage(long taskConvergeVersion) {
    var nodes = enumerateNodesNotYetAckedAtLeastVersion(taskConvergeVersion);
    if (nodes.isEmpty()) {
        return "";
    }
    return String.format("the following nodes have not converged to at least version %d: %s",
            taskConvergeVersion, stringifyListWithLimits(nodes, options.maxDivergentNodesPrintedInTaskErrorMessages));
}
/**
 * Completes queued version-dependent tasks whose required state version has been acked by the
 * cluster, and fails those whose deadline has expired. The queue is processed in order and
 * iteration stops at the first task that can neither complete nor expire yet (the queue is
 * processed head-first). Returns whether any task was removed from the queue.
 */
private boolean completeSatisfiedVersionDependentTasks() {
    int publishedVersion = systemStateBroadcaster.lastClusterStateVersionInSync();
    long queueSizeBefore = taskCompletionQueue.size();
    final long now = timer.getCurrentTimeInMillis();
    while (!taskCompletionQueue.isEmpty()) {
        VersionDependentTaskCompletion taskCompletion = taskCompletionQueue.peek();
        if (publishedVersion >= taskCompletion.getMinimumVersion()) {
            log.fine(() -> String.format("Deferred task of type '%s' has minimum version %d, published is %d; completing",
                    taskCompletion.getTask().getClass().getName(), taskCompletion.getMinimumVersion(), publishedVersion));
            taskCompletion.getTask().notifyCompleted();
            taskCompletionQueue.remove();
        } else if (taskCompletion.getDeadlineTimePointMs() <= now) {
            var details = buildNodesNotYetConvergedMessage(taskCompletion.getMinimumVersion());
            log.log(Level.WARNING, () -> String.format("Deferred task of type '%s' has exceeded wait deadline; completing with failure (details: %s)",
                    taskCompletion.getTask().getClass().getName(), details));
            taskCompletion.getTask().handleFailure(RemoteClusterControllerTask.Failure.of(
                    RemoteClusterControllerTask.FailureCondition.DEADLINE_EXCEEDED, details));
            taskCompletion.getTask().notifyCompleted();
            taskCompletionQueue.remove();
        } else {
            // Head of queue can neither complete nor time out yet; later entries must keep waiting too.
            break;
        }
    }
    return (taskCompletionQueue.size() != queueSizeBefore);
}
/**
 * A "consolidated" cluster state is guaranteed to have up-to-date information on which nodes are
 * up or down even when the whole cluster is down. The regular, published cluster state is not
 * normally updated to reflect node events when the cluster is down.
 */
ClusterState consolidatedClusterState() {
    final ClusterState publishedState = stateVersionTracker.getVersionedClusterState();
    if (publishedState.getClusterState() == State.UP) {
        return publishedState;
    }
    // Cluster is down: use the latest candidate state's node info, but keep the published version.
    final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone();
    current.setVersion(publishedState.getVersion());
    return current;
}
/*
 System test observations:
  - a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling
  - long time before content node state convergence (though this seems to be the case for legacy impl as well)
*/
/**
 * Refreshes locally cached state: periodically reloads wanted states and start timestamps when
 * not master, polls node lookup/state gatherer/timers, and recomputes the cluster state if
 * required. Also transitions this node into the state-gatherer role (master candidates gather
 * state so a takeover starts from fresh data). Returns whether any work was done.
 */
private boolean resyncLocallyCachedState() throws InterruptedException {
    boolean didWork = false;
    // Only refresh ZooKeeper-derived data periodically (every 100 cycles) when we are not master.
    if ( ! isMaster && cycleCount % 100 == 0) {
        didWork = database.loadWantedStates(databaseContext);
        didWork |= database.loadStartTimestamps(cluster);
    }
    didWork |= nodeLookup.updateCluster(cluster, this);
    didWork |= stateGatherer.sendMessages(cluster, communicator, this);
    didWork |= stateChangeHandler.watchTimers(cluster, stateVersionTracker.getLatestCandidateState().getClusterState(), this);
    didWork |= recomputeClusterStateIfRequired();
    if ( ! isStateGatherer) {
        if ( ! isMaster) {
            eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis()));
            // Moving towards master; sync version from ZooKeeper and trigger a recomputation.
            stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
            stateChangeHandler.setStateChangedFlag();
        }
    }
    isStateGatherer = true;
    return didWork;
}
/** Notifies all system state listeners of a newly computed candidate state bundle. */
private void invokeCandidateStateListeners(ClusterStateBundle candidateBundle) {
    systemStateListeners.forEach(listener -> listener.handleNewCandidateState(candidateBundle));
}
/** True once the first-broadcast grace period has elapsed, or earlier if all nodes have reported state. */
private boolean hasPassedFirstStateBroadcastTimePoint(long timeNowMs) {
    return timeNowMs >= firstAllowedStateBroadcast || cluster.allStatesReported();
}
/**
 * Recomputes the candidate cluster state bundle if anything may have changed, and promotes it
 * to a new published version when it differs enough from the current one (or a new version was
 * read from ZooKeeper). Always (re)schedules version-dependent tasks against the resulting
 * current version. Returns whether a new state was published.
 */
private boolean recomputeClusterStateIfRequired() {
    boolean stateWasChanged = false;
    if (mustRecomputeCandidateClusterState()) {
        stateChangeHandler.unsetStateChangedFlag(); // Consume the "changed" flag before computing.
        final AnnotatedClusterState candidate = computeCurrentAnnotatedState();
        final ClusterStateBundle candidateBundle = ClusterStateBundle.builder(candidate)
                .bucketSpaces(configuredBucketSpaces)
                .stateDeriver(createBucketSpaceStateDeriver())
                .deferredActivation(options.enableTwoPhaseClusterStateActivation)
                .feedBlock(createResourceExhaustionCalculator()
                        .inferContentClusterFeedBlockOrNull(cluster.getNodeInfo()))
                .deriveAndBuild();
        stateVersionTracker.updateLatestCandidateStateBundle(candidateBundle);
        invokeCandidateStateListeners(candidateBundle);
        final long timeNowMs = timer.getCurrentTimeInMillis();
        if (hasPassedFirstStateBroadcastTimePoint(timeNowMs)
            && (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish()
                || stateVersionTracker.hasReceivedNewVersionFromZooKeeper()))
        {
            final ClusterStateBundle before = stateVersionTracker.getVersionedClusterStateBundle();
            stateVersionTracker.promoteCandidateToVersionedState(timeNowMs);
            emitEventsForAlteredStateEdges(before, stateVersionTracker.getVersionedClusterStateBundle(), timeNowMs);
            handleNewPublishedState(stateVersionTracker.getVersionedClusterStateBundle());
            stateWasChanged = true;
        }
    }
    /*
     * This works transparently for tasks that end up changing the current cluster state (i.e.
     * requiring a new state to be published) and for those whose changes are no-ops (because
     * the changes they request are already part of the current state). In the former case the
     * tasks will depend on the version that was generated based upon them. In the latter case
     * the tasks will depend on the version that is already published (or in the process of
     * being published).
     */
    scheduleVersionDependentTasksForFutureCompletion(stateVersionTracker.getCurrentVersion());
    return stateWasChanged;
}
/**
 * Chooses the per-bucket-space state deriver: merge-aware maintenance derivation when the
 * cluster has global document types, otherwise a plain clone of the baseline state.
 */
private ClusterStateDeriver createBucketSpaceStateDeriver() {
    if (options.clusterHasGlobalDocumentTypes) {
        return new MaintenanceWhenPendingGlobalMerges(stateVersionTracker.createMergePendingChecker(),
                createDefaultSpaceMaintenanceTransitionConstraint());
    } else {
        return createIdentityClonedBucketSpaceStateDeriver();
    }
}
/** Creates a feed-block calculator configured from current options and the latest candidate feed block. */
private ResourceExhaustionCalculator createResourceExhaustionCalculator() {
    return new ResourceExhaustionCalculator(
            options.clusterFeedBlockEnabled, options.clusterFeedBlockLimit,
            stateVersionTracker.getLatestCandidateStateBundle().getFeedBlockOrNull(),
            options.clusterFeedBlockNoiseLevel);
}
/** Returns a deriver that makes every bucket space state an identical clone of the baseline state. */
private static ClusterStateDeriver createIdentityClonedBucketSpaceStateDeriver() {
    return (state, space) -> state.clone();
}
/**
 * Builds the maintenance transition constraint from the previously published default bucket
 * space state (empty state if none has been published yet).
 */
private MaintenanceTransitionConstraint createDefaultSpaceMaintenanceTransitionConstraint() {
    AnnotatedClusterState currentDefaultSpaceState = stateVersionTracker.getVersionedClusterStateBundle()
            .getDerivedBucketSpaceStates().getOrDefault(FixedBucketSpaces.defaultSpace(), AnnotatedClusterState.emptyState());
    return UpEdgeMaintenanceTransitionConstraint.forPreviouslyPublishedState(currentDefaultSpaceState.getClusterState());
}
/**
 * Move tasks that are dependent on the most recently generated state being published into
 * a completion queue with a dependency on the provided version argument. Once that version
 * has been ACKed by all distributors in the system, those tasks will be marked as completed.
 * Each task also gets a deadline, after which it is failed instead of completed.
 */
private void scheduleVersionDependentTasksForFutureCompletion(int completeAtVersion) {
    final long maxDeadlineTimePointMs = timer.getCurrentTimeInMillis() + options.getMaxDeferredTaskVersionWaitTime().toMillis();
    for (RemoteClusterControllerTask task : tasksPendingStateRecompute) {
        log.finest(() -> String.format("Adding task of type '%s' to be completed at version %d",
                task.getClass().getName(), completeAtVersion));
        taskCompletionQueue.add(new VersionDependentTaskCompletion(completeAtVersion, task, maxDeadlineTimePointMs));
    }
    tasksPendingStateRecompute.clear();
}
/** Generates the current annotated cluster state from options, cluster info, time and distribution bits. */
private AnnotatedClusterState computeCurrentAnnotatedState() {
    ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options);
    params.currentTimeInMilllis(timer.getCurrentTimeInMillis())
            .cluster(cluster)
            .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits());
    return ClusterStateGenerator.generatedStateFrom(params);
}
/**
 * Emits event log entries for every difference between the previous and the newly published
 * state bundle, followed by the generic "state applied" events.
 */
private void emitEventsForAlteredStateEdges(final ClusterStateBundle fromState,
                                            final ClusterStateBundle toState,
                                            final long timeNowMs) {
    final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff(
            EventDiffCalculator.params()
                    .cluster(cluster)
                    .fromState(fromState)
                    .toState(toState)
                    .currentTimeMs(timeNowMs)
                    .maxMaintenanceGracePeriodTimeMs(options.storageNodeMaxTransitionTimeMs()));
    for (Event event : deltaEvents) {
        eventLog.add(event, isMaster);
    }
    emitStateAppliedEvents(timeNowMs, fromState.getBaselineClusterState(), toState.getBaselineClusterState());
}
/**
 * Logs a cluster event describing the new state version and its diff from the previous state,
 * plus a separate event if the distribution bit count changed.
 */
private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) {
    eventLog.add(new ClusterEvent(
            ClusterEvent.Type.SYSTEMSTATE,
            "New cluster state version " + toClusterState.getVersion() + ". Change from last: " +
                    fromClusterState.getTextualDifference(toClusterState),
            timeNowMs), isMaster);
    if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) {
        eventLog.add(new ClusterEvent(
                ClusterEvent.Type.SYSTEMSTATE,
                "Altering distribution bits in system from "
                        + fromClusterState.getDistributionBitCount() + " to " +
                        toClusterState.getDistributionBitCount(),
                timeNowMs), isMaster);
    }
}
/**
 * True exactly at the edge where this master may send its very first cluster state bundle:
 * we are master, nothing has been broadcast yet, and the first-broadcast time point has passed.
 */
private boolean atFirstClusterStateSendTimeEdge() {
    boolean firstBroadcastStillPending = isMaster && !systemStateBroadcaster.hasBroadcastedClusterStateBundle();
    return firstBroadcastStillPending && hasPassedFirstStateBroadcastTimePoint(timer.getCurrentTimeInMillis());
}
/** True when node/timer events, merge completion changes or the first-send edge require a recompute. */
private boolean mustRecomputeCandidateClusterState() {
    return stateChangeHandler.stateMayHaveChanged()
            || stateVersionTracker.bucketSpaceMergeCompletionStateHasChanged()
            || atFirstClusterStateSendTimeEdge();
}
/**
 * Detects edges in mastership (became master / lost mastership) and performs the associated
 * one-time work: on gaining mastership, reloads state from ZooKeeper and delays the first
 * broadcast; on losing it, fails all version-dependent tasks. Also persists wanted states
 * while master. Returns whether any edge work was done.
 */
private boolean handleLeadershipEdgeTransitions() throws InterruptedException {
    boolean didWork = false;
    if (masterElectionHandler.isMaster()) {
        if ( ! isMaster) {
            // Edge: we just became master.
            stateChangeHandler.setStateChangedFlag();
            systemStateBroadcaster.resetBroadcastedClusterStateBundle();
            stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
            ClusterStateBundle previousBundle = database.getLatestClusterStateBundle();
            database.loadStartTimestamps(cluster);
            database.loadWantedStates(databaseContext);
            log.info(() -> String.format("Loaded previous cluster state bundle from ZooKeeper: %s", previousBundle));
            stateVersionTracker.setClusterStateBundleRetrievedFromZooKeeper(previousBundle);
            eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. Bumped version to "
                    + stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis()));
            long currentTime = timer.getCurrentTimeInMillis();
            // Delay the first broadcast so node states can be gathered before publishing.
            firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast;
            log.log(Level.FINE, "At time " + currentTime + " we set first system state broadcast time to be "
                    + options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + ".");
            didWork = true;
        }
        isMaster = true;
        if (wantedStateChanged) {
            database.saveWantedStates(databaseContext);
            wantedStateChanged = false;
        }
    } else {
        if (isMaster) {
            // Edge: we just lost mastership.
            eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis()));
            firstAllowedStateBroadcast = Long.MAX_VALUE;
            failAllVersionDependentTasks();
        }
        wantedStateChanged = false;
        isMaster = false;
    }
    metricUpdater.updateMasterState(isMaster);
    return didWork;
}
/**
 * Main loop of the controller event thread: ticks until stopped. An InterruptedException stops
 * the loop quietly; any other throwable is fatal and terminates the process. In all cases,
 * pending version-dependent tasks are failed and monitor waiters are woken up on exit.
 */
public void run() {
    controllerThreadId = Thread.currentThread().getId();
    try {
        processingCycle = true;
        while( isRunning() ) {
            tick();
        }
    } catch (InterruptedException e) {
        log.log(Level.FINE, "Event thread stopped by interrupt exception: " + e);
    } catch (Throwable t) {
        // The SEVERE log below records the full stack trace; the previous explicit
        // printStackTrace() only duplicated it to stderr and has been removed.
        log.log(Level.SEVERE, "Fatal error killed fleet controller", t);
        synchronized (monitor) { running.set(false); }
        System.exit(1);
    } finally {
        running.set(false);
        failAllVersionDependentTasks();
        synchronized (monitor) { monitor.notifyAll(); }
    }
}
/** Context handed to the database layer, exposing this controller and its cluster to callbacks. */
public DatabaseHandler.Context databaseContext = new DatabaseHandler.Context() {
    @Override
    public ContentCluster getCluster() { return cluster; }
    @Override
    public FleetController getFleetController() { return FleetController.this; }
    @Override
    public NodeAddedOrRemovedListener getNodeAddedOrRemovedListener() { return FleetController.this; }
    @Override
    public NodeStateOrHostInfoChangeHandler getNodeStateUpdateListener() { return FleetController.this; }
};
/**
 * Blocks until the controller has completed at least one full tick cycle after this call.
 *
 * @param timeoutMS maximum time to wait before giving up
 * @throws IllegalStateException on timeout, if the controller is not running,
 *         or if the waiting thread is interrupted (interrupt status is restored)
 */
public void waitForCompleteCycle(long timeoutMS) {
    long endTime = System.currentTimeMillis() + timeoutMS;
    synchronized (monitor) {
        // If a cycle is already in progress we must see it finish plus one full cycle after it.
        long wantedCycle = cycleCount + (processingCycle ? 2 : 1);
        waitingForCycle = true;
        try{
            while (cycleCount < wantedCycle) {
                if (System.currentTimeMillis() > endTime) throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeoutMS + " ms.");
                if ( !isRunning() ) throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles");
                try {
                    monitor.wait(100);
                } catch (InterruptedException e) {
                    // Previously swallowed; restore the interrupt flag and fail fast instead.
                    Thread.currentThread().interrupt();
                    throw new IllegalStateException("Interrupted while waiting for cycle to complete", e);
                }
            }
        } finally {
            waitingForCycle = false;
        }
    }
}
/**
 * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed.
 * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce
 * live performance to remove a non-problem.
 *
 * Blocks until at least {@code nodeCount} nodes have acknowledged system state {@code version}
 * or higher, polling every 10 ms; throws IllegalStateException on timeout.
 */
public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, int timeout) throws InterruptedException {
    long maxTime = System.currentTimeMillis() + timeout;
    synchronized (monitor) {
        while (true) {
            int ackedNodes = 0;
            for (NodeInfo node : cluster.getNodeInfo()) {
                if (node.getClusterStateVersionBundleAcknowledged() >= version) {
                    ++ackedNodes;
                }
            }
            if (ackedNodes >= nodeCount) {
                log.log(Level.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher.");
                return;
            }
            long remainingTime = maxTime - System.currentTimeMillis();
            if (remainingTime <= 0) {
                throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout + " milliseconds.");
            }
            monitor.wait(10);
        }
    }
}
/**
 * Test helper: blocks until exactly the given numbers of distributors and storage nodes are
 * registered (with current RPC addresses) in slobrok, polling every 10 ms; throws
 * IllegalStateException on timeout.
 */
public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, int timeoutMillis) throws InterruptedException {
    long maxTime = System.currentTimeMillis() + timeoutMillis;
    synchronized (monitor) {
        while (true) {
            int distCount = 0, storCount = 0;
            for (NodeInfo info : cluster.getNodeInfo()) {
                if (!info.isRpcAddressOutdated()) {
                    if (info.isDistributor()) ++distCount;
                    else ++storCount;
                }
            }
            if (distCount == distNodeCount && storCount == storNodeCount) return;
            long remainingTime = maxTime - System.currentTimeMillis();
            if (remainingTime <= 0) {
                throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount
                        + " storage nodes registered in slobrok within timeout of " + timeoutMillis + " ms. (Got "
                        + distCount + " distributors and " + storCount + " storage nodes)");
            }
            monitor.wait(10);
        }
    }
}
/** Returns whether the ZooKeeper database connection is currently open. */
public boolean hasZookeeperConnection() { return !database.isClosed(); }
/** Test helper: number of slobrok mirror updates seen; assumes nodeLookup is a SlobrokClient. */
public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); }
/** Returns the content cluster this controller manages. */
public ContentCluster getCluster() { return cluster; }
/** Returns the logged events for the given node. */
public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); }
/** Returns the controller's event log. */
public EventLog getEventLog() {
    return eventLog;
}
} |
We usually prefer synchronizing on an explicit monitor instead of `this`. Callers might choose to synchronize on a reference to this as well, outside the control of this code (especially since this is in a "library" module). | public T get() {
synchronized (this) {
if (clock.instant().isAfter(nextRefresh))
refresh();
}
return value;
} | synchronized (this) { | public T get() {
synchronized (monitor) {
if (clock.instant().isAfter(nextRefresh)) {
this.value = delegate.get();
this.nextRefresh = clock.instant().plus(period);
}
}
return value;
} | class CachedSupplier<T> implements Supplier<T> {
private final Supplier<T> delegate;
private final Duration period;
private final Clock clock;
private Instant nextRefresh;
private volatile T value;
public CachedSupplier(Supplier<T> delegate, Duration period) {
this(delegate, period, Clock.systemUTC());
}
CachedSupplier(Supplier<T> delegate, Duration period, Clock clock) {
this.delegate = delegate;
this.period = period;
this.clock = clock;
this.nextRefresh = Instant.MIN;
}
@Override
public void refresh() {
synchronized (this) {
this.value = delegate.get();
this.nextRefresh = clock.instant().plus(period);
}
}
} | class CachedSupplier<T> implements Supplier<T> {
private final Object monitor = new Object();
private final Supplier<T> delegate;
private final Duration period;
private final Clock clock;
private Instant nextRefresh;
private volatile T value;
public CachedSupplier(Supplier<T> delegate, Duration period) {
this(delegate, period, Clock.systemUTC());
}
CachedSupplier(Supplier<T> delegate, Duration period, Clock clock) {
this.delegate = delegate;
this.period = period;
this.clock = clock;
this.nextRefresh = Instant.MIN;
}
@Override
public void invalidate() {
synchronized (monitor) {
this.nextRefresh = Instant.MIN;
}
}
} |
Any reason to not use `java.net.URI` for the URIs? Then you get (some) validation and normalization for free. | static String normalizeUri(String uri) {
if (!uri.endsWith("/")) uri = uri + "/";
if (!validUriPattern.matcher(uri).matches())
throw new IllegalArgumentException("Invalid archive URI: " + uri);
return uri;
} | if (!uri.endsWith("/")) uri = uri + "/"; | static String normalizeUri(String uri) {
if (!uri.endsWith("/")) uri = uri + "/";
if (!validUriPattern.matcher(uri).matches())
throw new IllegalArgumentException("Invalid archive URI: " + uri);
return uri;
} | class ArchiveUris {
private static final Logger log = Logger.getLogger(ArchiveUris.class.getName());
private static final Pattern validUriPattern = Pattern.compile("[a-z0-9]+:
private static final Duration cacheTtl = Duration.ofMinutes(1);
private final CuratorDatabaseClient db;
private final CachedSupplier<Map<TenantName, String>> archiveUris;
public ArchiveUris(CuratorDatabaseClient db) {
this.db = db;
this.archiveUris = new CachedSupplier<>(db::readArchiveUris, cacheTtl);
}
/** Returns the current archive URI for each tenant */
public Map<TenantName, String> getArchiveUris() {
return archiveUris.get();
}
/** Returns the archive URI to use for given tenant */
public Optional<String> archiveUriFor(TenantName tenant) {
return Optional.ofNullable(archiveUris.get().get(tenant));
}
/** Returns the archive URI to use for given node */
public Optional<String> archiveUriFor(Node node) {
return node.allocation().map(Allocation::owner)
.flatMap(app -> archiveUriFor(app.tenant())
.map(uri -> {
StringBuilder sb = new StringBuilder(100).append(uri)
.append(app.application().value()).append('/')
.append(app.instance().value()).append('/');
for (char c: node.hostname().toCharArray()) {
if (c == '.') break;
sb.append(c);
}
return sb.append('/').toString();
}));
}
/** Set the docker image for nodes of given type */
public void setArchiveUri(TenantName tenant, Optional<String> archiveUri) {
try (Lock lock = db.lockArchiveUris()) {
Map<TenantName, String> archiveUris = new TreeMap<>(db.readArchiveUris());
if (Optional.ofNullable(archiveUris.get(tenant)).equals(archiveUri)) return;
archiveUri.map(ArchiveUris::normalizeUri).ifPresentOrElse(uri -> archiveUris.put(tenant, uri),
() -> archiveUris.remove(tenant));
db.writeArchiveUris(archiveUris);
this.archiveUris.refresh();
log.info("Set archive URI for " + tenant + " to " + archiveUri.orElse(null));
}
}
} | class ArchiveUris {
private static final Logger log = Logger.getLogger(ArchiveUris.class.getName());
private static final Pattern validUriPattern = Pattern.compile("[a-z0-9]+:
private static final Duration cacheTtl = Duration.ofMinutes(1);
private final CuratorDatabaseClient db;
private final CachedSupplier<Map<TenantName, String>> archiveUris;
public ArchiveUris(CuratorDatabaseClient db) {
this.db = db;
this.archiveUris = new CachedSupplier<>(db::readArchiveUris, cacheTtl);
}
/** Returns the current archive URI for each tenant */
public Map<TenantName, String> getArchiveUris() {
return archiveUris.get();
}
/** Returns the archive URI to use for given tenant */
public Optional<String> archiveUriFor(TenantName tenant) {
return Optional.ofNullable(archiveUris.get().get(tenant));
}
/** Returns the archive URI to use for given node */
public Optional<String> archiveUriFor(Node node) {
return node.allocation().map(Allocation::owner)
.flatMap(app -> archiveUriFor(app.tenant())
.map(uri -> {
StringBuilder sb = new StringBuilder(100).append(uri)
.append(app.application().value()).append('/')
.append(app.instance().value()).append('/');
for (char c: node.hostname().toCharArray()) {
if (c == '.') break;
sb.append(c);
}
return sb.append('/').toString();
}));
}
/** Set the docker image for nodes of given type */
public void setArchiveUri(TenantName tenant, Optional<String> archiveUri) {
try (Lock lock = db.lockArchiveUris()) {
Map<TenantName, String> archiveUris = new TreeMap<>(db.readArchiveUris());
if (Optional.ofNullable(archiveUris.get(tenant)).equals(archiveUri)) return;
archiveUri.map(ArchiveUris::normalizeUri).ifPresentOrElse(uri -> archiveUris.put(tenant, uri),
() -> archiveUris.remove(tenant));
db.writeArchiveUris(archiveUris);
this.archiveUris.invalidate();
log.info("Set archive URI for " + tenant + " to " + archiveUri.orElse(null));
}
}
} |
`java.net.URI` is just way to permissive, f.ex. all of the invalid URIs from my test are legal `java.net.URI`. With the regex above we can limit it down just to the format we want it. | static String normalizeUri(String uri) {
if (!uri.endsWith("/")) uri = uri + "/";
if (!validUriPattern.matcher(uri).matches())
throw new IllegalArgumentException("Invalid archive URI: " + uri);
return uri;
} | if (!uri.endsWith("/")) uri = uri + "/"; | static String normalizeUri(String uri) {
if (!uri.endsWith("/")) uri = uri + "/";
if (!validUriPattern.matcher(uri).matches())
throw new IllegalArgumentException("Invalid archive URI: " + uri);
return uri;
} | class ArchiveUris {
private static final Logger log = Logger.getLogger(ArchiveUris.class.getName());
private static final Pattern validUriPattern = Pattern.compile("[a-z0-9]+:
private static final Duration cacheTtl = Duration.ofMinutes(1);
private final CuratorDatabaseClient db;
private final CachedSupplier<Map<TenantName, String>> archiveUris;
public ArchiveUris(CuratorDatabaseClient db) {
this.db = db;
this.archiveUris = new CachedSupplier<>(db::readArchiveUris, cacheTtl);
}
/** Returns the current archive URI for each tenant */
public Map<TenantName, String> getArchiveUris() {
return archiveUris.get();
}
/** Returns the archive URI to use for given tenant */
public Optional<String> archiveUriFor(TenantName tenant) {
return Optional.ofNullable(archiveUris.get().get(tenant));
}
/** Returns the archive URI to use for given node */
public Optional<String> archiveUriFor(Node node) {
return node.allocation().map(Allocation::owner)
.flatMap(app -> archiveUriFor(app.tenant())
.map(uri -> {
StringBuilder sb = new StringBuilder(100).append(uri)
.append(app.application().value()).append('/')
.append(app.instance().value()).append('/');
for (char c: node.hostname().toCharArray()) {
if (c == '.') break;
sb.append(c);
}
return sb.append('/').toString();
}));
}
/** Set the docker image for nodes of given type */
public void setArchiveUri(TenantName tenant, Optional<String> archiveUri) {
try (Lock lock = db.lockArchiveUris()) {
Map<TenantName, String> archiveUris = new TreeMap<>(db.readArchiveUris());
if (Optional.ofNullable(archiveUris.get(tenant)).equals(archiveUri)) return;
archiveUri.map(ArchiveUris::normalizeUri).ifPresentOrElse(uri -> archiveUris.put(tenant, uri),
() -> archiveUris.remove(tenant));
db.writeArchiveUris(archiveUris);
this.archiveUris.refresh();
log.info("Set archive URI for " + tenant + " to " + archiveUri.orElse(null));
}
}
} | class ArchiveUris {
private static final Logger log = Logger.getLogger(ArchiveUris.class.getName());
private static final Pattern validUriPattern = Pattern.compile("[a-z0-9]+:
private static final Duration cacheTtl = Duration.ofMinutes(1);
private final CuratorDatabaseClient db;
private final CachedSupplier<Map<TenantName, String>> archiveUris;
public ArchiveUris(CuratorDatabaseClient db) {
this.db = db;
this.archiveUris = new CachedSupplier<>(db::readArchiveUris, cacheTtl);
}
/** Returns the current archive URI for each tenant */
public Map<TenantName, String> getArchiveUris() {
return archiveUris.get();
}
/** Returns the archive URI to use for given tenant */
public Optional<String> archiveUriFor(TenantName tenant) {
return Optional.ofNullable(archiveUris.get().get(tenant));
}
/** Returns the archive URI to use for given node */
public Optional<String> archiveUriFor(Node node) {
return node.allocation().map(Allocation::owner)
.flatMap(app -> archiveUriFor(app.tenant())
.map(uri -> {
StringBuilder sb = new StringBuilder(100).append(uri)
.append(app.application().value()).append('/')
.append(app.instance().value()).append('/');
for (char c: node.hostname().toCharArray()) {
if (c == '.') break;
sb.append(c);
}
return sb.append('/').toString();
}));
}
/** Set the docker image for nodes of given type */
public void setArchiveUri(TenantName tenant, Optional<String> archiveUri) {
try (Lock lock = db.lockArchiveUris()) {
Map<TenantName, String> archiveUris = new TreeMap<>(db.readArchiveUris());
if (Optional.ofNullable(archiveUris.get(tenant)).equals(archiveUri)) return;
archiveUri.map(ArchiveUris::normalizeUri).ifPresentOrElse(uri -> archiveUris.put(tenant, uri),
() -> archiveUris.remove(tenant));
db.writeArchiveUris(archiveUris);
this.archiveUris.invalidate();
log.info("Set archive URI for " + tenant + " to " + archiveUri.orElse(null));
}
}
} |
Sure, but I still think using `java.net.URI` for something that is an URI is better. | static String normalizeUri(String uri) {
if (!uri.endsWith("/")) uri = uri + "/";
if (!validUriPattern.matcher(uri).matches())
throw new IllegalArgumentException("Invalid archive URI: " + uri);
return uri;
} | if (!uri.endsWith("/")) uri = uri + "/"; | static String normalizeUri(String uri) {
if (!uri.endsWith("/")) uri = uri + "/";
if (!validUriPattern.matcher(uri).matches())
throw new IllegalArgumentException("Invalid archive URI: " + uri);
return uri;
} | class ArchiveUris {
private static final Logger log = Logger.getLogger(ArchiveUris.class.getName());
private static final Pattern validUriPattern = Pattern.compile("[a-z0-9]+:
private static final Duration cacheTtl = Duration.ofMinutes(1);
private final CuratorDatabaseClient db;
private final CachedSupplier<Map<TenantName, String>> archiveUris;
public ArchiveUris(CuratorDatabaseClient db) {
this.db = db;
this.archiveUris = new CachedSupplier<>(db::readArchiveUris, cacheTtl);
}
/** Returns the current archive URI for each tenant */
public Map<TenantName, String> getArchiveUris() {
return archiveUris.get();
}
/** Returns the archive URI to use for given tenant */
public Optional<String> archiveUriFor(TenantName tenant) {
return Optional.ofNullable(archiveUris.get().get(tenant));
}
/** Returns the archive URI to use for given node */
public Optional<String> archiveUriFor(Node node) {
return node.allocation().map(Allocation::owner)
.flatMap(app -> archiveUriFor(app.tenant())
.map(uri -> {
StringBuilder sb = new StringBuilder(100).append(uri)
.append(app.application().value()).append('/')
.append(app.instance().value()).append('/');
for (char c: node.hostname().toCharArray()) {
if (c == '.') break;
sb.append(c);
}
return sb.append('/').toString();
}));
}
/** Set the docker image for nodes of given type */
public void setArchiveUri(TenantName tenant, Optional<String> archiveUri) {
try (Lock lock = db.lockArchiveUris()) {
Map<TenantName, String> archiveUris = new TreeMap<>(db.readArchiveUris());
if (Optional.ofNullable(archiveUris.get(tenant)).equals(archiveUri)) return;
archiveUri.map(ArchiveUris::normalizeUri).ifPresentOrElse(uri -> archiveUris.put(tenant, uri),
() -> archiveUris.remove(tenant));
db.writeArchiveUris(archiveUris);
this.archiveUris.refresh();
log.info("Set archive URI for " + tenant + " to " + archiveUri.orElse(null));
}
}
} | class ArchiveUris {
private static final Logger log = Logger.getLogger(ArchiveUris.class.getName());
private static final Pattern validUriPattern = Pattern.compile("[a-z0-9]+:
private static final Duration cacheTtl = Duration.ofMinutes(1);
private final CuratorDatabaseClient db;
private final CachedSupplier<Map<TenantName, String>> archiveUris;
public ArchiveUris(CuratorDatabaseClient db) {
this.db = db;
this.archiveUris = new CachedSupplier<>(db::readArchiveUris, cacheTtl);
}
/** Returns the current archive URI for each tenant */
public Map<TenantName, String> getArchiveUris() {
return archiveUris.get();
}
/** Returns the archive URI to use for given tenant */
public Optional<String> archiveUriFor(TenantName tenant) {
return Optional.ofNullable(archiveUris.get().get(tenant));
}
/** Returns the archive URI to use for given node */
public Optional<String> archiveUriFor(Node node) {
return node.allocation().map(Allocation::owner)
.flatMap(app -> archiveUriFor(app.tenant())
.map(uri -> {
StringBuilder sb = new StringBuilder(100).append(uri)
.append(app.application().value()).append('/')
.append(app.instance().value()).append('/');
for (char c: node.hostname().toCharArray()) {
if (c == '.') break;
sb.append(c);
}
return sb.append('/').toString();
}));
}
/** Set the docker image for nodes of given type */
public void setArchiveUri(TenantName tenant, Optional<String> archiveUri) {
try (Lock lock = db.lockArchiveUris()) {
Map<TenantName, String> archiveUris = new TreeMap<>(db.readArchiveUris());
if (Optional.ofNullable(archiveUris.get(tenant)).equals(archiveUri)) return;
archiveUri.map(ArchiveUris::normalizeUri).ifPresentOrElse(uri -> archiveUris.put(tenant, uri),
() -> archiveUris.remove(tenant));
db.writeArchiveUris(archiveUris);
this.archiveUris.invalidate();
log.info("Set archive URI for " + tenant + " to " + archiveUri.orElse(null));
}
}
} |
That is true, decided against it since it is only used in REST API, and even before that, the URI needs to be extended, so we would have to do a lot of `URI::create` and `URI::toString` to do anything with it. | static String normalizeUri(String uri) {
if (!uri.endsWith("/")) uri = uri + "/";
if (!validUriPattern.matcher(uri).matches())
throw new IllegalArgumentException("Invalid archive URI: " + uri);
return uri;
} | if (!uri.endsWith("/")) uri = uri + "/"; | static String normalizeUri(String uri) {
if (!uri.endsWith("/")) uri = uri + "/";
if (!validUriPattern.matcher(uri).matches())
throw new IllegalArgumentException("Invalid archive URI: " + uri);
return uri;
} | class ArchiveUris {
private static final Logger log = Logger.getLogger(ArchiveUris.class.getName());
private static final Pattern validUriPattern = Pattern.compile("[a-z0-9]+:
private static final Duration cacheTtl = Duration.ofMinutes(1);
private final CuratorDatabaseClient db;
private final CachedSupplier<Map<TenantName, String>> archiveUris;
public ArchiveUris(CuratorDatabaseClient db) {
this.db = db;
this.archiveUris = new CachedSupplier<>(db::readArchiveUris, cacheTtl);
}
/** Returns the current archive URI for each tenant */
public Map<TenantName, String> getArchiveUris() {
return archiveUris.get();
}
/** Returns the archive URI to use for given tenant */
public Optional<String> archiveUriFor(TenantName tenant) {
return Optional.ofNullable(archiveUris.get().get(tenant));
}
/** Returns the archive URI to use for given node */
public Optional<String> archiveUriFor(Node node) {
return node.allocation().map(Allocation::owner)
.flatMap(app -> archiveUriFor(app.tenant())
.map(uri -> {
StringBuilder sb = new StringBuilder(100).append(uri)
.append(app.application().value()).append('/')
.append(app.instance().value()).append('/');
for (char c: node.hostname().toCharArray()) {
if (c == '.') break;
sb.append(c);
}
return sb.append('/').toString();
}));
}
/** Set the docker image for nodes of given type */
public void setArchiveUri(TenantName tenant, Optional<String> archiveUri) {
try (Lock lock = db.lockArchiveUris()) {
Map<TenantName, String> archiveUris = new TreeMap<>(db.readArchiveUris());
if (Optional.ofNullable(archiveUris.get(tenant)).equals(archiveUri)) return;
archiveUri.map(ArchiveUris::normalizeUri).ifPresentOrElse(uri -> archiveUris.put(tenant, uri),
() -> archiveUris.remove(tenant));
db.writeArchiveUris(archiveUris);
this.archiveUris.refresh();
log.info("Set archive URI for " + tenant + " to " + archiveUri.orElse(null));
}
}
} | class ArchiveUris {
private static final Logger log = Logger.getLogger(ArchiveUris.class.getName());
private static final Pattern validUriPattern = Pattern.compile("[a-z0-9]+:
private static final Duration cacheTtl = Duration.ofMinutes(1);
private final CuratorDatabaseClient db;
private final CachedSupplier<Map<TenantName, String>> archiveUris;
public ArchiveUris(CuratorDatabaseClient db) {
this.db = db;
this.archiveUris = new CachedSupplier<>(db::readArchiveUris, cacheTtl);
}
/** Returns the current archive URI for each tenant */
public Map<TenantName, String> getArchiveUris() {
return archiveUris.get();
}
/** Returns the archive URI to use for given tenant */
public Optional<String> archiveUriFor(TenantName tenant) {
return Optional.ofNullable(archiveUris.get().get(tenant));
}
/** Returns the archive URI to use for given node */
public Optional<String> archiveUriFor(Node node) {
return node.allocation().map(Allocation::owner)
.flatMap(app -> archiveUriFor(app.tenant())
.map(uri -> {
StringBuilder sb = new StringBuilder(100).append(uri)
.append(app.application().value()).append('/')
.append(app.instance().value()).append('/');
for (char c: node.hostname().toCharArray()) {
if (c == '.') break;
sb.append(c);
}
return sb.append('/').toString();
}));
}
/** Set the docker image for nodes of given type */
public void setArchiveUri(TenantName tenant, Optional<String> archiveUri) {
try (Lock lock = db.lockArchiveUris()) {
Map<TenantName, String> archiveUris = new TreeMap<>(db.readArchiveUris());
if (Optional.ofNullable(archiveUris.get(tenant)).equals(archiveUri)) return;
archiveUri.map(ArchiveUris::normalizeUri).ifPresentOrElse(uri -> archiveUris.put(tenant, uri),
() -> archiveUris.remove(tenant));
db.writeArchiveUris(archiveUris);
this.archiveUris.invalidate();
log.info("Set archive URI for " + tenant + " to " + archiveUri.orElse(null));
}
}
} |
We could make this a normal GET that just uses the values from the persisted settings. | private HttpResponse handlePOST(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
} | if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request); | private HttpResponse handlePOST(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
} | class ApplicationApiHandler extends LoggingRequestHandler {
private static final ObjectMapper jsonMapper = new ObjectMapper();
private static final String OPTIONAL_PREFIX = "/api";
private final Controller controller;
private final AccessControlRequests accessControlRequests;
private final TestConfigSerializer testConfigSerializer;
@Inject
public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx,
Controller controller,
AccessControlRequests accessControlRequests) {
super(parentCtx);
this.controller = controller;
this.accessControlRequests = accessControlRequests;
this.testConfigSerializer = new TestConfigSerializer(controller.system());
}
@Override
public Duration getTimeout() {
return Duration.ofMinutes(20);
}
@Override
public HttpResponse handle(HttpRequest request) {
try {
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
switch (request.getMethod()) {
case GET: return handleGET(path, request);
case PUT: return handlePUT(path, request);
case POST: return handlePOST(path, request);
case PATCH: return handlePATCH(path, request);
case DELETE: return handleDELETE(path, request);
case OPTIONS: return handleOPTIONS();
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (ForbiddenException e) {
return ErrorResponse.forbidden(Exceptions.toMessageString(e));
}
catch (NotAuthorizedException e) {
return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
}
catch (NotExistsException e) {
return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (ConfigServerException e) {
switch (e.getErrorCode()) {
case NOT_FOUND:
return new ErrorResponse(NOT_FOUND, e.getErrorCode().name(), Exceptions.toMessageString(e));
case ACTIVATION_CONFLICT:
return new ErrorResponse(CONFLICT, e.getErrorCode().name(), Exceptions.toMessageString(e));
case INTERNAL_SERVER_ERROR:
return new ErrorResponse(INTERNAL_SERVER_ERROR, e.getErrorCode().name(), Exceptions.toMessageString(e));
default:
return new ErrorResponse(BAD_REQUEST, e.getErrorCode().name(), Exceptions.toMessageString(e));
}
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
/**
 * Routes GET requests to their handlers. Routes are matched in declaration order;
 * unmatched paths yield 404. (A duplicate of the
 * ".../environment/{environment}/region/{region}/instance/{instance}" route has been removed —
 * the second occurrence was unreachable dead code.)
 */
private HttpResponse handleGET(Path path, HttpRequest request) {
    if (path.matches("/application/v4/")) return root(request);
    if (path.matches("/application/v4/tenant")) return tenants(request);
    if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/metering")) return metering(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return metrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes PUT requests: tenant updates, tenant info, secret stores, and global rotation overrides. */
private HttpResponse handlePUT(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes PATCH requests: partial application updates (major version, deploy key). */
private HttpResponse handlePATCH(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes DELETE requests: removal of tenants, applications, instances, keys, deployments, jobs and overrides. */
private HttpResponse handleDELETE(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Answers OPTIONS requests with the set of supported HTTP methods. */
private HttpResponse handleOPTIONS() {
    var response = new EmptyResponse();
    response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
    return response;
}
/** Lists all tenants, rendering each in full detail. */
private HttpResponse recursiveRoot(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantsArray = slime.setArray();
    controller.tenants().asList().forEach(tenant -> toSlime(tenantsArray.addObject(), tenant, request));
    return new SlimeJsonResponse(slime);
}
/** Root resource: expands all tenants when recursion is requested, otherwise lists the "tenant" sub-resource. */
private HttpResponse root(HttpRequest request) {
    if (recurseOverTenants(request))
        return recursiveRoot(request);
    return new ResourceResponse(request, "tenant");
}
/** Lists all tenants in compact (list) form. */
private HttpResponse tenants(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantArray = slime.setArray();
    controller.tenants().asList().forEach(tenant -> tenantInTenantsListToSlime(tenant, request.getUri(), tenantArray.addObject()));
    return new SlimeJsonResponse(slime);
}
/** Renders the named tenant, or 404 if it does not exist. */
private HttpResponse tenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    return tenant(tenant.get(), request);
}
/** Renders the given tenant as a JSON object. */
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
    Slime root = new Slime();
    toSlime(root.setObject(), tenant, request);
    return new SlimeJsonResponse(root);
}
/** Renders the info of the named tenant; only cloud tenants carry info. */
private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isPresent() && tenant.get().type() == Tenant.Type.cloud)
        return tenantInfo(((CloudTenant) tenant.get()).info(), request);
    return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
}
/** Renders the given tenant info as JSON; empty info yields an empty object. */
private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    if (info.isEmpty())
        return new SlimeJsonResponse(slime);
    root.setString("name", info.name());
    root.setString("email", info.email());
    root.setString("website", info.website());
    root.setString("invoiceEmail", info.invoiceEmail());
    root.setString("contactName", info.contactName());
    root.setString("contactEmail", info.contactEmail());
    toSlime(info.address(), root);
    toSlime(info.billingContact(), root);
    return new SlimeJsonResponse(slime);
}
/** Writes the given address under an "address" object, unless it is empty. */
private void toSlime(TenantInfoAddress address, Cursor parentCursor) {
    if (address.isEmpty()) return;
    Cursor cursor = parentCursor.setObject("address");
    cursor.setString("addressLines", address.addressLines());
    cursor.setString("postalCodeOrZip", address.postalCodeOrZip());
    cursor.setString("city", address.city());
    cursor.setString("stateRegionProvince", address.stateRegionProvince());
    cursor.setString("country", address.country());
}
/** Writes the given billing contact under a "billingContact" object, unless it is empty. */
private void toSlime(TenantInfoBillingContact billingContact, Cursor parentCursor) {
    if (billingContact.isEmpty()) return;
    Cursor contactCursor = parentCursor.setObject("billingContact");
    contactCursor.setString("name", billingContact.name());
    contactCursor.setString("email", billingContact.email());
    contactCursor.setString("phone", billingContact.phone());
    toSlime(billingContact.address(), contactCursor);
}
/** Updates the info of the named tenant; only cloud tenants carry info. */
private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isPresent() && tenant.get().type() == Tenant.Type.cloud)
        return updateTenantInfo((CloudTenant) tenant.get(), request);
    return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
}
/** Returns the field's string value, or the given default if the field is missing or not valid. */
private String getString(Inspector field, String defaultValue) {
    return field.valid() ? field.asString() : defaultValue;
}
/**
 * Merges the JSON request body into the tenant's existing info and stores the result:
 * fields present in the request replace the old values; absent fields keep them.
 * Fixes two copy-paste bugs: "website" previously fell back to the old email,
 * and "contactEmail" fell back to the old contact name.
 */
private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) {
    TenantInfo oldInfo = tenant.info();
    Inspector insp = toSlime(request.getData()).get();
    TenantInfo mergedInfo = TenantInfo.EMPTY
            .withName(getString(insp.field("name"), oldInfo.name()))
            .withEmail(getString(insp.field("email"), oldInfo.email()))
            .withWebsite(getString(insp.field("website"), oldInfo.website()))
            .withInvoiceEmail(getString(insp.field("invoiceEmail"), oldInfo.invoiceEmail()))
            .withContactName(getString(insp.field("contactName"), oldInfo.contactName()))
            .withContactEmail(getString(insp.field("contactEmail"), oldInfo.contactEmail()))
            .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address()))
            .withBillingContact(updateTenantInfoBillingContact(insp.field("billingContact"), oldInfo.billingContact()));
    // Store the merged info on the tenant under lock.
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}
/** Merges the given JSON address object into the old address; absent fields keep their old values. */
private TenantInfoAddress updateTenantInfoAddress(Inspector insp, TenantInfoAddress oldAddress) {
    if (!insp.valid()) return oldAddress;
    return TenantInfoAddress.EMPTY
            .withCountry(getString(insp.field("country"), oldAddress.country()))
            .withStateRegionProvince(getString(insp.field("stateRegionProvince"), oldAddress.stateRegionProvince()))
            .withCity(getString(insp.field("city"), oldAddress.city()))
            .withPostalCodeOrZip(getString(insp.field("postalCodeOrZip"), oldAddress.postalCodeOrZip()))
            .withAddressLines(getString(insp.field("addressLines"), oldAddress.addressLines()));
}
/** Merges the given JSON billing-contact object into the old contact; absent fields keep their old values. */
private TenantInfoBillingContact updateTenantInfoBillingContact(Inspector insp, TenantInfoBillingContact oldContact) {
    if (!insp.valid()) return oldContact;
    return TenantInfoBillingContact.EMPTY
            .withName(getString(insp.field("name"), oldContact.name()))
            .withEmail(getString(insp.field("email"), oldContact.email()))
            .withPhone(getString(insp.field("phone"), oldContact.phone()))
            .withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address()));
}
/**
 * Lists applications for the given tenant, optionally restricted to a single application name,
 * with links to each application and its instances. When the request asks for production
 * instances only, other instances are omitted.
 */
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().get(tenantName).isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    Slime slime = new Slime();
    Cursor applicationArray = slime.setArray();
    for (com.yahoo.vespa.hosted.controller.Application application : controller.applications().asList(tenant)) {
        // Empty filter matches all applications; otherwise only the named one.
        if (applicationName.map(application.id().application().value()::equals).orElse(true)) {
            Cursor applicationObject = applicationArray.addObject();
            applicationObject.setString("tenant", application.id().tenant().value());
            applicationObject.setString("application", application.id().application().value());
            applicationObject.setString("url", withPath("/application/v4" +
                                                        "/tenant/" + application.id().tenant().value() +
                                                        "/application/" + application.id().application().value(),
                                                        request.getUri()).toString());
            Cursor instanceArray = applicationObject.setArray("instances");
            for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
                                                                              : application.instances().keySet()) {
                Cursor instanceObject = instanceArray.addObject();
                instanceObject.setString("instance", instance.value());
                instanceObject.setString("url", withPath("/application/v4" +
                                                         "/tenant/" + application.id().tenant().value() +
                                                         "/application/" + application.id().application().value() +
                                                         "/instance/" + instance.value(),
                                                         request.getUri()).toString());
            }
        }
    }
    return new SlimeJsonResponse(slime);
}
/** Returns the dev application package for the given id and job type, as a zip; only manually deployed zones have these. */
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
    if ( ! type.environment().isManuallyDeployed())
        throw new IllegalArgumentException("Only manually deployed zones have dev packages");
    ZoneId zone = type.zone(controller.system());
    String filename = id.toFullString() + "." + zone.value() + ".zip";
    return new ZipResponse(filename, controller.applications().applicationStore().getDev(id, zone));
}
/**
 * Returns the stored application package for the given application, as a zip.
 * The "build" request property selects a specific build number; without it,
 * the latest submitted build is used. 404s when no matching package exists.
 */
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
    var tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
    var applicationId = ApplicationId.from(tenantName, applicationName, InstanceName.defaultName().value());
    long buildNumber;
    var requestedBuild = Optional.ofNullable(request.getProperty("build")).map(build -> {
        try {
            return Long.parseLong(build);
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("Invalid build number", e);
        }
    });
    if (requestedBuild.isEmpty()) {
        // No explicit build requested: fall back to the latest submitted version.
        var application = controller.applications().requireApplication(tenantAndApplication);
        var latestBuild = application.latestVersion().map(ApplicationVersion::buildNumber).orElse(OptionalLong.empty());
        if (latestBuild.isEmpty()) {
            throw new NotExistsException("No application package has been submitted for '" + tenantAndApplication + "'");
        }
        buildNumber = latestBuild.getAsLong();
    } else {
        buildNumber = requestedBuild.get();
    }
    var applicationPackage = controller.applications().applicationStore().find(tenantAndApplication.tenant(), tenantAndApplication.application(), buildNumber);
    var filename = tenantAndApplication + "-build" + buildNumber + ".zip";
    if (applicationPackage.isEmpty()) {
        throw new NotExistsException("No application package found for '" +
                                     tenantAndApplication +
                                     "' with build number " + buildNumber);
    }
    return new ZipResponse(filename, applicationPackage.get());
}
/** Renders the named application as JSON. */
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
    Slime root = new Slime();
    toSlime(root.setObject(), getApplication(tenantName, applicationName), request);
    return new SlimeJsonResponse(root);
}
/** Returns the compile version to use for the given application, as JSON. */
private HttpResponse compileVersion(String tenantName, String applicationName) {
    Slime slime = new Slime();
    Cursor object = slime.setObject();
    object.setString("compileVersion",
                     compileVersion(TenantAndApplicationId.from(tenantName, applicationName)).toFullString());
    return new SlimeJsonResponse(slime);
}
/** Renders the named instance as JSON, including the deployment status of its application. */
private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Slime slime = new Slime();
    toSlime(slime.setObject(), getInstance(tenantName, applicationName, instanceName),
            controller.jobController().deploymentStatus(getApplication(tenantName, applicationName)), request);
    return new SlimeJsonResponse(slime);
}
/**
 * Adds the PEM-encoded public key in the request body (field "key") as a developer key
 * for the requesting user, and returns all developer keys of the tenant.
 * Cloud tenants only.
 */
private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    Principal user = request.getJDiscRequest().getUserPrincipal();
    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    Slime root = new Slime();
    // Add the key and build the response from the updated key set, all under lock.
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withDeveloperKey(developerKey, user);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}
/**
 * Validates the named secret store against one of the tenant's active deployments,
 * via the config server. Requires a cloud tenant, a configured store with the given
 * name, and at least one active deployment.
 */
private HttpResponse validateSecretStore(String tenantName, String name, HttpRequest request) {
    var tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Tenant '" + tenant + "' is not a cloud tenant");
    var cloudTenant = (CloudTenant)controller.tenants().require(tenant);
    // Look up the configured store with the requested name.
    var tenantSecretStore = cloudTenant.tenantSecretStores()
                                       .stream()
                                       .filter(secretStore -> secretStore.getName().equals(name))
                                       .findFirst();
    var deployment = getActiveDeployment(tenant);
    if (deployment.isEmpty())
        return ErrorResponse.badRequest("Tenant '" + tenantName + "' has no active deployments");
    if (tenantSecretStore.isEmpty())
        return ErrorResponse.notFoundError("No secret store '" + name + "' configured for tenant '" + tenantName + "'");
    var response = controller.serviceRegistry().configServer().validateSecretStore(deployment.get(), tenantSecretStore.get());
    return new MessageResponse(response);
}
/** Returns some deployment of the given tenant: the first deployment of the first instance that has any, if present. */
private Optional<DeploymentId> getActiveDeployment(TenantName tenant) {
    for (var application : controller.applications().asList(tenant)) {
        for (var instance : application.instances().values()) {
            if ( ! instance.deployments().isEmpty()) {
                var zoneId = instance.deployments().keySet().iterator().next();
                return Optional.of(new DeploymentId(instance.id(), zoneId));
            }
        }
    }
    return Optional.empty();
}
/**
 * Removes the developer key given in the request body (field "key", PEM) from the tenant,
 * and returns the remaining developer keys. Cloud tenants only.
 * The previously computed-but-unused {@code user} lookup has been removed.
 */
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    Slime root = new Slime();
    // Remove the key and build the response from the updated key set, all under lock.
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withoutDeveloperKey(developerKey);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}
/** Writes each key/user pair as an object with PEM "key" and "user" name fields. */
private void toSlime(Cursor keysArray, Map<PublicKey, Principal> keys) {
    for (Map.Entry<PublicKey, Principal> entry : keys.entrySet()) {
        Cursor keyObject = keysArray.addObject();
        keyObject.setString("key", KeyUtils.toPem(entry.getKey()));
        keyObject.setString("user", entry.getValue().getName());
    }
}
/**
 * Adds the PEM-encoded public key in the request body (field "key") as a deploy key
 * for the application, and returns all deploy keys of the application.
 */
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
    String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
    Slime root = new Slime();
    // Add the key and build the response from the updated key set, all under lock.
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withDeployKey(deployKey);
        application.get().deployKeys().stream()
                   .map(KeyUtils::toPem)
                   .forEach(root.setObject().setArray("keys")::addString);
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}
/**
 * Removes the deploy key given in the request body (field "key", PEM) from the application,
 * and returns the remaining deploy keys.
 */
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
    String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
    Slime root = new Slime();
    // Remove the key and build the response from the updated key set, all under lock.
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withoutDeployKey(deployKey);
        application.get().deployKeys().stream()
                   .map(KeyUtils::toPem)
                   .forEach(root.setObject().setArray("keys")::addString);
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}
/**
 * Adds a secret store to the tenant: validates the store, creates the required tenant
 * policy, registers the store with the tenant secret service, and persists it on the
 * tenant. Cloud tenants only.
 */
private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    var data = toSlime(request.getData()).get();
    var awsId = mandatory("awsId", data).asString();
    var externalId = mandatory("externalId", data).asString();
    var role = mandatory("role", data).asString();
    var tenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
    var tenantSecretStore = new TenantSecretStore(name, awsId, role);
    // Plain concatenation instead of String.format: the message was used as a format
    // string, so a stray '%' in user-derived store data would have thrown.
    if (!tenantSecretStore.isValid()) {
        return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid");
    }
    if (tenant.tenantSecretStores().contains(tenantSecretStore)) {
        return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured");
    }
    controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role);
    controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId);
    // Persist the new store on the tenant under lock.
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withSecretStore(tenantSecretStore);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Configured secret store: " + tenantSecretStore);
}
/**
 * Applies PATCH changes to an application: an optional major version pin
 * ("majorVersion", 0 clears the pin) and an optional deploy key ("pemDeployKey").
 * Returns a message listing what was changed.
 */
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector patch = toSlime(request.getData()).get();
    StringJoiner messages = new StringJoiner("\n").setEmptyValue("No applicable changes.");
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), app -> {
        Inspector majorField = patch.field("majorVersion");
        if (majorField.valid()) {
            Integer pinnedMajor = majorField.asLong() == 0 ? null : (int) majorField.asLong();
            app = app.withMajorVersion(pinnedMajor);
            messages.add("Set major version to " + (pinnedMajor == null ? "empty" : pinnedMajor));
        }
        Inspector keyField = patch.field("pemDeployKey");
        if (keyField.valid()) {
            String pem = keyField.asString();
            app = app.withDeployKey(KeyUtils.fromPemEncodedPublicKey(pem));
            messages.add("Added deploy key " + pem);
        }
        controller.applications().store(app);
    });
    return new MessageResponse(messages.toString());
}
/** Returns the application with the given tenant and application name, or throws NotExistsException. */
private com.yahoo.vespa.hosted.controller.Application getApplication(String tenantName, String applicationName) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    return controller.applications()
                     .getApplication(id)
                     .orElseThrow(() -> new NotExistsException(id + " not found"));
}
/** Returns the instance with the given tenant, application and instance name, or throws NotExistsException. */
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    return controller.applications()
                     .getInstance(id)
                     .orElseThrow(() -> new NotExistsException(id + " not found"));
}
/** Lists the nodes allocated to the given deployment, as a JSON "nodes" array. */
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
    var id = ApplicationId.from(tenantName, applicationName, instanceName);
    var zone = requireZone(environment, region);
    var slime = new Slime();
    var nodeArray = slime.setObject().setArray("nodes");
    for (var node : controller.serviceRegistry().configServer().nodeRepository().list(zone, id)) {
        var entry = nodeArray.addObject();
        entry.setString("hostname", node.hostname().value());
        entry.setString("state", valueOf(node.state()));
        node.reservedTo().ifPresent(tenant -> entry.setString("reservedTo", tenant.value()));
        entry.setString("orchestration", valueOf(node.serviceState()));
        entry.setString("version", node.currentVersion().toString());
        entry.setString("flavor", node.flavor());
        toSlime(node.resources(), entry);
        entry.setBool("fastDisk", node.resources().diskSpeed() == NodeResources.DiskSpeed.fast);
        entry.setString("clusterId", node.clusterId());
        entry.setString("clusterType", valueOf(node.clusterType()));
    }
    return new SlimeJsonResponse(slime);
}
/** Renders autoscaling data — min/max/current allocation, target, suggestion, utilization and scaling events — per cluster. */
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
    var id = ApplicationId.from(tenantName, applicationName, instanceName);
    var zone = requireZone(environment, region);
    var application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);
    var slime = new Slime();
    var clustersObject = slime.setObject().setObject("clusters");
    for (var cluster : application.clusters().values()) {
        var clusterObject = clustersObject.setObject(cluster.id().value());
        toSlime(cluster.min(), clusterObject.setObject("min"));
        toSlime(cluster.max(), clusterObject.setObject("max"));
        toSlime(cluster.current(), clusterObject.setObject("current"));
        // A target is only reported when it differs from what is already allocated.
        cluster.target()
               .filter(target -> ! target.justNumbers().equals(cluster.current().justNumbers()))
               .ifPresent(target -> toSlime(target, clusterObject.setObject("target")));
        cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested")));
        utilizationToSlime(cluster.utilization(), clusterObject.setObject("utilization"));
        scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
        clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus());
    }
    return new SlimeJsonResponse(slime);
}
/**
 * Maps a node state to its wire name. Every exposed state serializes as its enum constant
 * name; any other state is rejected, exactly as before.
 */
private static String valueOf(Node.State state) {
    switch (state) {
        case failed:
        case parked:
        case dirty:
        case ready:
        case active:
        case inactive:
        case reserved:
        case provisioned:
            return state.name();
        default:
            throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
    }
}
/** Maps a node orchestration state to its wire name; unrecognized states render as "unknown". */
static String valueOf(Node.ServiceState state) {
    switch (state) {
        case expectedUp:      return "expectedUp";
        case allowedDown:     return "allowedDown";
        case permanentlyDown: return "permanentlyDown";
        case unorchestrated:  return "unorchestrated";
        default:              return "unknown";
    }
}
/** Maps a node cluster type to its wire name — each exposed type serializes as its constant name. */
private static String valueOf(Node.ClusterType type) {
    switch (type) {
        case admin:
        case content:
        case container:
        case combined:
            return type.name();
        default:
            throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
    }
}
/** Maps a disk speed to its wire name — each exposed value serializes as its constant name. */
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
    switch (diskSpeed) {
        case fast:
        case slow:
        case any:
            return diskSpeed.name();
        default:
            throw new IllegalArgumentException("Unknown disk speed '" + diskSpeed.name() + "'");
    }
}
/** Maps a storage type to its wire name — each exposed value serializes as its constant name. */
private static String valueOf(NodeResources.StorageType storageType) {
    switch (storageType) {
        case remote:
        case local:
        case any:
            return storageType.name();
        default:
            throw new IllegalArgumentException("Unknown storage type '" + storageType.name() + "'");
    }
}
/** Streams log entries for the given deployment straight from the config server to the client. */
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    var deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                      requireZone(environment, region));
    InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
    return new HttpResponse(200) {
        @Override
        public void render(OutputStream outputStream) throws IOException {
            logStream.transferTo(outputStream);
        }
    };
}
/** Returns proton (search core) metrics for the given deployment, fetched from the config server. */
private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
    var deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                      requireZone(environment, region));
    return buildResponseFromProtonMetrics(controller.serviceRegistry().configServer().getProtonMetrics(deployment));
}
/**
 * Wraps the given proton metrics in a pretty-printed JSON response of the form
 * {"metrics": [...]}; answers 500 with an empty body if serialization fails.
 */
private JsonResponse buildResponseFromProtonMetrics(List<ProtonMetrics> protonMetrics) {
    try {
        var metricsArray = jsonMapper.createArrayNode();
        for (ProtonMetrics metrics : protonMetrics)
            metricsArray.add(metrics.toJson());
        var payload = jsonMapper.createObjectNode();
        payload.set("metrics", metricsArray);
        return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(payload));
    } catch (JsonProcessingException e) {
        log.log(Level.SEVERE, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
        return new JsonResponse(500, "");
    }
}
/**
 * Force-triggers (or, with "reTrigger" set, re-triggers) the given job for the given
 * instance, returning a message naming which jobs were triggered, if any.
 */
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    boolean requireTests = ! requestObject.field("skipTests").asBool();
    String triggered;
    if (requestObject.field("reTrigger").asBool()) {
        triggered = controller.applications().deploymentTrigger()
                              .reTrigger(id, type).type().jobName();
    } else {
        triggered = controller.applications().deploymentTrigger()
                              .forceTrigger(id, type, request.getJDiscRequest().getUserPrincipal().getName(), requireTests)
                              .stream().map(job -> job.type().jobName()).collect(joining(", "));
    }
    return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
                                                   : "Triggered " + triggered + " for " + id);
}
/** Pauses the given job for the maximum allowed pause duration. */
private HttpResponse pause(ApplicationId id, JobType type) {
    controller.applications().deploymentTrigger()
              .pauseJob(id, type, controller.clock().instant().plus(DeploymentTrigger.maxPause));
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
/** Resumes a previously paused job for the given instance. */
private HttpResponse resume(ApplicationId id, JobType type) {
    controller.applications()
              .deploymentTrigger()
              .resumeJob(id, type);
    return new MessageResponse(type.jobName() + " for " + id + " resumed");
}
/**
 * Serializes overview information for the given application into the given object:
 * ids, a "deployments" link, latest version, project id, deploying/outstanding change,
 * major version pin, per-instance details, deploy keys, service quality metrics,
 * activity, and ownership/deployment issue ids.
 * NOTE: slime field insertion order is the JSON field order — do not reorder statements.
 */
private void toSlime(Cursor object, com.yahoo.vespa.hosted.controller.Application application, HttpRequest request) {
    object.setString("tenant", application.id().tenant().value());
    object.setString("application", application.id().application().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + application.id().tenant().value() +
                                             "/application/" + application.id().application().value() +
                                             "/job/",
                                             request.getUri()).toString());

    DeploymentStatus status = controller.jobController().deploymentStatus(application);

    application.latestVersion().ifPresent(version -> toSlime(version, object.setObject("latestVersion")));
    application.projectId().ifPresent(id -> object.setLong("projectId", id));

    // Deploying and outstanding change are reported from the first instance only.
    application.instances().values().stream().findFirst().ifPresent(instance -> {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
    });

    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));

    // Instance list, optionally restricted to production instances by a request parameter.
    Cursor instancesArray = object.setArray("instances");
    for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                  : application.instances().values())
        toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);

    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);

    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());

    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/**
 * Serializes one instance (as part of the application overview) into the given object:
 * name, deploying/outstanding change, change blockers, global endpoints, and its
 * deployments (recursively when requested).
 * Fix: a List&lt;JobStatus&gt; was computed here on every call but never used — dead code removed.
 */
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
    object.setString("instance", instance.name().value());
    if (deploymentSpec.instance(instance.name()).isPresent()) {
        // Changes in progress, and outstanding changes waiting to roll out.
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));

        // Configured change blocker windows.
        Cursor changeBlockers = object.setArray("changeBlockers");
        deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }

    globalEndpointsToSlime(object, instance);

    // Deployments, in the declared order when this instance is in the deployment spec.
    List<Deployment> deployments = deploymentSpec.instance(instance.name())
                                                 .map(spec -> new DeploymentSteps(spec, controller::system))
                                                 .map(steps -> steps.sortedDeployments(instance.deployments().values()))
                                                 .orElse(List.copyOf(instance.deployments().values()));
    Cursor deploymentsArray = object.setArray("deployments");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = deploymentsArray.addObject();
        if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);

        if (recurseOverDeployments(request)) // Include full deployment information when recursive.
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/instance/" + instance.name().value() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
}
/**
 * Adds the instance's rotation-backed, non-legacy global endpoint URLs (deduplicated,
 * in encounter order) as "globalRotations", and its first rotation id as "rotationId".
 */
private void globalEndpointsToSlime(Cursor object, Instance instance) {
    var globalRotationsArray = object.setArray("globalRotations");
    controller.routing().endpointsOf(instance.id())
              .requiresRotation()
              .not().legacy()
              .asList().stream()
              .map(Endpoint::url)
              .map(URI::toString)
              .distinct()
              .forEach(globalRotationsArray::addString);
    instance.rotations().stream()
            .map(AssignedRotation::rotationId)
            .findFirst()
            .ifPresent(rotation -> object.setString("rotationId", rotation.asString()));
}
/**
 * Serializes full information for the given instance into the given object: ids, source,
 * project id, changes and change blockers, major version pin, global endpoints, deployments
 * (plus declared production zones not yet deployed to), deploy keys, metrics, activity and
 * ownership/deployment issue ids.
 * Fix: a List&lt;JobStatus&gt; was computed here on every call but never used — dead code removed.
 */
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
    com.yahoo.vespa.hosted.controller.Application application = status.application();
    object.setString("tenant", instance.id().tenant().value());
    object.setString("application", instance.id().application().value());
    object.setString("instance", instance.id().instance().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + instance.id().tenant().value() +
                                             "/application/" + instance.id().application().value() +
                                             "/instance/" + instance.id().instance().value() + "/job/",
                                             request.getUri()).toString());

    application.latestVersion().ifPresent(version -> {
        sourceRevisionToSlime(version.source(), object.setObject("source"));
        version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
        version.commit().ifPresent(commit -> object.setString("commit", commit));
    });

    application.projectId().ifPresent(id -> object.setLong("projectId", id));

    if (application.deploymentSpec().instance(instance.name()).isPresent()) {
        // Changes in progress, and outstanding changes waiting to roll out.
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));

        // Configured change blocker windows.
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }

    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));

    globalEndpointsToSlime(object, instance);

    // Deployments, in the declared order when this instance is in the deployment spec.
    List<Deployment> deployments =
            application.deploymentSpec().instance(instance.name())
                       .map(spec -> new DeploymentSteps(spec, controller::system))
                       .map(steps -> steps.sortedDeployments(instance.deployments().values()))
                       .orElse(List.copyOf(instance.deployments().values()));

    Cursor instancesArray = object.setArray("instances");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = instancesArray.addObject();

        // Rotation status is only reported for production deployments.
        if (deployment.zone().environment() == Environment.prod) {
            if (instance.rotations().size() == 1) {
                toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment),
                        deploymentObject);
            }
            if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
            }
        }

        if (recurseOverDeployments(request)) // Include full deployment information when recursive.
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", instance.id().instance().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }

    // Declared production deployment zones where nothing is deployed yet.
    status.jobSteps().keySet().stream()
          .filter(job -> job.application().instance().equals(instance.name()))
          .filter(job -> job.type().isProduction() && job.type().isDeployment())
          .map(job -> job.type().zone(controller.system()))
          .filter(zone -> ! instance.deployments().containsKey(zone))
          .forEach(zone -> {
              Cursor deploymentObject = instancesArray.addObject();
              deploymentObject.setString("environment", zone.environment().value());
              deploymentObject.setString("region", zone.region().value());
          });

    application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);

    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());

    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/** Returns details for a single deployment of an instance; throws NotExistsException if either is missing. */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment,
                                String region, HttpRequest request) {
    var id = ApplicationId.from(tenantName, applicationName, instanceName);
    var instance = controller.applications().getInstance(id)
                             .orElseThrow(() -> new NotExistsException(id + " not found"));
    var zone = requireZone(environment, region);
    var deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " is not deployed in " + zone);
    var slime = new Slime();
    toSlime(slime.setObject(), new DeploymentId(instance.id(), zone), deployment, request);
    return new SlimeJsonResponse(slime);
}
/** Serializes a change: any platform version as "version", and any known application version as "revision". */
private void toSlime(Cursor object, Change change) {
    change.platform().ifPresent(platform -> object.setString("version", platform.toString()));
    change.application().ifPresent(revision -> {
        if ( ! revision.isUnknown())
            toSlime(revision, object.setObject("revision"));
    });
}
/** Serializes one endpoint: cluster, TLS flag, URL, scope and routing method. Field order is the JSON order. */
private void toSlime(Endpoint endpoint, Cursor object) {
    object.setString("cluster", endpoint.cluster().value());
    object.setBool("tls", endpoint.tls());
    object.setString("url", endpoint.url().toString());
    object.setString("scope", endpointScopeString(endpoint.scope()));
    object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
}
/**
 * Serializes full details of a single deployment into the given response object: ids,
 * direct zone and global endpoints, links to clusters/nodes/monitoring, versions,
 * expiry, source revision, rotation status, the status of the job deploying to this
 * zone, activity, and deployment metrics.
 * NOTE: slime field insertion order is the JSON field order — do not reorder statements.
 */
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
    response.setString("tenant", deploymentId.applicationId().tenant().value());
    response.setString("application", deploymentId.applicationId().application().value());
    response.setString("instance", deploymentId.applicationId().instance().value());
    response.setString("environment", deploymentId.zoneId().environment().value());
    response.setString("region", deploymentId.zoneId().region().value());

    var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));

    // Non-legacy zone endpoints of this deployment, then non-legacy global endpoints targeting it.
    var endpointArray = response.setArray("endpoints");
    EndpointList zoneEndpoints = controller.routing().endpointsOf(deploymentId)
                                           .scope(Endpoint.Scope.zone)
                                           .not().legacy();
    for (var endpoint : controller.routing().directEndpoints(zoneEndpoints, deploymentId.applicationId())) {
        toSlime(endpoint, endpointArray.addObject());
    }
    EndpointList globalEndpoints = controller.routing().endpointsOf(application, deploymentId.applicationId().instance())
                                             .not().legacy()
                                             .targets(deploymentId.zoneId());
    for (var endpoint : controller.routing().directEndpoints(globalEndpoints, deploymentId.applicationId())) {
        toSlime(endpoint, endpointArray.addObject());
    }

    // Links to related resources, and version/expiry information.
    response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
    response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
    response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
    response.setString("version", deployment.version().toFullString());
    response.setString("revision", deployment.applicationVersion().id());
    response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
    controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
              .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));

    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
    sourceRevisionToSlime(deployment.applicationVersion().source(), response);

    var instance = application.instances().get(deploymentId.applicationId().instance());
    if (instance != null) {
        // Rotation status is only relevant for production deployments with assigned rotations.
        if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);

        // Status of the job deploying to this zone: "complete", "pending" (not yet ready to run), or "running".
        JobType.from(controller.system(), deployment.zone())
               .map(type -> new JobId(instance.id(), type))
               .map(status.jobSteps()::get)
               .ifPresent(stepStatus -> {
                   JobControllerApiHandlerHelper.applicationVersionToSlime(
                           response.setObject("applicationVersion"), deployment.applicationVersion());
                   if (!status.jobsToRun().containsKey(stepStatus.job().get()))
                       response.setString("status", "complete");
                   else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true))
                       response.setString("status", "pending");
                   else response.setString("status", "running");
               });
    }

    Cursor activity = response.setObject("activity");
    deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

    DeploymentMetrics metrics = deployment.metrics();
    Cursor metricsObject = response.setObject("metrics");
    metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
    metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
    metricsObject.setDouble("documentCount", metrics.documentCount());
    metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
    metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
    metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}
/** Serializes a known application version: build number, id hash, source revision, source URL and commit. */
private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
    if (applicationVersion.isUnknown()) return; // Nothing to report for unknown versions.
    object.setLong("buildNumber", applicationVersion.buildNumber().getAsLong());
    object.setString("hash", applicationVersion.id());
    sourceRevisionToSlime(applicationVersion.source(), object.setObject("source"));
    applicationVersion.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
    applicationVersion.commit().ifPresent(commit -> object.setString("commit", commit));
}
/** Serializes the source revision (repository, branch, commit) into the given object, if present. */
private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
    revision.ifPresent(source -> {
        object.setString("gitRepository", source.repository());
        object.setString("gitBranch", source.branch());
        object.setString("gitCommit", source.commit());
    });
}
/** Writes the BCP (rotation) status of a deployment as {"bcpStatus": {"rotationStatus": ...}}. */
private void toSlime(RotationState state, Cursor object) {
    object.setObject("bcpStatus").setString("rotationStatus", rotationStateString(state));
}
/** Serializes per-rotation endpoint status for the given deployment into an "endpointStatus" array. */
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
    Cursor array = object.setArray("endpointStatus");
    rotations.forEach(rotation -> {
        var targets = status.of(rotation.rotationId());
        var statusObject = array.addObject();
        statusObject.setString("endpointId", rotation.endpointId().id());
        statusObject.setString("rotationId", rotation.rotationId().asString());
        statusObject.setString("clusterId", rotation.clusterId().value());
        statusObject.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment)));
        statusObject.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
    });
}
/** Returns the monitoring system URI for the given deployment, as resolved by the zone registry. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
    return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}
/**
 * Returns a non-broken, released version at least as old as the oldest platform the given application is on.
 *
 * If no known version is applicable, the newest version at least as old as the oldest platform is selected,
 * among all versions released for this system. If no such versions exists, throws an IllegalStateException.
 */
private Version compileVersion(TenantAndApplicationId id) {
    Version oldestPlatform = controller.applications().oldestInstalledPlatform(id);
    VersionStatus versionStatus = controller.readVersionStatus();
    return versionStatus.versions().stream()
                        .filter(version -> version.confidence().equalOrHigherThan(VespaVersion.Confidence.low))
                        .filter(VespaVersion::isReleased)
                        .map(VespaVersion::versionNumber)
                        .filter(version -> ! version.isAfter(oldestPlatform))
                        .max(Comparator.naturalOrder())
                        .orElseGet(() -> {
                            // Fall back to the newest applicable maven release not already known to the system.
                            // The known-version set is built once here, rather than once per maven candidate
                            // as before (accidental O(n*m) work in the filter).
                            var knownVersions = versionStatus.versions().stream()
                                                             .map(VespaVersion::versionNumber)
                                                             .collect(Collectors.toSet());
                            return controller.mavenRepository().metadata().versions().stream()
                                             .filter(version -> ! version.isAfter(oldestPlatform))
                                             .filter(version -> ! knownVersions.contains(version))
                                             .max(Comparator.naturalOrder())
                                             .orElseThrow(() -> new IllegalStateException("No available releases of " +
                                                                                          controller.mavenRepository().artifactId()));
                        });
}
/** Takes a deployment's global endpoints in or out of service, for both rotation- and policy-backed routing. */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    var instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    var zone = requireZone(environment, region);
    if ( ! instance.deployments().containsKey(zone))
        throw new NotExistsException(instance + " has no deployment in " + zone);
    var deploymentId = new DeploymentId(instance.id(), zone);
    setGlobalRotationStatus(deploymentId, inService, request);
    setGlobalEndpointStatus(deploymentId, inService, request);
    return new MessageResponse(String.format("Successfully set %s in %s %s service",
                                             instance.id().toShortString(), zone, inService ? "in" : "out of"));
}
/** Set the global endpoint status for given deployment. This only applies to global endpoints backed by a cloud service */
private void setGlobalEndpointStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
    // Operators and tenants are recorded as distinct agents for the status change.
    var agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant;
    controller.routing().policies().setGlobalRoutingStatus(deployment,
                                                           inService ? GlobalRouting.Status.in : GlobalRouting.Status.out,
                                                           agent);
}
/** Set the global rotation status for given deployment. This only applies to global endpoints backed by a rotation */
private void setGlobalRotationStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
    var reason = mandatory("reason", toSlime(request.getData()).get()).asString();
    var agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant;
    var endpointStatus = new EndpointStatus(inService ? EndpointStatus.Status.in : EndpointStatus.Status.out,
                                            reason,
                                            agent.name(),
                                            controller.clock().instant().getEpochSecond());
    controller.routing().setGlobalRotationStatus(deployment, endpointStatus);
}
/**
 * Returns the global rotation status for a deployment as a "globalrotationoverride" array,
 * containing, per endpoint, its upstream id followed by a status object.
 */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    var deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                        requireZone(environment, region));
    var slime = new Slime();
    var array = slime.setObject().setArray("globalrotationoverride");
    controller.routing().globalRotationStatus(deploymentId)
              .forEach((endpoint, status) -> {
                  array.addString(endpoint.upstreamIdOf(deploymentId));
                  var statusObject = array.addObject();
                  statusObject.setString("status", status.getStatus().name());
                  statusObject.setString("reason", status.getReason() == null ? "" : status.getReason());
                  statusObject.setString("agent", status.getAgent() == null ? "" : status.getAgent());
                  statusObject.setLong("timestamp", status.getEpoch());
              });
    return new SlimeJsonResponse(slime);
}
/** Returns the rotation status of the given deployment, for the rotation identified by the given endpoint id. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().requireInstance(id);
    ZoneId zone = requireZone(environment, region);
    RotationId rotation = findRotationId(instance, endpointId);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " has no deployment in " + zone);

    Slime slime = new Slime();
    toSlime(instance.rotationStatus().of(rotation, deployment), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/**
 * Returns metering data for all instances of the given application: the current allocation rate,
 * aggregate allocations for this and last month, and per-instance time series of cpu, memory and disk.
 */
private HttpResponse metering(String tenant, String application, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();

    MeteringData meteringData = controller.serviceRegistry()
                                          .meteringService()
                                          .getMeteringData(TenantName.from(tenant), ApplicationName.from(application));

    // The three aggregate views share the same {cpu, mem, disk} shape.
    toMeteringSlime(meteringData.getCurrentSnapshot(), root.setObject("currentrate"));
    toMeteringSlime(meteringData.getThisMonth(), root.setObject("thismonth"));
    toMeteringSlime(meteringData.getLastMonth(), root.setObject("lastmonth"));

    // Snapshot history: one {unixms, value} series per resource, keyed by instance name.
    Map<ApplicationId, List<ResourceSnapshot>> history = meteringData.getSnapshotHistory();
    Cursor details = root.setObject("details");
    Cursor detailsCpu = details.setObject("cpu");
    Cursor detailsMem = details.setObject("mem");
    Cursor detailsDisk = details.setObject("disk");
    history.forEach((applicationId, snapshots) -> {
        String instanceName = applicationId.instance().value();
        Cursor cpuData = detailsCpu.setObject(instanceName).setArray("data");
        Cursor memData = detailsMem.setObject(instanceName).setArray("data");
        Cursor diskData = detailsDisk.setObject(instanceName).setArray("data");
        for (ResourceSnapshot snapshot : snapshots) {
            long timestamp = snapshot.getTimestamp().toEpochMilli();
            addMeteringPoint(cpuData, timestamp, snapshot.getCpuCores());
            addMeteringPoint(memData, timestamp, snapshot.getMemoryGb());
            addMeteringPoint(diskData, timestamp, snapshot.getDiskGb());
        }
    });
    return new SlimeJsonResponse(slime);
}

/** Writes the cpu/mem/disk fields of the given allocation to the given object. */
private static void toMeteringSlime(ResourceAllocation allocation, Cursor object) {
    object.setDouble("cpu", allocation.getCpuCores());
    object.setDouble("mem", allocation.getMemoryGb());
    object.setDouble("disk", allocation.getDiskGb());
}

/** Adds a {unixms, value} point to the given time series array. */
private static void addMeteringPoint(Cursor series, long timestamp, double value) {
    Cursor point = series.addObject();
    point.setLong("unixms", timestamp);
    point.setDouble("value", value);
}
/** Returns the change currently rolling out for the given instance, if any. */
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    Change change = instance.change();
    if ( ! change.isEmpty()) {
        change.platform().ifPresent(platform -> root.setString("platform", platform.toString()));
        change.application().ifPresent(version -> root.setString("application", version.id()));
        root.setBool("pinned", change.isPinned());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns whether orchestration of the given deployment is currently suspended. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    slime.setObject().setBool("suspended", controller.applications().isSuspended(deploymentId));
    return new SlimeJsonResponse(slime);
}
/** Lists the services of the given deployment, as reported by the config server. */
private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationView view = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
    ZoneId zone = requireZone(environment, region);
    ApplicationId id = new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build();
    ServiceApiResponse response = new ServiceApiResponse(zone, id, controller.zoneRegistry().getConfigServerApiUris(zone), request.getUri());
    response.setResponse(view);
    return response;
}
/**
 * Proxies a service API request to the config server of the deployment's zone.
 * Requests for a cluster controller status page are returned as raw HTML; everything else as JSON.
 */
private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));

    if ("container-clustercontroller".equals(serviceName) && restPath.contains("/status/")) {
        // Split on the first "/status/" only. A limit of 2 also guarantees two parts even when the
        // marker is at the very end of the path, where the original unlimited split produced a
        // single-element array and parts[1] threw ArrayIndexOutOfBoundsException.
        String[] parts = restPath.split("/status/", 2);
        String result = controller.serviceRegistry().configServer().getClusterControllerStatus(deploymentId, parts[0], parts[1]);
        return new HtmlResponse(result);
    }

    Map<?,?> result = controller.serviceRegistry().configServer().getServiceApiResponse(deploymentId, serviceName, restPath);
    ServiceApiResponse response = new ServiceApiResponse(deploymentId.zoneId(),
                                                         deploymentId.applicationId(),
                                                         controller.zoneRegistry().getConfigServerApiUris(deploymentId.zoneId()),
                                                         request.getUri());
    response.setResponse(result, serviceName, restPath);
    return response;
}
/** Serves content of the deployed application package at the given path, from the config server. */
private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, String restPath, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    String path = "/" + restPath;
    return controller.serviceRegistry().configServer().getApplicationPackageContent(deployment, path, request.getUri());
}
/**
 * Updates an existing tenant from the specification and credentials in the request.
 * @throws NotExistsException if the tenant does not exist (rather than creating it implicitly)
 */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName); // existence check: 404 before attempting an update
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    // Reuse the already-parsed name instead of parsing the string a second time.
    return tenant(controller.tenants().require(tenant), request);
}
/** Creates a tenant with the given name, from the specification and credentials in the request. */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().create(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    // Reuse the already-parsed name instead of parsing the string a second time.
    return tenant(controller.tenants().require(tenant), request);
}
/** Creates an application (without instances), authorized by the credentials in the request. */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    // Invoked for its side effect; the created application itself is not needed for the response.
    controller.applications().createApplication(id, credentials);
    Slime slime = new Slime();
    toSlime(id, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Creates the given instance, first creating its application if that does not already exist. */
private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    // Create the parent application on demand, so a bare instance POST also works.
    if (controller.applications().getApplication(id).isEmpty())
        createApplication(tenantName, applicationName, request);

    ApplicationId instanceId = id.instance(instanceName);
    controller.applications().createInstance(instanceId);

    Slime slime = new Slime();
    toSlime(instanceId, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
// Audit first, so the request is recorded even if triggering below fails.
request = controller.auditLogger().log(request);
String versionString = readToString(request.getData());
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
StringBuilder response = new StringBuilder();
// The message is built inside the application lock so it reflects the state actually committed.
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
Version version = Version.fromString(versionString);
VersionStatus versionStatus = controller.readVersionStatus();
// An empty version means "deploy the current system version".
if (version.equals(Version.emptyVersion))
version = controller.systemVersion(versionStatus);
if (!versionStatus.isActive(version))
throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
"Version is not active in this system. " +
"Active versions: " + versionStatus.versions()
.stream()
.map(VespaVersion::versionNumber)
.map(Version::toString)
.collect(joining(", ")));
Change change = Change.of(version);
// Pinning locks the instance to this platform version until explicitly unpinned.
if (pin)
change = change.withPin();
controller.applications().deploymentTrigger().forceChange(id, change);
response.append("Triggered ").append(change).append(" for ").append(id);
});
return new MessageResponse(response.toString());
}
/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    controller.auditLogger().log(request);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        // Fail with a clear client error when nothing has been submitted yet, instead of the
        // NoSuchElementException an unchecked Optional.get() would produce.
        Change change = Change.of(application.get().latestVersion()
                                             .orElseThrow(() -> new IllegalArgumentException("No application package has been submitted for " + id)));
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder message = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Change current = application.get().require(id.instance()).change();
        if (current.isEmpty()) {
            message.append("No deployment in progress for ").append(id).append(" at this time");
            return;
        }
        // NOTE(review): toUpperCase() uses the default locale — presumably safe for the enum names
        // involved, but Locale.ROOT would be more robust; confirm before changing.
        controller.applications().deploymentTrigger().cancelChange(id, ChangesToCancel.valueOf(choice.toUpperCase()));
        message.append("Changed deployment from '").append(current).append("' to '")
               .append(controller.applications().requireInstance(id).change()).append("' for ").append(id);
    });
    return new MessageResponse(message.toString());
}
/** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */
private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    List<String> clusterNames = nonBlankCommaSeparatedValues(request.getProperty("clusterId"));
    List<String> documentTypes = nonBlankCommaSeparatedValues(request.getProperty("documentType"));
    controller.applications().reindex(id, zone, clusterNames, documentTypes, request.getBooleanProperty("indexedOnly"));
    // The two suffixes are independent: previously the "for types" part was nested inside the
    // clusters branch, so it was silently dropped when only documentType was given.
    return new MessageResponse("Requested reindexing of " + id + " in " + zone +
                               (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames)) +
                               (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)));
}

/** Splits a comma-separated property value into its non-blank parts; empty when the value is absent. */
private static List<String> nonBlankCommaSeparatedValues(String value) {
    return Optional.ofNullable(value).stream()
                   .flatMap(csv -> Stream.of(csv.split(",")))
                   .filter(part -> ! part.isBlank())
                   .collect(toUnmodifiableList());
}
/** Gets reindexing status of an application in a zone. */
private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);
Slime slime = new Slime();
Cursor root = slime.setObject();
root.setBool("enabled", reindexing.enabled());
Cursor clustersArray = root.setArray("clusters");
// Clusters, and the entries within each cluster, are sorted by key for a stable response.
reindexing.clusters().entrySet().stream().sorted(comparingByKey())
.forEach(cluster -> {
Cursor clusterObject = clustersArray.addObject();
clusterObject.setString("name", cluster.getKey());
// Pending reindexing per document type: the config generation required before it can start.
Cursor pendingArray = clusterObject.setArray("pending");
cluster.getValue().pending().entrySet().stream().sorted(comparingByKey())
.forEach(pending -> {
Cursor pendingObject = pendingArray.addObject();
pendingObject.setString("type", pending.getKey());
pendingObject.setLong("requiredGeneration", pending.getValue());
});
// Ready reindexing per document type, with the status fields written by setStatus.
Cursor readyArray = clusterObject.setArray("ready");
cluster.getValue().ready().entrySet().stream().sorted(comparingByKey())
.forEach(ready -> {
Cursor readyObject = readyArray.addObject();
readyObject.setString("type", ready.getKey());
setStatus(readyObject, ready.getValue());
});
});
return new SlimeJsonResponse(slime);
}
/**
 * Fills in the given status object from the given reindexing status.
 * Every field is optional and omitted when absent; the statement order here fixes the JSON field order.
 */
void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
status.readyAt().ifPresent(readyAt -> statusObject.setLong("readyAtMillis", readyAt.toEpochMilli()));
status.startedAt().ifPresent(startedAt -> statusObject.setLong("startedAtMillis", startedAt.toEpochMilli()));
status.endedAt().ifPresent(endedAt -> statusObject.setLong("endedAtMillis", endedAt.toEpochMilli()));
// A state toString does not know maps to null, which map(...) turns into an absent Optional, omitting the field.
status.state().map(ApplicationApiHandler::toString).ifPresent(state -> statusObject.setString("state", state));
status.message().ifPresent(message -> statusObject.setString("message", message));
status.progress().ifPresent(progress -> statusObject.setDouble("progress", progress));
}
/**
 * Returns the lowercase wire name of the given reindexing state, or null for states
 * this API does not expose (callers then omit the field entirely).
 */
private static String toString(ApplicationReindexing.State state) {
    switch (state) {
        case SUCCESSFUL: return "successful";
        case FAILED:     return "failed";
        case RUNNING:    return "running";
        case PENDING:    return "pending";
        default:         return null;
    }
}
/** Enables reindexing of an application in a zone. */
private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    controller.applications().enableReindexing(application, zone);
    return new MessageResponse("Enabled reindexing of " + application + " in " + zone);
}
/** Disables reindexing of an application in a zone. */
private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    controller.applications().disableReindexing(application, zone);
    return new MessageResponse("Disabled reindexing of " + application + " in " + zone);
}
/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    // Each filter dimension is optional; absent properties leave the filter unrestricted.
    Optional<HostName> hostName = Optional.ofNullable(request.getProperty("hostname")).map(HostName::from);
    Optional<ClusterSpec.Type> clusterType = Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from);
    Optional<ClusterSpec.Id> clusterId = Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from);
    controller.applications().restart(deployment, new RestartFilter().withHostName(hostName)
                                                                     .withClusterType(clusterType)
                                                                     .withClusterId(clusterId));
    return new MessageResponse("Requested restart of " + deployment);
}
/** Set suspension status of the given deployment. */
private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    controller.applications().setSuspension(deployment, suspend);
    String verb = suspend ? "Suspended" : "Resumed";
    return new MessageResponse(verb + " orchestration of " + deployment);
}
/**
 * Starts a direct deployment of the posted application package through the job controller,
 * to the zone of the given job type. Only manually deployed environments are allowed,
 * unless the caller is an operator.
 */
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");
Map<String, byte[]> dataParts = parseDataParts(request);
if ( ! dataParts.containsKey("applicationZip"))
throw new IllegalArgumentException("Missing required form part 'applicationZip'");
// NOTE(review): the presence check uses the literal "applicationZip" while the read uses
// EnvironmentResource.APPLICATION_ZIP — presumably the same value; confirm.
ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
Optional.of(id.instance()),
Optional.of(type.zone(controller.system())),
applicationPackage,
Optional.of(requireUserPrincipal(request)));
// An explicit platform version may be given in the optional "deployOptions" part.
Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions"))
.map(json -> SlimeUtils.jsonToSlime(json).get())
.flatMap(options -> optional("vespaVersion", options))
.map(Version::fromString);
controller.jobController().deploy(id, type, version, applicationPackage);
RunId runId = controller.jobController().last(id, type).get().id();
Slime slime = new Slime();
Cursor rootObject = slime.setObject();
rootObject.setString("message", "Deployment started in " + runId +
". This may take about 15 minutes the first time.");
rootObject.setLong("run", runId.number());
return new SlimeJsonResponse(slime);
}
/**
 * Deploys to the given zone. Handles three cases, in order:
 * 1) the system proxy application (deployed at the current system version only),
 * 2) deployment of a registered application version identified by source revision + build number,
 * 3) direct (re)deployment of a posted package, or of whatever is currently deployed.
 */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
Map<String, byte[]> dataParts = parseDataParts(request);
if ( ! dataParts.containsKey("deployOptions"))
return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
/*
* Special handling of the proxy application (the only system application with an application package)
* Setting any other deployOptions here is not supported for now (e.g. specifying version), but
* this might be handy later to handle emergency downgrades.
*/
boolean isZoneApplication = SystemApplication.proxy.id().equals(applicationId);
if (isZoneApplication) {
// Proxy deployments always use the current system version; an explicit version is rejected.
String versionStr = deployOptions.field("vespaVersion").asString();
boolean versionPresent = !versionStr.isEmpty() && !versionStr.equals("null");
if (versionPresent) {
throw new RuntimeException("Version not supported for system applications");
}
// Refuse while an upgrade is in progress, or before the system version is known.
VersionStatus versionStatus = controller.readVersionStatus();
if (versionStatus.isUpgrading()) {
throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
}
Optional<VespaVersion> systemVersion = versionStatus.systemVersion();
if (systemVersion.isEmpty()) {
throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
}
ActivateResult result = controller.applications()
.deploySystemApplicationPackage(SystemApplication.proxy, zone, systemVersion.get().versionNumber());
return new SlimeJsonResponse(toSlime(result));
}
/*
* Normal applications from here
*/
Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip"))
.map(ApplicationPackage::new);
Optional<com.yahoo.vespa.hosted.controller.Application> application = controller.applications().getApplication(TenantAndApplicationId.from(applicationId));
// Source revision and build number together identify a previously registered application version.
Inspector sourceRevision = deployOptions.field("sourceRevision");
Inspector buildNumber = deployOptions.field("buildNumber");
if (sourceRevision.valid() != buildNumber.valid())
throw new IllegalArgumentException("Source revision and build number must both be provided, or not");
Optional<ApplicationVersion> applicationVersion = Optional.empty();
if (sourceRevision.valid()) {
if (applicationPackage.isPresent())
throw new IllegalArgumentException("Application version and application package can't both be provided.");
applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision),
buildNumber.asLong()));
applicationPackage = Optional.of(controller.applications().getApplicationPackage(applicationId,
applicationVersion.get()));
}
// Direct deploy with nothing else specified: redeploy exactly what is in the zone already.
boolean deployDirectly = deployOptions.field("deployDirectly").asBool();
Optional<Version> vespaVersion = optional("vespaVersion", deployOptions).map(Version::new);
if (deployDirectly && applicationPackage.isEmpty() && applicationVersion.isEmpty() && vespaVersion.isEmpty()) {
Optional<Deployment> deployment = controller.applications().getInstance(applicationId)
.map(Instance::deployments)
.flatMap(deployments -> Optional.ofNullable(deployments.get(zone)));
if(deployment.isEmpty())
throw new IllegalArgumentException("Can't redeploy application, no deployment currently exist");
ApplicationVersion version = deployment.get().applicationVersion();
if(version.isUnknown())
throw new IllegalArgumentException("Can't redeploy application, application version is unknown");
applicationVersion = Optional.of(version);
vespaVersion = Optional.of(deployment.get().version());
applicationPackage = Optional.of(controller.applications().getApplicationPackage(applicationId,
applicationVersion.get()));
}
DeployOptions deployOptionsJsonClass = new DeployOptions(deployDirectly,
vespaVersion,
deployOptions.field("ignoreValidationErrors").asBool(),
deployOptions.field("deployCurrentVersion").asBool());
// Identity verification happens only when a package is actually part of this deployment.
applicationPackage.ifPresent(aPackage -> controller.applications().verifyApplicationIdentityConfiguration(applicationId.tenant(),
Optional.of(applicationId.instance()),
Optional.of(zone),
aPackage,
Optional.of(requireUserPrincipal(request))));
ActivateResult result = controller.applications().deploy(applicationId,
zone,
applicationPackage,
applicationVersion,
deployOptionsJsonClass);
return new SlimeJsonResponse(toSlime(result));
}
/** Deletes the given tenant, authorized by the credentials in the request; 404 when it does not exist. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");

    TenantName name = tenant.get().name();
    Credentials credentials = accessControlRequests.credentials(name,
                                                                toSlime(request.getData()).get(),
                                                                request.getJDiscRequest());
    controller.tenants().delete(name, credentials);
    return tenant(tenant.get(), request);
}
/** Deletes the given application, authorized by the credentials in the request. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.applications().deleteApplication(id, accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest()));
    return new MessageResponse("Deleted application " + id);
}
/** Deletes the given instance; when it was the last instance, the application itself is deleted too. */
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    ApplicationId instanceId = id.instance(instanceName);
    controller.applications().deleteInstance(instanceId);
    // Deleting the application requires credentials, so they are only parsed when needed.
    if (controller.applications().requireApplication(id).instances().isEmpty()) {
        Credentials credentials = accessControlRequests.credentials(id.tenant(),
                                                                    toSlime(request.getData()).get(),
                                                                    request.getJDiscRequest());
        controller.applications().deleteApplication(id, credentials);
    }
    return new MessageResponse("Deleted instance " + instanceId.toFullString());
}
/** Deactivates (removes) the given deployment. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    controller.applications().deactivate(deployment.applicationId(), deployment.zoneId());
    return new MessageResponse("Deactivated " + deployment);
}
/** Returns test config for indicated job, with production deployments of the default instance. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
    ApplicationId defaultInstance = TenantAndApplicationId.from(id).defaultInstance();
    HashSet<DeploymentId> deployments = new HashSet<>();
    // All production deployments of the default instance are part of the config ...
    controller.applications().getInstance(defaultInstance)
              .ifPresent(instance -> instance.productionDeployments().keySet()
                                             .forEach(zone -> deployments.add(new DeploymentId(defaultInstance, zone))));
    // ... and so is the zone under test, unless it is itself a production zone.
    if ( ! type.isProduction())
        deployments.add(new DeploymentId(id, type.zone(controller.system())));
    return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
                                                                  type,
                                                                  false,
                                                                  controller.routing().zoneEndpointsOf(deployments),
                                                                  controller.applications().reachableContentClustersByZone(deployments)));
}
/** Parses a source revision from the given object; all three of repository, branch and commit are required. */
private static SourceRevision toSourceRevision(Inspector object) {
    Inspector repository = object.field("repository");
    Inspector branch = object.field("branch");
    Inspector commit = object.field("commit");
    if ( ! (repository.valid() && branch.valid() && commit.valid()))
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");

    return new SourceRevision(repository.asString(), branch.asString(), commit.asString());
}
/** Returns the tenant with the given name, or throws NotExistsException if there is none. */
private Tenant getTenantOrThrow(String tenantName) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty())
        throw new NotExistsException(new TenantId(tenantName));
    return tenant.get();
}
/**
 * Serializes the given tenant, including its applications, to the given object.
 * Type-specific fields differ between athenz and cloud tenants; the applications array is
 * filtered and expanded according to the request's recursion/production parameters.
 */
private void toSlime(Cursor object, Tenant tenant, HttpRequest request) {
object.setString("tenant", tenant.name().value());
object.setString("type", tenantType(tenant));
List<com.yahoo.vespa.hosted.controller.Application> applications = controller.applications().asList(tenant.name());
switch (tenant.type()) {
case athenz:
// Athenz tenants carry property and contact information.
AthenzTenant athenzTenant = (AthenzTenant) tenant;
object.setString("athensDomain", athenzTenant.domain().getName());
object.setString("property", athenzTenant.property().id());
athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
athenzTenant.contact().ifPresent(c -> {
object.setString("propertyUrl", c.propertyUrl().toString());
object.setString("contactsUrl", c.url().toString());
object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
// Contacts are serialized as an array of arrays of names.
Cursor contactsArray = object.setArray("contacts");
c.persons().forEach(persons -> {
Cursor personArray = contactsArray.addArray();
persons.forEach(personArray::addString);
});
});
break;
case cloud: {
// Cloud tenants carry developer keys, secret stores and quota information.
CloudTenant cloudTenant = (CloudTenant) tenant;
cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName()));
Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
cloudTenant.developerKeys().forEach((key, user) -> {
Cursor keyObject = pemDeveloperKeysArray.addObject();
keyObject.setString("key", KeyUtils.toPem(key));
keyObject.setString("user", user.getName());
});
Cursor secretStore = object.setArray("secretStores");
cloudTenant.tenantSecretStores().forEach(store -> {
Cursor storeObject = secretStore.addObject();
storeObject.setString("name", store.getName());
storeObject.setString("awsId", store.getAwsId());
storeObject.setString("role", store.getRole());
});
// Quota usage is the sum over all of the tenant's applications.
var tenantQuota = controller.serviceRegistry().billingController().getQuota(tenant.name());
var usedQuota = applications.stream()
.map(com.yahoo.vespa.hosted.controller.Application::quotaUsage)
.reduce(QuotaUsage.none, QuotaUsage::add);
toSlime(tenantQuota, usedQuota, object.setObject("quota"));
break;
}
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
Cursor applicationArray = object.setArray("applications");
for (com.yahoo.vespa.hosted.controller.Application application : applications) {
DeploymentStatus status = controller.jobController().deploymentStatus(application);
// Optionally limit to production instances, and optionally expand each instance in full.
for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
: application.instances().values())
if (recurseOverApplications(request))
toSlime(applicationArray.addObject(), instance, status, request);
else
toSlime(instance.id(), applicationArray.addObject(), request);
}
tenantMetaDataToSlime(tenant, object.setObject("metaData"));
}
/** Serializes the given quota and its usage to the given object; a missing budget is written as nix. */
private void toSlime(Quota quota, QuotaUsage usage, Cursor object) {
    if (quota.budget().isPresent())
        object.setDouble("budget", quota.budget().get().doubleValue());
    else
        object.setNix("budget");
    object.setDouble("budgetUsed", usage.rate());
    quota.maxClusterSize().ifPresent(size -> object.setLong("clusterSize", size));
}
/** Serializes the given cluster resources, including a derived total cost, to the given object. */
private void toSlime(ClusterResources resources, Cursor object) {
    object.setLong("nodes", resources.nodes());
    object.setLong("groups", resources.groups());
    toSlime(resources.nodeResources(), object.setObject("nodeResources"));

    // Cost is scaled down by 3 in non-public systems and rounded to two decimals.
    // The expression is kept in this exact form to preserve its floating point rounding.
    double divisor = controller.zoneRegistry().system().isPublic() ? 1.0 : 3.0;
    object.setDouble("cost", Math.round(resources.nodes() * resources.nodeResources().cost() * 100.0 / divisor) / 100.0);
}
/** Writes the cpu, memory and disk fields of the given utilization to the given object. */
private void utilizationToSlime(Cluster.Utilization utilization, Cursor utilizationObject) {
utilizationObject.setDouble("cpu", utilization.cpu());
utilizationObject.setDouble("memory", utilization.memory());
utilizationObject.setDouble("disk", utilization.disk());
}
/** Serializes each scaling event as a {from, to, at} object into the given array, in input order. */
private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) {
    scalingEvents.forEach(event -> {
        Cursor eventObject = scalingEventsArray.addObject();
        toSlime(event.from(), eventObject.setObject("from"));
        toSlime(event.to(), eventObject.setObject("to"));
        eventObject.setLong("at", event.at().toEpochMilli());
    });
}
/** Serializes the given node resources to the given object; field order determines JSON field order. */
private void toSlime(NodeResources resources, Cursor object) {
object.setDouble("vcpu", resources.vcpu());
object.setDouble("memoryGb", resources.memoryGb());
object.setDouble("diskGb", resources.diskGb());
object.setDouble("bandwidthGbps", resources.bandwidthGbps());
object.setString("diskSpeed", valueOf(resources.diskSpeed()));
object.setString("storageType", valueOf(resources.storageType()));
}
/**
 * Serializes a compact tenant entry (name, type metadata and a self-URL) for the tenants list.
 * Only athenz tenants carry extra metadata here; cloud tenants get just the type.
 */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
object.setString("tenant", tenant.name().value());
Cursor metaData = object.setObject("metaData");
metaData.setString("type", tenantType(tenant));
switch (tenant.type()) {
case athenz:
AthenzTenant athenzTenant = (AthenzTenant) tenant;
metaData.setString("athensDomain", athenzTenant.domain().getName());
metaData.setString("property", athenzTenant.property().id());
break;
case cloud: break;
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
// Self-link for this tenant, on the same host/port as the incoming request.
object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
    /**
     * Writes activity metadata for a tenant: creation time, latest dev deployment,
     * latest production submission, and last login time per user level.
     * Absent timestamps are simply omitted from the output.
     */
    private void tenantMetaDataToSlime(Tenant tenant, Cursor object) {
        List<com.yahoo.vespa.hosted.controller.Application> applications = controller.applications().asList(tenant.name());
        // Most recent start of a dev-environment job run, across all instances of all applications.
        Optional<Instant> lastDev = applications.stream()
                                                .flatMap(application -> application.instances().values().stream())
                                                .flatMap(instance -> controller.jobController().jobs(instance.id()).stream()
                                                                               .filter(jobType -> jobType.environment() == Environment.dev)
                                                                               .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream()))
                                                .map(Run::start)
                                                .max(Comparator.naturalOrder());
        // Most recent build time of a submitted application version.
        Optional<Instant> lastSubmission = applications.stream()
                                                       .flatMap(app -> app.latestVersion().flatMap(ApplicationVersion::buildTime).stream())
                                                       .max(Comparator.naturalOrder());
        object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli());
        lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli()));
        lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli()));
        tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user)
                .ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli()));
        tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer)
                .ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli()));
        tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator)
                .ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli()));
    }
/** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to given query*/
private URI withPathAndQuery(String newPath, String newQuery, URI uri) {
try {
return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null);
}
catch (URISyntaxException e) {
throw new RuntimeException("Will not happen", e);
}
}
    /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
    private URI withPath(String newPath, URI uri) {
        return withPathAndQuery(newPath, null, uri); // null query means "no query component"
    }
    /** Returns the application/v4 API path identifying the given deployment. */
    private String toPath(DeploymentId id) {
        return path("/application", "v4",
                    "tenant", id.applicationId().tenant(),
                    "application", id.applicationId().application(),
                    "instance", id.applicationId().instance(),
                    "environment", id.zoneId().environment(),
                    "region", id.zoneId().region());
    }
private long asLong(String valueOrNull, long defaultWhenNull) {
if (valueOrNull == null) return defaultWhenNull;
try {
return Long.parseLong(valueOrNull);
}
catch (NumberFormatException e) {
throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'");
}
}
    /** Writes a summary of a job run: id, target platform version, target revision (when known), and end (or start) time. */
    private void toSlime(Run run, Cursor object) {
        object.setLong("id", run.id().number());
        object.setString("version", run.versions().targetPlatform().toFullString());
        if ( ! run.versions().targetApplication().isUnknown())
            toSlime(run.versions().targetApplication(), object.setObject("revision"));
        object.setString("reason", "unknown reason"); // placeholder — the actual reason is not available here
        object.setLong("at", run.end().orElse(run.start()).toEpochMilli());
    }
private Slime toSlime(InputStream jsonStream) {
try {
byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
return SlimeUtils.jsonToSlime(jsonBytes);
} catch (IOException e) {
throw new RuntimeException();
}
}
private static Principal requireUserPrincipal(HttpRequest request) {
Principal principal = request.getJDiscRequest().getUserPrincipal();
if (principal == null) throw new InternalServerErrorException("Expected a user principal");
return principal;
}
private Inspector mandatory(String key, Inspector object) {
if ( ! object.field(key).valid())
throw new IllegalArgumentException("'" + key + "' is missing");
return object.field(key);
}
    /** Returns the string value of the given field, if it is present and valid. */
    private Optional<String> optional(String key, Inspector object) {
        return SlimeUtils.optionalString(object.field(key));
    }
    /** Joins the given path elements with '/'; no leading or trailing slash is added. */
    private static String path(Object... elements) {
        return Joiner.on("/").join(elements);
    }
    /** Writes tenant and application names plus the application's API url, resolved against the request URI. */
    private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
        object.setString("tenant", id.tenant().value());
        object.setString("application", id.application().value());
        object.setString("url", withPath("/application/v4" +
                                         "/tenant/" + id.tenant().value() +
                                         "/application/" + id.application().value(),
                                         request.getUri()).toString());
    }
    /** Writes tenant, application and instance names plus the instance's API url, resolved against the request URI. */
    private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
        object.setString("tenant", id.tenant().value());
        object.setString("application", id.application().value());
        object.setString("instance", id.instance().value());
        object.setString("url", withPath("/application/v4" +
                                         "/tenant/" + id.tenant().value() +
                                         "/application/" + id.application().value() +
                                         "/instance/" + id.instance().value(),
                                         request.getUri()).toString());
    }
    /**
     * Serializes the result of a deployment activation: the revision id, package size,
     * prepare log messages, and the config change actions (restarts and refeeds) the
     * config server reported.
     */
    private Slime toSlime(ActivateResult result) {
        Slime slime = new Slime();
        Cursor object = slime.setObject();
        object.setString("revisionId", result.revisionId().id());
        object.setLong("applicationZipSize", result.applicationZipSizeBytes());
        Cursor logArray = object.setArray("prepareMessages");
        if (result.prepareResponse().log != null) { // log may be absent; the array is then left empty
            for (Log logMessage : result.prepareResponse().log) {
                Cursor logObject = logArray.addObject();
                logObject.setLong("time", logMessage.time);
                logObject.setString("level", logMessage.level);
                logObject.setString("message", logMessage.message);
            }
        }
        // Config change actions: services which must be restarted ...
        Cursor changeObject = object.setObject("configChangeActions");
        Cursor restartActionsArray = changeObject.setArray("restart");
        for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
            Cursor restartActionObject = restartActionsArray.addObject();
            restartActionObject.setString("clusterName", restartAction.clusterName);
            restartActionObject.setString("clusterType", restartAction.clusterType);
            restartActionObject.setString("serviceType", restartAction.serviceType);
            serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
            stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
        }
        // ... and document types which must be re-fed.
        Cursor refeedActionsArray = changeObject.setArray("refeed");
        for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
            Cursor refeedActionObject = refeedActionsArray.addObject();
            refeedActionObject.setString("name", refeedAction.name);
            refeedActionObject.setString("documentType", refeedAction.documentType);
            refeedActionObject.setString("clusterName", refeedAction.clusterName);
            serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
            stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
        }
        return slime;
    }
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
for (ServiceInfo serviceInfo : serviceInfoList) {
Cursor serviceInfoObject = array.addObject();
serviceInfoObject.setString("serviceName", serviceInfo.serviceName);
serviceInfoObject.setString("serviceType", serviceInfo.serviceType);
serviceInfoObject.setString("configId", serviceInfo.configId);
serviceInfoObject.setString("hostName", serviceInfo.hostName);
}
}
private void stringsToSlime(List<String> strings, Cursor array) {
for (String string : strings)
array.addString(string);
}
private String readToString(InputStream stream) {
Scanner scanner = new Scanner(stream).useDelimiter("\\A");
if ( ! scanner.hasNext()) return null;
return scanner.next();
}
    /** Returns whether the response should recurse into tenants; implied by any deeper recursion level. */
    private static boolean recurseOverTenants(HttpRequest request) {
        return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive"));
    }
    /** Returns whether the response should recurse into applications; implied by deployment-level recursion. */
    private static boolean recurseOverApplications(HttpRequest request) {
        return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive"));
    }
private static boolean recurseOverDeployments(HttpRequest request) {
return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive"));
}
    /** Returns whether only production instances should be shown, from the 'production' query parameter. */
    private static boolean showOnlyProductionInstances(HttpRequest request) {
        return "true".equals(request.getProperty("production"));
    }
private static String tenantType(Tenant tenant) {
switch (tenant.type()) {
case athenz: return "ATHENS";
case cloud: return "CLOUD";
default: throw new IllegalArgumentException("Unknown tenant type: " + tenant.getClass().getSimpleName());
}
}
    /** Builds an ApplicationId from the tenant, application and instance path segments. */
    private static ApplicationId appIdFromPath(Path path) {
        return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
    }
    /** Resolves the job type from the 'jobtype' path segment. */
    private static JobType jobTypeFromPath(Path path) {
        return JobType.fromJobName(path.get("jobtype"));
    }
private static RunId runIdFromPath(Path path) {
long number = Long.parseLong(path.get("number"));
return new RunId(appIdFromPath(path), jobTypeFromPath(path), number);
}
    /**
     * Handles submission of a new application revision: parses the multipart payload
     * (submit-options JSON plus application and test packages), validates the source url and
     * the identity configuration of the package, and forwards to the job controller.
     */
    private HttpResponse submit(String tenant, String application, HttpRequest request) {
        Map<String, byte[]> dataParts = parseDataParts(request);
        Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
        long projectId = Math.max(1, submitOptions.field("projectId").asLong()); // clamped to at least 1
        Optional<String> repository = optional("repository", submitOptions);
        Optional<String> branch = optional("branch", submitOptions);
        Optional<String> commit = optional("commit", submitOptions);
        // A source revision is only meaningful when repository, branch and commit are all given.
        Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
                                                  ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
                                                  : Optional.empty();
        Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
        Optional<String> authorEmail = optional("authorEmail", submitOptions);
        sourceUrl.map(URI::create).ifPresent(url -> {
            if (url.getHost() == null || url.getScheme() == null)
                throw new IllegalArgumentException("Source URL must include scheme and host");
        });
        ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
        controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
                                                                         Optional.empty(),
                                                                         Optional.empty(),
                                                                         applicationPackage,
                                                                         Optional.of(requireUserPrincipal(request)));
        return JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
                                                            tenant,
                                                            application,
                                                            sourceRevision,
                                                            authorEmail,
                                                            sourceUrl,
                                                            projectId,
                                                            applicationPackage,
                                                            dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP));
    }
    /** Submits a deployment-removal application package, which removes all production deployments as it rolls out. */
    private HttpResponse removeAllProdDeployments(String tenant, String application) {
        JobControllerApiHandlerHelper.submitResponse(controller.jobController(), tenant, application,
                                                     Optional.empty(), Optional.empty(), Optional.empty(), 1,
                                                     ApplicationPackage.deploymentRemoval(), new byte[0]);
        return new MessageResponse("All deployments removed");
    }
private ZoneId requireZone(String environment, String region) {
ZoneId zone = ZoneId.from(environment, region);
if (zone.environment() == Environment.prod && zone.region().value().equals("controller")) {
return zone;
}
if (!controller.zoneRegistry().hasZone(zone)) {
throw new IllegalArgumentException("Zone " + zone + " does not exist in this system");
}
return zone;
}
    /**
     * Parses the multipart payload of the given request.
     * If an X-Content-Hash header is present, the SHA-256 digest of the body must match its
     * base64-decoded value; otherwise the request is rejected.
     */
    private static Map<String, byte[]> parseDataParts(HttpRequest request) {
        String contentHash = request.getHeader("X-Content-Hash");
        if (contentHash == null)
            return new MultipartParser().parse(request);
        // Digest the body while parsing, then compare against the declared hash.
        DigestInputStream digester = Signatures.sha256Digester(request.getData());
        var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
        if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
            throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
        return dataParts;
    }
private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
if (instance.rotations().isEmpty()) {
throw new NotExistsException("global rotation does not exist for " + instance);
}
if (endpointId.isPresent()) {
return instance.rotations().stream()
.filter(r -> r.endpointId().id().equals(endpointId.get()))
.map(AssignedRotation::rotationId)
.findFirst()
.orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() +
" does not exist for " + instance));
} else if (instance.rotations().size() > 1) {
throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
}
return instance.rotations().get(0).rotationId();
}
private static String rotationStateString(RotationState state) {
switch (state) {
case in: return "IN";
case out: return "OUT";
}
return "UNKNOWN";
}
private static String endpointScopeString(Endpoint.Scope scope) {
switch (scope) {
case region: return "region";
case global: return "global";
case zone: return "zone";
}
throw new IllegalArgumentException("Unknown endpoint scope " + scope);
}
private static String routingMethodString(RoutingMethod method) {
switch (method) {
case exclusive: return "exclusive";
case shared: return "shared";
case sharedLayer4: return "sharedLayer4";
}
throw new IllegalArgumentException("Unknown routing method " + method);
}
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName))
.filter(cls::isInstance)
.map(cls::cast)
.orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request"));
}
/** Returns whether given request is by an operator */
private static boolean isOperator(HttpRequest request) {
var securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
return securityContext.roles().stream()
.map(Role::definition)
.anyMatch(definition -> definition == RoleDefinition.hostedOperator);
}
} | class ApplicationApiHandler extends LoggingRequestHandler {
    // NOTE(review): not referenced in the visible part of the file — confirm before removing.
    private static final ObjectMapper jsonMapper = new ObjectMapper();
    // Request paths may optionally carry this prefix; it is stripped before route matching.
    private static final String OPTIONAL_PREFIX = "/api";
    private final Controller controller;
    private final AccessControlRequests accessControlRequests;
    private final TestConfigSerializer testConfigSerializer;
    /**
     * Creates the application API handler.
     *
     * @param parentCtx context passed to the parent LoggingRequestHandler
     * @param controller the controller backing all operations of this API
     * @param accessControlRequests translates requests into access control operations
     */
    @Inject
    public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx,
                                 Controller controller,
                                 AccessControlRequests accessControlRequests) {
        super(parentCtx);
        this.controller = controller;
        this.accessControlRequests = accessControlRequests;
        this.testConfigSerializer = new TestConfigSerializer(controller.system());
    }
    // Generous request timeout — presumably to accommodate slow operations such as deployment; confirm before lowering.
    @Override
    public Duration getTimeout() {
        return Duration.ofMinutes(20);
    }
    /**
     * Entry point: dispatches by HTTP method and centrally maps thrown exceptions to
     * API error responses, so the individual handlers can throw freely.
     */
    @Override
    public HttpResponse handle(HttpRequest request) {
        try {
            // The optional /api prefix is stripped before path matching.
            Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
            switch (request.getMethod()) {
                case GET: return handleGET(path, request);
                case PUT: return handlePUT(path, request);
                case POST: return handlePOST(path, request);
                case PATCH: return handlePATCH(path, request);
                case DELETE: return handleDELETE(path, request);
                case OPTIONS: return handleOPTIONS();
                default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
            }
        }
        catch (ForbiddenException e) {
            return ErrorResponse.forbidden(Exceptions.toMessageString(e));
        }
        catch (NotAuthorizedException e) {
            return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
        }
        catch (NotExistsException e) {
            return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
        }
        catch (IllegalArgumentException e) {
            return ErrorResponse.badRequest(Exceptions.toMessageString(e));
        }
        catch (ConfigServerException e) {
            // Map config server error codes onto the matching HTTP statuses; anything else is a bad request.
            switch (e.getErrorCode()) {
                case NOT_FOUND:
                    return new ErrorResponse(NOT_FOUND, e.getErrorCode().name(), Exceptions.toMessageString(e));
                case ACTIVATION_CONFLICT:
                    return new ErrorResponse(CONFLICT, e.getErrorCode().name(), Exceptions.toMessageString(e));
                case INTERNAL_SERVER_ERROR:
                    return new ErrorResponse(INTERNAL_SERVER_ERROR, e.getErrorCode().name(), Exceptions.toMessageString(e));
                default:
                    return new ErrorResponse(BAD_REQUEST, e.getErrorCode().name(), Exceptions.toMessageString(e));
            }
        }
        catch (RuntimeException e) {
            // Unexpected errors are logged with the stack trace; the client gets only the message.
            log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
            return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
        }
    }
private HttpResponse handleGET(Path path, HttpRequest request) {
if (path.matches("/application/v4/")) return root(request);
if (path.matches("/application/v4/tenant")) return tenants(request);
if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"));
if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/metering")) return metering(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return metrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
return ErrorResponse.notFoundError("Nothing at " + path);
}
    /** Routes PUT requests: tenant and tenant-info updates, secret stores, and rotation overrides. */
    private HttpResponse handlePUT(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }
    /** Routes PATCH requests; both routes patch the application (the instance segment is ignored). */
    private HttpResponse handlePATCH(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }
    /** Routes DELETE requests: removal of tenants, applications, instances, keys, deployments, jobs and overrides. */
    private HttpResponse handleDELETE(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }
/** Responds to OPTIONS (e.g. CORS preflight) requests with the verbs this handler supports. */
private HttpResponse handleOPTIONS() {
    // The body is intentionally empty; only the Allow header carries information.
    var optionsResponse = new EmptyResponse();
    optionsResponse.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
    return optionsResponse;
}
/** Lists every tenant, fully expanded, as a top-level JSON array. */
private HttpResponse recursiveRoot(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenants = slime.setArray();
    controller.tenants().asList().forEach(tenant -> toSlime(tenants.addObject(), tenant, request));
    return new SlimeJsonResponse(slime);
}
/** Root resource: expands all tenants inline when recursion is requested, otherwise links to them. */
private HttpResponse root(HttpRequest request) {
    if (recurseOverTenants(request))
        return recursiveRoot(request);
    return new ResourceResponse(request, "tenant");
}
/** Lists all tenants in compact form (one summary object per tenant). */
private HttpResponse tenants(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantArray = slime.setArray();
    controller.tenants().asList()
              .forEach(tenant -> tenantInTenantsListToSlime(tenant, request.getUri(), tenantArray.addObject()));
    return new SlimeJsonResponse(slime);
}
/** Returns the named tenant serialized as JSON, or 404 when it does not exist. */
private HttpResponse tenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    return tenant(tenant.get(), request);
}
/** Serializes a single, known-to-exist tenant as a JSON object. */
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
    Slime root = new Slime();
    toSlime(root.setObject(), tenant, request);
    return new SlimeJsonResponse(root);
}
/** Returns the info section of the named tenant; only cloud tenants carry info. */
private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isPresent() && tenant.get().type() == Tenant.Type.cloud)
        return tenantInfo(((CloudTenant) tenant.get()).info(), request);
    return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
}
/**
 * Serializes tenant info as JSON. An empty info renders as {} so clients can tell
 * "nothing set" apart from partially filled info.
 */
private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    if ( ! info.isEmpty()) {
        root.setString("name", info.name());
        root.setString("email", info.email());
        root.setString("website", info.website());
        root.setString("invoiceEmail", info.invoiceEmail());
        root.setString("contactName", info.contactName());
        root.setString("contactEmail", info.contactEmail());
        toSlime(info.address(), root);
        toSlime(info.billingContact(), root);
    }
    return new SlimeJsonResponse(slime);
}
/** Writes the "address" sub-object; omitted entirely when the address is unset. */
private void toSlime(TenantInfoAddress address, Cursor parentCursor) {
    if (address.isEmpty()) return;
    Cursor cursor = parentCursor.setObject("address");
    cursor.setString("addressLines", address.addressLines());
    cursor.setString("postalCodeOrZip", address.postalCodeOrZip());
    cursor.setString("city", address.city());
    cursor.setString("stateRegionProvince", address.stateRegionProvince());
    cursor.setString("country", address.country());
}
/** Writes the "billingContact" sub-object; omitted entirely when the contact is unset. */
private void toSlime(TenantInfoBillingContact billingContact, Cursor parentCursor) {
    if (billingContact.isEmpty()) return;
    Cursor billingCursor = parentCursor.setObject("billingContact");
    billingCursor.setString("name", billingContact.name());
    billingCursor.setString("email", billingContact.email());
    billingCursor.setString("phone", billingContact.phone());
    toSlime(billingContact.address(), billingCursor);
}
/** Applies a tenant-info update; only cloud tenants support info. */
private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isPresent() && tenant.get().type() == Tenant.Type.cloud)
        return updateTenantInfo((CloudTenant) tenant.get(), request);
    return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
}
/** Returns the field's string value, or {@code defaultValue} when the field is missing or invalid. */
private String getString(Inspector field, String defaultValue) {  // fixed typo: was "defaultVale"
    return field.valid() ? field.asString() : defaultValue;
}
/**
 * Merges the partial tenant-info update in the request body over the existing info:
 * fields absent from the request keep their current value.
 */
private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) {
    TenantInfo oldInfo = tenant.info();
    Inspector insp = toSlime(request.getData()).get();
    TenantInfo mergedInfo = TenantInfo.EMPTY
            .withName(getString(insp.field("name"), oldInfo.name()))
            .withEmail(getString(insp.field("email"), oldInfo.email()))
            .withWebsite(getString(insp.field("website"), oldInfo.website()))          // fix: fell back to oldInfo.email()
            .withInvoiceEmail(getString(insp.field("invoiceEmail"), oldInfo.invoiceEmail()))
            .withContactName(getString(insp.field("contactName"), oldInfo.contactName()))
            .withContactEmail(getString(insp.field("contactEmail"), oldInfo.contactEmail()))  // fix: fell back to oldInfo.contactName()
            .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address()))
            .withBillingContact(updateTenantInfoBillingContact(insp.field("billingContact"), oldInfo.billingContact()));

    // Store under the tenant lock so concurrent updates are not lost.
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}
/** Merges an address update over the old address; an absent address object keeps the old one. */
private TenantInfoAddress updateTenantInfoAddress(Inspector addressObject, TenantInfoAddress oldAddress) {
    if ( ! addressObject.valid()) return oldAddress;
    return TenantInfoAddress.EMPTY
            .withCountry(getString(addressObject.field("country"), oldAddress.country()))
            .withStateRegionProvince(getString(addressObject.field("stateRegionProvince"), oldAddress.stateRegionProvince()))
            .withCity(getString(addressObject.field("city"), oldAddress.city()))
            .withPostalCodeOrZip(getString(addressObject.field("postalCodeOrZip"), oldAddress.postalCodeOrZip()))
            .withAddressLines(getString(addressObject.field("addressLines"), oldAddress.addressLines()));
}
/** Merges a billing-contact update over the old contact; an absent contact object keeps the old one. */
private TenantInfoBillingContact updateTenantInfoBillingContact(Inspector contactObject, TenantInfoBillingContact oldContact) {
    if ( ! contactObject.valid()) return oldContact;
    return TenantInfoBillingContact.EMPTY
            .withName(getString(contactObject.field("name"), oldContact.name()))
            .withEmail(getString(contactObject.field("email"), oldContact.email()))
            .withPhone(getString(contactObject.field("phone"), oldContact.phone()))
            .withAddress(updateTenantInfoAddress(contactObject.field("address"), oldContact.address()));
}
/**
 * Lists applications under the given tenant, optionally filtered to a single application name,
 * with links to each application and its instances (production-only when so requested).
 */
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().get(tenantName).isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    Slime slime = new Slime();
    Cursor applicationArray = slime.setArray();
    for (com.yahoo.vespa.hosted.controller.Application application : controller.applications().asList(tenant)) {
        // Empty filter means "list all applications".
        if (applicationName.map(application.id().application().value()::equals).orElse(true)) {
            Cursor applicationObject = applicationArray.addObject();
            applicationObject.setString("tenant", application.id().tenant().value());
            applicationObject.setString("application", application.id().application().value());
            applicationObject.setString("url", withPath("/application/v4" +
                                                        "/tenant/" + application.id().tenant().value() +
                                                        "/application/" + application.id().application().value(),
                                                        request.getUri()).toString());
            Cursor instanceArray = applicationObject.setArray("instances");
            // Optionally restrict the instance list to production instances.
            for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
                                                                              : application.instances().keySet()) {
                Cursor instanceObject = instanceArray.addObject();
                instanceObject.setString("instance", instance.value());
                instanceObject.setString("url", withPath("/application/v4" +
                                                         "/tenant/" + application.id().tenant().value() +
                                                         "/application/" + application.id().application().value() +
                                                         "/instance/" + instance.value(),
                                                         request.getUri()).toString());
            }
        }
    }
    return new SlimeJsonResponse(slime);
}
/** Returns the application package last deployed directly to the given manually deployed zone, as a zip. */
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
    // Dev packages exist only for zones deployed to directly, not through the deployment pipeline.
    if ( ! type.environment().isManuallyDeployed())
        throw new IllegalArgumentException("Only manually deployed zones have dev packages");

    ZoneId zone = type.zone(controller.system());
    byte[] content = controller.applications().applicationStore().getDev(id, zone);
    return new ZipResponse(id.toFullString() + "." + zone.value() + ".zip", content);
}
/**
 * Returns a stored application package as a zip, selected by the optional "build" query
 * parameter; without it, the latest submitted build is used.
 */
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
    var tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
    var applicationId = ApplicationId.from(tenantName, applicationName, InstanceName.defaultName().value());
    long buildNumber;
    // Parse the requested build number, if given; reject non-numeric values explicitly.
    var requestedBuild = Optional.ofNullable(request.getProperty("build")).map(build -> {
        try {
            return Long.parseLong(build);
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("Invalid build number", e);
        }
    });
    if (requestedBuild.isEmpty()) { // Fall back to the latest build when none is requested.
        var application = controller.applications().requireApplication(tenantAndApplication);
        var latestBuild = application.latestVersion().map(ApplicationVersion::buildNumber).orElse(OptionalLong.empty());
        if (latestBuild.isEmpty()) {
            throw new NotExistsException("No application package has been submitted for '" + tenantAndApplication + "'");
        }
        buildNumber = latestBuild.getAsLong();
    } else {
        buildNumber = requestedBuild.get();
    }
    var applicationPackage = controller.applications().applicationStore().find(tenantAndApplication.tenant(), tenantAndApplication.application(), buildNumber);
    var filename = tenantAndApplication + "-build" + buildNumber + ".zip";
    if (applicationPackage.isEmpty()) {
        throw new NotExistsException("No application package found for '" +
                                     tenantAndApplication +
                                     "' with build number " + buildNumber);
    }
    return new ZipResponse(filename, applicationPackage.get());
}
/** Serializes the named application, or throws NotExistsException when absent. */
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
    var application = getApplication(tenantName, applicationName);
    Slime slime = new Slime();
    toSlime(slime.setObject(), application, request);
    return new SlimeJsonResponse(slime);
}
/** Returns the Vespa version this application should currently compile against. */
private HttpResponse compileVersion(String tenantName, String applicationName) {
    var version = compileVersion(TenantAndApplicationId.from(tenantName, applicationName));
    Slime slime = new Slime();
    slime.setObject().setString("compileVersion", version.toFullString());
    return new SlimeJsonResponse(slime);
}
/** Serializes a single instance, including its deployment status. */
private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    // Resolve the instance before the application, matching the original lookup order
    // (and hence which "not found" error wins when both are missing).
    Instance instance = getInstance(tenantName, applicationName, instanceName);
    DeploymentStatus status = controller.jobController().deploymentStatus(getApplication(tenantName, applicationName));
    Slime slime = new Slime();
    toSlime(slime.setObject(), instance, status, request);
    return new SlimeJsonResponse(slime);
}
/**
 * Registers the PEM public key in the request body as a developer key for the calling user,
 * and returns the resulting full set of developer keys. Cloud tenants only.
 */
private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    Principal user = request.getJDiscRequest().getUserPrincipal();
    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    Slime root = new Slime();
    // The response is built inside the locked update so it reflects the stored state.
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withDeveloperKey(developerKey, user);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}
/**
 * Validates a configured tenant secret store by asking the config server of one of the
 * tenant's active deployments to access it. Requires a cloud tenant with at least one
 * active deployment and a secret store configured under the given name.
 */
private HttpResponse validateSecretStore(String tenantName, String name) {
    var tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Tenant '" + tenant + "' is not a cloud tenant");

    var cloudTenant = (CloudTenant)controller.tenants().require(tenant);
    var tenantSecretStore = cloudTenant.tenantSecretStores()
            .stream()
            .filter(secretStore -> secretStore.getName().equals(name))
            .findFirst();
    // Validation must run against a real deployment; pick any active one.
    var deployment = getActiveDeployment(tenant);
    if (deployment.isEmpty())
        return ErrorResponse.badRequest("Tenant '" + tenantName + "' has no active deployments");
    if (tenantSecretStore.isEmpty())
        return ErrorResponse.notFoundError("No secret store '" + name + "' configured for tenant '" + tenantName + "'");

    var response = controller.serviceRegistry().configServer().validateSecretStore(deployment.get(), tenantSecretStore.get());
    return new MessageResponse(response);
}
/**
 * Returns some active deployment of the given tenant: the first deployment found of the
 * first instance (of the first application) that has any deployments, or empty if none.
 */
private Optional<DeploymentId> getActiveDeployment(TenantName tenant) {
    for (var application : controller.applications().asList(tenant))
        for (var instance : application.instances().values())
            if (instance.deployments().keySet().size() > 0) {
                var zone = instance.deployments().keySet().stream().findFirst().orElseThrow();
                return Optional.of(new DeploymentId(instance.id(), zone));
            }
    return Optional.empty();
}
/**
 * Removes the PEM public key in the request body from the tenant's developer keys,
 * and returns the resulting full set of developer keys. Cloud tenants only.
 */
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    // Removed a dead lookup of the key's owning Principal: it was computed but never used.
    Slime root = new Slime();
    // The response is built inside the locked update so it reflects the stored state.
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withoutDeveloperKey(developerKey);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}
/** Writes one { key, user } object per developer key, in the map's iteration order. */
private void toSlime(Cursor keysArray, Map<PublicKey, Principal> keys) {
    for (Map.Entry<PublicKey, Principal> entry : keys.entrySet()) {
        Cursor keyObject = keysArray.addObject();
        keyObject.setString("key", KeyUtils.toPem(entry.getKey()));
        keyObject.setString("user", entry.getValue().getName());
    }
}
/**
 * Adds the PEM public key in the request body as a deploy key for the application,
 * and returns the resulting full set of deploy keys.
 */
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
    String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
    Slime root = new Slime();
    // The response is built inside the locked update so it reflects the stored state.
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withDeployKey(deployKey);
        application.get().deployKeys().stream()
                   .map(KeyUtils::toPem)
                   .forEach(root.setObject().setArray("keys")::addString);
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}
/**
 * Removes the PEM public key in the request body from the application's deploy keys,
 * and returns the resulting full set of deploy keys.
 */
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
    String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
    Slime root = new Slime();
    // The response is built inside the locked update so it reflects the stored state.
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withoutDeployKey(deployKey);
        application.get().deployKeys().stream()
                   .map(KeyUtils::toPem)
                   .forEach(root.setObject().setArray("keys")::addString);
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}
/**
 * Configures a new tenant secret store from the awsId/externalId/role fields of the request
 * body, creating the backing tenant policy and registering the store before persisting it
 * on the tenant. Cloud tenants only.
 */
private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

    var data = toSlime(request.getData()).get();
    var awsId = mandatory("awsId", data).asString();
    var externalId = mandatory("externalId", data).asString();
    var role = mandatory("role", data).asString();

    var tenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
    var tenantSecretStore = new TenantSecretStore(name, awsId, role);

    // Fixed: these messages were passed through String.format with '+'-concatenated data as
    // the format string, so a '%' in the store's toString would throw IllegalFormatException.
    if ( ! tenantSecretStore.isValid()) {
        return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid");
    }
    if (tenant.tenantSecretStores().contains(tenantSecretStore)) {
        return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured");
    }

    // External side effects first; the tenant is only updated once these succeed.
    controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role);
    controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId);
    // Store the new secret store under the tenant lock.
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withSecretStore(tenantSecretStore);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Configured secret store: " + tenantSecretStore);
}
/**
 * Patches mutable application fields from the request body. Supported fields:
 * "majorVersion" (0 clears the pinned major) and "pemDeployKey" (adds a deploy key).
 */
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        Inspector majorVersionField = requestObject.field("majorVersion");
        if (majorVersionField.valid()) {
            // 0 is the sentinel for "unpin the major version".
            Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
            application = application.withMajorVersion(majorVersion);
            messageBuilder.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
        }

        Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
        if (pemDeployKeyField.valid()) {
            String pemDeployKey = pemDeployKeyField.asString();
            PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
            application = application.withDeployKey(deployKey);
            messageBuilder.add("Added deploy key " + pemDeployKey);
        }

        controller.applications().store(application);
    });
    return new MessageResponse(messageBuilder.toString());
}
/** Looks up the application, throwing NotExistsException when it does not exist. */
private com.yahoo.vespa.hosted.controller.Application getApplication(String tenantName, String applicationName) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    return controller.applications().getApplication(id)
                     .orElseThrow(() -> new NotExistsException(id + " not found"));
}
/** Looks up the instance, throwing NotExistsException when it does not exist. */
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    return controller.applications().getInstance(id)
                     .orElseThrow(() -> new NotExistsException(id + " not found"));
}
/** Lists the nodes allocated to a deployment, with state, version, flavor and cluster details. */
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, id);

    Slime slime = new Slime();
    Cursor nodesArray = slime.setObject().setArray("nodes");
    for (Node node : nodes) {
        Cursor nodeObject = nodesArray.addObject();
        nodeObject.setString("hostname", node.hostname().value());
        nodeObject.setString("state", valueOf(node.state()));
        node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
        nodeObject.setString("orchestration", valueOf(node.serviceState()));
        nodeObject.setString("version", node.currentVersion().toString());
        nodeObject.setString("flavor", node.flavor());
        toSlime(node.resources(), nodeObject);
        // Kept alongside the resources object for backwards compatibility with clients reading "fastDisk".
        nodeObject.setBool("fastDisk", node.resources().diskSpeed() == NodeResources.DiskSpeed.fast);
        nodeObject.setString("clusterId", node.clusterId());
        nodeObject.setString("clusterType", valueOf(node.clusterType()));
    }
    return new SlimeJsonResponse(slime);
}
/**
 * Reports the autoscaling view of a deployment's clusters: configured min/max, current
 * resources, target and suggested resources, utilization, and recent scaling events.
 */
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);

    Slime slime = new Slime();
    Cursor clustersObject = slime.setObject().setObject("clusters");
    for (Cluster cluster : application.clusters().values()) {
        Cursor clusterObject = clustersObject.setObject(cluster.id().value());
        toSlime(cluster.min(), clusterObject.setObject("min"));
        toSlime(cluster.max(), clusterObject.setObject("max"));
        toSlime(cluster.current(), clusterObject.setObject("current"));
        // Only expose a target when it differs from the current resources (ignoring non-numeric fields).
        if (cluster.target().isPresent()
            && ! cluster.target().get().justNumbers().equals(cluster.current().justNumbers()))
            toSlime(cluster.target().get(), clusterObject.setObject("target"));
        cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested")));
        utilizationToSlime(cluster.utilization(), clusterObject.setObject("utilization"));
        scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
        clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus());
    }
    return new SlimeJsonResponse(slime);
}
/** Maps a node state to its wire name; throws on states not (yet) exposed through this API. */
private static String valueOf(Node.State state) {
    switch (state) {
        case failed: return "failed";
        case parked: return "parked";
        case dirty: return "dirty";
        case ready: return "ready";
        case active: return "active";
        case inactive: return "inactive";
        case reserved: return "reserved";
        case provisioned: return "provisioned";
        // Deliberately throws rather than leaking internal state names to clients.
        default: throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
    }
}
/** Maps an orchestration state to its wire name; anything unrecognized maps to "unknown". */
static String valueOf(Node.ServiceState state) {
    switch (state) {
        case expectedUp:      return "expectedUp";
        case allowedDown:     return "allowedDown";
        case permanentlyDown: return "permanentlyDown";
        case unorchestrated:  return "unorchestrated";
        default:              return "unknown";
    }
}
/** Maps a cluster type to its wire name; throws on types not exposed through this API. */
private static String valueOf(Node.ClusterType type) {
    switch (type) {
        case admin: return "admin";
        case content: return "content";
        case container: return "container";
        case combined: return "combined";
        default: throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
    }
}
/** Maps a disk speed to its wire name; throws on values not exposed through this API. */
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
    switch (diskSpeed) {
        case fast : return "fast";
        case slow : return "slow";
        case any : return "any";
        default: throw new IllegalArgumentException("Unknown disk speed '" + diskSpeed.name() + "'");
    }
}
/** Maps a storage type to its wire name; throws on values not exposed through this API. */
private static String valueOf(NodeResources.StorageType storageType) {
    switch (storageType) {
        case remote : return "remote";
        case local : return "local";
        case any : return "any";
        default: throw new IllegalArgumentException("Unknown storage type '" + storageType.name() + "'");
    }
}
/**
 * Streams Vespa logs for a deployment, passing the query parameters through to the
 * config server's log API unchanged.
 */
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    DeploymentId deployment = new DeploymentId(application, zone);
    InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
    // Stream the log bytes through without buffering them in memory here.
    return new HttpResponse(200) {
        @Override
        public void render(OutputStream outputStream) throws IOException {
            logStream.transferTo(outputStream);
        }
    };
}
/** Returns proton (content node) metrics for the given deployment. */
private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    List<ProtonMetrics> protonMetrics = controller.serviceRegistry().configServer().getProtonMetrics(deployment);
    return buildResponseFromProtonMetrics(protonMetrics);
}
/**
 * Wraps the given proton metrics in a {"metrics": [...]} JSON response; serialization
 * failures are logged and surfaced as an empty 500 response rather than propagated.
 */
private JsonResponse buildResponseFromProtonMetrics(List<ProtonMetrics> protonMetrics) {
    try {
        var jsonObject = jsonMapper.createObjectNode();
        var jsonArray = jsonMapper.createArrayNode();
        for (ProtonMetrics metrics : protonMetrics) {
            jsonArray.add(metrics.toJson());
        }
        jsonObject.set("metrics", jsonArray);
        return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonObject));
    } catch (JsonProcessingException e) {
        log.log(Level.SEVERE, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
        return new JsonResponse(500, "");
    }
}
/**
 * Triggers the given job for the given instance. Request-body flags: "reTrigger" re-triggers
 * the job via the deployment trigger's reTrigger path; otherwise the job is force-triggered,
 * with tests skipped when "skipTests" is set.
 */
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    boolean requireTests = ! requestObject.field("skipTests").asBool();
    boolean reTrigger = requestObject.field("reTrigger").asBool();
    // forceTrigger may trigger zero or more jobs; join their names for the message.
    String triggered = reTrigger
                       ? controller.applications().deploymentTrigger()
                                   .reTrigger(id, type).type().jobName()
                       : controller.applications().deploymentTrigger()
                                   .forceTrigger(id, type, request.getJDiscRequest().getUserPrincipal().getName(), requireTests)
                                   .stream().map(job -> job.type().jobName()).collect(joining(", "));
    return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
                                                   : "Triggered " + triggered + " for " + id);
}
/** Pauses the given job; pauses are always bounded by the deployment trigger's maximum pause. */
private HttpResponse pause(ApplicationId id, JobType type) {
    Instant pauseUntil = controller.clock().instant().plus(DeploymentTrigger.maxPause);
    controller.applications().deploymentTrigger().pauseJob(id, type, pauseUntil);
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
/** Lifts any pause previously set on the given job. */
private HttpResponse resume(ApplicationId id, JobType type) {
    controller.applications().deploymentTrigger().resumeJob(id, type);
    return new MessageResponse(type.jobName() + " for " + id + " resumed");
}
/**
 * Serializes an application: identity, latest submitted version, in-flight and outstanding
 * changes (from the first instance), all (or production-only) instances, deploy keys,
 * quality metrics, activity, and ownership/issue references.
 */
private void toSlime(Cursor object, com.yahoo.vespa.hosted.controller.Application application, HttpRequest request) {
    object.setString("tenant", application.id().tenant().value());
    object.setString("application", application.id().application().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + application.id().tenant().value() +
                                             "/application/" + application.id().application().value() +
                                             "/job/",
                                             request.getUri()).toString());

    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.latestVersion().ifPresent(version -> toSlime(version, object.setObject("latestVersion")));

    application.projectId().ifPresent(id -> object.setLong("projectId", id));

    // NOTE(review): the application-level "deploying"/"outstandingChange" is taken from the
    // first instance only — presumably a legacy single-instance view; verify with API clients.
    application.instances().values().stream().findFirst().ifPresent(instance -> {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
    });

    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));

    Cursor instancesArray = object.setArray("instances");
    for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                  : application.instances().values())
        toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);

    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);

    // Metrics
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());

    // Activity
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/**
 * Serializes one instance within an application listing: in-flight and outstanding changes,
 * change blockers from the deployment spec, global endpoints, and its deployments (expanded
 * inline when recursion is requested, as links otherwise).
 */
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
    object.setString("instance", instance.name().value());
    if (deploymentSpec.instance(instance.name()).isPresent()) {
        // NOTE(review): jobStatus is computed but never used in this method — candidate for
        // removal; verify sortedJobs has no side effects callers depend on.
        List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
                                              .steps(deploymentSpec.requireInstance(instance.name()))
                                              .sortedJobs(status.instanceJobs(instance.name()).values());

        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));

        // Change blockers
        Cursor changeBlockers = object.setArray("changeBlockers");
        deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }

    globalEndpointsToSlime(object, instance);

    // Deployments, in deployment-spec order when the instance has a spec.
    List<Deployment> deployments = deploymentSpec.instance(instance.name())
                                                 .map(spec -> new DeploymentSteps(spec, controller::system))
                                                 .map(steps -> steps.sortedDeployments(instance.deployments().values()))
                                                 .orElse(List.copyOf(instance.deployments().values()));
    Cursor deploymentsArray = object.setArray("deployments");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = deploymentsArray.addObject();

        // Rotation status is only relevant for production deployments of rotation-assigned instances.
        if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);

        if (recurseOverDeployments(request)) // List full deployment information when recursive.
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/instance/" + instance.name().value() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
}
/** Writes the instance's distinct non-legacy rotation endpoint URLs, plus its first rotation id if any. */
private void globalEndpointsToSlime(Cursor object, Instance instance) {
    // LinkedHashSet: de-duplicate while preserving discovery order.
    var globalEndpointUrls = new LinkedHashSet<String>();
    for (Endpoint endpoint : controller.routing().endpointsOf(instance.id())
                                       .requiresRotation()
                                       .not().legacy()
                                       .asList())
        globalEndpointUrls.add(endpoint.url().toString());

    var globalRotationsArray = object.setArray("globalRotations");
    globalEndpointUrls.forEach(globalRotationsArray::addString);

    // Expose the first assigned rotation id, if the instance has any.
    instance.rotations().stream()
            .map(AssignedRotation::rotationId)
            .findFirst()
            .ifPresent(rotation -> object.setString("rotationId", rotation.asString()));
}
/**
 * Serializes the given instance to an application/instance overview object.
 *
 * Writes identity fields, the latest submitted build, change and change-blocker status (when the
 * instance is in the deployment spec), endpoints, per-zone deployments (shallow links or recursed,
 * depending on the request), deploy keys, metrics, activity and issue/ownership references.
 *
 * @param object   response cursor to fill
 * @param instance the instance to serialize
 * @param status   deployment status of the instance's application; also supplies the application
 * @param request  incoming request, used to build absolute URLs and read the recursion option
 */
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
    com.yahoo.vespa.hosted.controller.Application application = status.application();
    object.setString("tenant", instance.id().tenant().value());
    object.setString("application", instance.id().application().value());
    object.setString("instance", instance.id().instance().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + instance.id().tenant().value() +
                                             "/application/" + instance.id().application().value() +
                                             "/instance/" + instance.id().instance().value() + "/job/",
                                             request.getUri()).toString());
    // Latest submitted build, with source info when known.
    application.latestVersion().ifPresent(version -> {
        sourceRevisionToSlime(version.source(), object.setObject("source"));
        version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
        version.commit().ifPresent(commit -> object.setString("commit", commit));
    });
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    // Change and change-blocker information only applies when the instance is in the deployment spec.
    if (application.deploymentSpec().instance(instance.name()).isPresent()) {
        // NOTE(review): jobStatus is never read below — presumably leftover or computed for side
        // effects of steps(...); confirm before removing.
        List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
                                              .steps(application.deploymentSpec().requireInstance(instance.name()))
                                              .sortedJobs(status.instanceJobs(instance.name()).values());
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    globalEndpointsToSlime(object, instance);
    // Deployments ordered by the deployment spec when available, otherwise in map order.
    List<Deployment> deployments =
            application.deploymentSpec().instance(instance.name())
                       .map(spec -> new DeploymentSteps(spec, controller::system))
                       .map(steps -> steps.sortedDeployments(instance.deployments().values()))
                       .orElse(List.copyOf(instance.deployments().values()));
    Cursor instancesArray = object.setArray("instances");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = instancesArray.addObject();
        if (deployment.zone().environment() == Environment.prod) {
            // With exactly one rotation, its status is written directly on the deployment object.
            if (instance.rotations().size() == 1) {
                toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment),
                        deploymentObject);
            }
            if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
            }
        }
        if (recurseOverDeployments(request))
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            // Shallow format: zone identity plus a link to the full deployment resource.
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", instance.id().instance().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
    // Also list production zones required by the spec but not yet deployed to (environment/region only).
    status.jobSteps().keySet().stream()
          .filter(job -> job.application().instance().equals(instance.name()))
          .filter(job -> job.type().isProduction() && job.type().isDeployment())
          .map(job -> job.type().zone(controller.system()))
          .filter(zone -> ! instance.deployments().containsKey(zone))
          .forEach(zone -> {
              Cursor deploymentObject = instancesArray.addObject();
              deploymentObject.setString("environment", zone.environment().value());
              deploymentObject.setString("region", zone.region().value());
          });
    // Singular "pemDeployKey" (first key only) is presumably kept for backwards compatibility;
    // "pemDeployKeys" lists all keys — confirm before removing either.
    application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/** Returns the details of a single deployment of the given instance, or throws NotExistsException. */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment,
                                String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().getInstance(id)
                                  .orElseThrow(() -> new NotExistsException(id + " not found"));
    DeploymentId deploymentId = new DeploymentId(instance.id(), requireZone(environment, region));
    Deployment deployment = instance.deployments().get(deploymentId.zoneId());
    if (deployment == null)
        throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId());
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    toSlime(root, deploymentId, deployment, request);
    return new SlimeJsonResponse(slime);
}
/** Serializes the platform and (known) application parts of the given change. */
private void toSlime(Cursor object, Change change) {
    if (change.platform().isPresent())
        object.setString("version", change.platform().get().toString());
    // Unknown application versions carry no useful information and are omitted.
    change.application().ifPresent(revision -> {
        if ( ! revision.isUnknown())
            toSlime(revision, object.setObject("revision"));
    });
}
/** Serializes one endpoint: cluster, TLS flag, URL, scope and routing method. */
private void toSlime(Endpoint endpoint, Cursor object) {
    // Field order is kept stable for response compatibility.
    object.setString("cluster", endpoint.cluster().value());
    object.setBool("tls", endpoint.tls());
    object.setString("url", endpoint.url().toString());
    String scope = endpointScopeString(endpoint.scope());
    object.setString("scope", scope);
    String routingMethod = routingMethodString(endpoint.routingMethod());
    object.setString("routingMethod", routingMethod);
}
/**
 * Serializes one deployment in full: identity, endpoints, links to related resources,
 * versions, rotation status, job status, activity and metrics.
 *
 * @param response     response cursor to fill
 * @param deploymentId identity of the deployment
 * @param deployment   the deployment data to serialize
 * @param request      incoming request, used to build absolute URLs
 */
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
    response.setString("tenant", deploymentId.applicationId().tenant().value());
    response.setString("application", deploymentId.applicationId().application().value());
    response.setString("instance", deploymentId.applicationId().instance().value());
    response.setString("environment", deploymentId.zoneId().environment().value());
    response.setString("region", deploymentId.zoneId().region().value());
    var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));
    // Endpoints: zone-scoped first, then non-legacy global endpoints targeting this zone.
    var endpointArray = response.setArray("endpoints");
    EndpointList zoneEndpoints = controller.routing().endpointsOf(deploymentId)
                                           .scope(Endpoint.Scope.zone)
                                           .not().legacy();
    for (var endpoint : controller.routing().directEndpoints(zoneEndpoints, deploymentId.applicationId())) {
        toSlime(endpoint, endpointArray.addObject());
    }
    EndpointList globalEndpoints = controller.routing().endpointsOf(application, deploymentId.applicationId().instance())
                                             .not().legacy()
                                             .targets(deploymentId.zoneId());
    for (var endpoint : controller.routing().directEndpoints(globalEndpoints, deploymentId.applicationId())) {
        toSlime(endpoint, endpointArray.addObject());
    }
    // Links to the cluster, node and monitoring views of this deployment.
    response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
    response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
    response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
    response.setString("version", deployment.version().toFullString());
    response.setString("revision", deployment.applicationVersion().id());
    response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
    // Expiry is only present for zones with a deployment time-to-live configured.
    controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
              .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));
    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
    sourceRevisionToSlime(deployment.applicationVersion().source(), response);
    var instance = application.instances().get(deploymentId.applicationId().instance());
    if (instance != null) {
        // Rotation status applies only to production deployments of instances with rotations.
        if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);
        // Derive a job status for the job deploying to this zone, if such a job exists:
        // "complete" when no run is needed, "pending" when not yet ready to run, otherwise "running".
        JobType.from(controller.system(), deployment.zone())
               .map(type -> new JobId(instance.id(), type))
               .map(status.jobSteps()::get)
               .ifPresent(stepStatus -> {
                   JobControllerApiHandlerHelper.applicationVersionToSlime(
                           response.setObject("applicationVersion"), deployment.applicationVersion());
                   if (!status.jobsToRun().containsKey(stepStatus.job().get()))
                       response.setString("status", "complete");
                   else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true))
                       response.setString("status", "pending");
                   else response.setString("status", "running");
               });
    }
    Cursor activity = response.setObject("activity");
    deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    DeploymentMetrics metrics = deployment.metrics();
    Cursor metricsObject = response.setObject("metrics");
    metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
    metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
    metricsObject.setDouble("documentCount", metrics.documentCount());
    metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
    metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
    metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}
/** Serializes an application version: build number, hash and source info. Unknown versions are written as empty. */
private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
    if (applicationVersion.isUnknown()) return; // nothing useful to report
    object.setLong("buildNumber", applicationVersion.buildNumber().getAsLong());
    object.setString("hash", applicationVersion.id());
    sourceRevisionToSlime(applicationVersion.source(), object.setObject("source"));
    if (applicationVersion.sourceUrl().isPresent())
        object.setString("sourceUrl", applicationVersion.sourceUrl().get());
    if (applicationVersion.commit().isPresent())
        object.setString("commit", applicationVersion.commit().get());
}
/** Serializes the git repository, branch and commit of the given source revision, if present. */
private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
    revision.ifPresent(source -> {
        object.setString("gitRepository", source.repository());
        object.setString("gitBranch", source.branch());
        object.setString("gitCommit", source.commit());
    });
}
/** Writes the rotation state under a "bcpStatus" object. */
private void toSlime(RotationState state, Cursor object) {
    object.setObject("bcpStatus").setString("rotationStatus", rotationStateString(state));
}
/** Writes the status of each assigned rotation of the given deployment to an "endpointStatus" array. */
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
    Cursor endpointStatusArray = object.setArray("endpointStatus");
    for (AssignedRotation assignedRotation : rotations) {
        Cursor entry = endpointStatusArray.addObject();
        var rotationTargets = status.of(assignedRotation.rotationId());
        entry.setString("endpointId", assignedRotation.endpointId().id());
        entry.setString("rotationId", assignedRotation.rotationId().asString());
        entry.setString("clusterId", assignedRotation.clusterId().value());
        entry.setString("status", rotationStateString(status.of(assignedRotation.rotationId(), deployment)));
        entry.setLong("lastUpdated", rotationTargets.lastUpdated().toEpochMilli());
    }
}
/** Returns the monitoring system (dashboard) URI for the given deployment, as configured in the zone registry. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
    return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}
/**
 * Returns a non-broken, released version at least as old as the oldest platform the given application is on.
 *
 * If no known version is applicable, the newest version at least as old as the oldest platform is selected,
 * among all versions released for this system. If no such version exists, an IllegalStateException is thrown.
 */
private Version compileVersion(TenantAndApplicationId id) {
    Version oldestPlatform = controller.applications().oldestInstalledPlatform(id);
    VersionStatus versionStatus = controller.readVersionStatus();
    return versionStatus.versions().stream()
                        // Exclude broken (low-confidence-excluded) and never-released versions.
                        .filter(version -> version.confidence().equalOrHigherThan(VespaVersion.Confidence.low))
                        .filter(VespaVersion::isReleased)
                        .map(VespaVersion::versionNumber)
                        .filter(version -> ! version.isAfter(oldestPlatform))
                        .max(Comparator.naturalOrder())
                        // Fallback: versions known only from the maven repository (i.e. not present in the
                        // version status at all), still capped by the oldest installed platform.
                        .orElseGet(() -> controller.mavenRepository().metadata().versions().stream()
                                                   .filter(version -> ! version.isAfter(oldestPlatform))
                                                   .filter(version -> ! versionStatus.versions().stream()
                                                                                     .map(VespaVersion::versionNumber)
                                                                                     .collect(Collectors.toSet()).contains(version))
                                                   .max(Comparator.naturalOrder())
                                                   .orElseThrow(() -> new IllegalStateException("No available releases of " +
                                                                                                controller.mavenRepository().artifactId())));
}
/** Sets a deployment in or out of service, for both rotation-backed and policy-backed global routing. */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().requireInstance(id);
    ZoneId zone = requireZone(environment, region);
    if ( ! instance.deployments().containsKey(zone))
        throw new NotExistsException(instance + " has no deployment in " + zone);
    DeploymentId deploymentId = new DeploymentId(instance.id(), zone);
    setGlobalRotationStatus(deploymentId, inService, request);
    setGlobalEndpointStatus(deploymentId, inService, request);
    String direction = inService ? "in" : "out of";
    return new MessageResponse(String.format("Successfully set %s in %s %s service",
                                             instance.id().toShortString(), zone, direction));
}
/** Set the global endpoint status for given deployment. This only applies to global endpoints backed by a cloud service */
private void setGlobalEndpointStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
    // Record whether an operator or the tenant made the change.
    var agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant;
    var status = inService ? GlobalRouting.Status.in : GlobalRouting.Status.out;
    controller.routing().policies().setGlobalRoutingStatus(deployment, status, agent);
}
/** Sets the global rotation status for the given deployment. This only applies to global endpoints backed by a rotation. */
private void setGlobalRotationStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
    var requestData = toSlime(request.getData()).get();
    // A reason is required in the request body, for auditability.
    var reason = mandatory("reason", requestData).asString();
    var agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant;
    long timestamp = controller.clock().instant().getEpochSecond();
    EndpointStatus.Status status;
    if (inService)
        status = EndpointStatus.Status.in;
    else
        status = EndpointStatus.Status.out;
    controller.routing().setGlobalRotationStatus(deployment, new EndpointStatus(status, reason, agent.name(), timestamp));
}
/** Returns the global rotation in/out-of-service overrides for the given deployment. */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    Cursor array = slime.setObject().setArray("globalrotationoverride");
    controller.routing().globalRotationStatus(deploymentId)
              .forEach((endpoint, status) -> {
                  // NOTE(review): the array interleaves an upstream-name string with its status object —
                  // presumably a legacy wire format; confirm with consumers before changing.
                  array.addString(endpoint.upstreamIdOf(deploymentId));
                  Cursor statusObject = array.addObject();
                  statusObject.setString("status", status.getStatus().name());
                  // Reason and agent may be null; emit empty strings instead.
                  statusObject.setString("reason", status.getReason() == null ? "" : status.getReason());
                  statusObject.setString("agent", status.getAgent() == null ? "" : status.getAgent());
                  statusObject.setLong("timestamp", status.getEpoch());
              });
    return new SlimeJsonResponse(slime);
}
/** Returns the BCP status of one rotation of the given deployment. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().requireInstance(applicationId);
    ZoneId zone = requireZone(environment, region);
    RotationId rotation = findRotationId(instance, endpointId);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " has no deployment in " + zone);
    Slime slime = new Slime();
    toSlime(instance.rotationStatus().of(rotation, deployment), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/**
 * Returns resource metering data for the given tenant and application: the current consumption
 * rate, aggregates for this and last month, and a per-instance cpu/mem/disk time series.
 *
 * @param tenant      name of the tenant
 * @param application name of the application
 * @param request     the incoming request (unused, kept for handler-signature consistency)
 */
private HttpResponse metering(String tenant, String application, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    MeteringData meteringData = controller.serviceRegistry()
                                          .meteringService()
                                          .getMeteringData(TenantName.from(tenant), ApplicationName.from(application));
    // The three summary objects share the same cpu/mem/disk layout.
    resourcesToSlime(root.setObject("currentrate"), meteringData.getCurrentSnapshot());
    resourcesToSlime(root.setObject("thismonth"), meteringData.getThisMonth());
    resourcesToSlime(root.setObject("lastmonth"), meteringData.getLastMonth());
    // History: one { unixms, value } series per instance, per resource type.
    Map<ApplicationId, List<ResourceSnapshot>> history = meteringData.getSnapshotHistory();
    Cursor details = root.setObject("details");
    Cursor detailsCpu = details.setObject("cpu");
    Cursor detailsMem = details.setObject("mem");
    Cursor detailsDisk = details.setObject("disk");
    history.forEach((applicationId, resources) -> {
        String instanceName = applicationId.instance().value();
        Cursor detailsCpuData = detailsCpu.setObject(instanceName).setArray("data");
        Cursor detailsMemData = detailsMem.setObject(instanceName).setArray("data");
        Cursor detailsDiskData = detailsDisk.setObject(instanceName).setArray("data");
        for (ResourceSnapshot snapshot : resources) {
            long timestamp = snapshot.getTimestamp().toEpochMilli();
            addTimeSeriesPoint(detailsCpuData, timestamp, snapshot.getCpuCores());
            addTimeSeriesPoint(detailsMemData, timestamp, snapshot.getMemoryGb());
            addTimeSeriesPoint(detailsDiskData, timestamp, snapshot.getDiskGb());
        }
    });
    return new SlimeJsonResponse(slime);
}

/** Writes the cpu, mem and disk fields of the given allocation to the given object. */
private static void resourcesToSlime(Cursor object, ResourceAllocation allocation) {
    object.setDouble("cpu", allocation.getCpuCores());
    object.setDouble("mem", allocation.getMemoryGb());
    object.setDouble("disk", allocation.getDiskGb());
}

/** Adds a { unixms, value } point to the given time series array. */
private static void addTimeSeriesPoint(Cursor array, long timestampMillis, double value) {
    Cursor point = array.addObject();
    point.setLong("unixms", timestampMillis);
    point.setDouble("value", value);
}
/** Returns the change currently rolling out to the given instance, if any. */
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    Change change = instance.change();
    // An empty change yields an empty object.
    if ( ! change.isEmpty()) {
        change.platform().ifPresent(version -> root.setString("platform", version.toString()));
        change.application().ifPresent(applicationVersion -> root.setString("application", applicationVersion.id()));
        root.setBool("pinned", change.isPinned());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns whether the given deployment is currently suspended. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    slime.setObject().setBool("suspended", controller.applications().isSuspended(deploymentId));
    return new SlimeJsonResponse(slime);
}
/** Lists the services of the given deployment, as reported by the config server. */
private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
    ZoneId zone = requireZone(environment, region);
    ApplicationId id = new ApplicationId.Builder().tenant(tenantName)
                                                  .applicationName(applicationName)
                                                  .instanceName(instanceName)
                                                  .build();
    ServiceApiResponse response = new ServiceApiResponse(zone,
                                                         id,
                                                         controller.zoneRegistry().getConfigServerApiUris(zone),
                                                         request.getUri());
    response.setResponse(applicationView);
    return response;
}
/**
 * Proxies a service API request for the given deployment to the config server.
 *
 * Cluster-controller status pages (paths of the form {@code <cluster>/status/<path>}) are
 * fetched separately and returned as HTML; everything else is returned through the generic
 * service API response.
 */
private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    if ("container-clustercontroller".equals(serviceName) && restPath.contains("/status/")) {
        // Split with limit 2 so any further "/status/" occurrences stay in the status path,
        // and so a trailing "/status/" yields an empty second part instead of being dropped.
        String[] parts = restPath.split("/status/", 2);
        if (parts.length != 2 || parts[1].isEmpty())
            throw new IllegalArgumentException("Expected a path of the form <cluster>/status/<path>, got '" + restPath + "'");
        String result = controller.serviceRegistry().configServer().getClusterControllerStatus(deploymentId, parts[0], parts[1]);
        return new HtmlResponse(result);
    }
    Map<?,?> result = controller.serviceRegistry().configServer().getServiceApiResponse(deploymentId, serviceName, restPath);
    ServiceApiResponse response = new ServiceApiResponse(deploymentId.zoneId(),
                                                         deploymentId.applicationId(),
                                                         controller.zoneRegistry().getConfigServerApiUris(deploymentId.zoneId()),
                                                         request.getUri());
    response.setResponse(result, serviceName, restPath);
    return response;
}
/** Proxies application package content for the given deployment from the config server. */
private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, String restPath, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deploymentId = new DeploymentId(id, requireZone(environment, region));
    String path = "/" + restPath;
    return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, path, request.getUri());
}
/** Updates an existing tenant from the request body and returns its new representation. */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName); // 404 rather than implicitly creating on update
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    return tenant(controller.tenants().require(tenant), request);
}
/** Creates a new tenant from the request body and returns its representation. */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().create(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    return tenant(controller.tenants().require(tenant), request);
}
/** Creates a new application under the given tenant and returns its representation. */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    // The creation itself is the side effect we need; the returned Application was unused.
    controller.applications().createApplication(id, credentials);
    Slime slime = new Slime();
    toSlime(id, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Creates an instance of an application, creating the application first if it does not yet exist. */
private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    if ( ! controller.applications().getApplication(applicationId).isPresent())
        createApplication(tenantName, applicationName, request);
    ApplicationId instanceId = applicationId.instance(instanceName);
    controller.applications().createInstance(instanceId);
    Slime slime = new Slime();
    toSlime(instanceId, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/**
 * Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9".
 * An empty version means the current system version; the version must be active in this system.
 * When {@code pin} is true the change is pinned, preventing automatic upgrades away from it.
 */
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
    request = controller.auditLogger().log(request);
    String versionString = readToString(request.getData());
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    // Apply the change under the application lock so it is computed against current state.
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Version version = Version.fromString(versionString);
        VersionStatus versionStatus = controller.readVersionStatus();
        // An empty version in the request means "the current system version".
        if (version.equals(Version.emptyVersion))
            version = controller.systemVersion(versionStatus);
        if (!versionStatus.isActive(version))
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + versionStatus.versions()
                                                                                  .stream()
                                                                                  .map(VespaVersion::versionNumber)
                                                                                  .map(Version::toString)
                                                                                  .collect(joining(", ")));
        Change change = Change.of(version);
        if (pin)
            change = change.withPin();
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/**
 * Triggers deployment of the last known application package for the given application.
 *
 * @throws IllegalArgumentException if no application package has been submitted yet — previously
 *         this surfaced as an unchecked {@code Optional.get()} failure (server error) instead of
 *         a client error.
 */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    controller.auditLogger().log(request);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Change change = Change.of(application.get().latestVersion()
                                             .orElseThrow(() -> new IllegalArgumentException("No known application package for " + id)));
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Cancels (parts of) the ongoing change for the given application, e.g., everything with {"cancel":"all"}. */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Change change = application.get().require(id.instance()).change();
        if (change.isEmpty()) {
            response.append("No deployment in progress for ").append(id).append(" at this time");
            return;
        }
        // Use ROOT so the mapping is locale independent: in a Turkish default locale, e.g., "pin"
        // would otherwise upper-case to "PİN" and fail to match the enum constant.
        ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase(java.util.Locale.ROOT));
        controller.applications().deploymentTrigger().cancelChange(id, cancel);
        response.append("Changed deployment from '").append(change).append("' to '")
                .append(controller.applications().requireInstance(id).change()).append("' for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Schedules reindexing of an application, or a subset of clusters, possibly on a subset of document types. */
private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    // Both filters are comma-separated lists; blank entries are ignored.
    List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream()
                                        .flatMap(clusters -> Stream.of(clusters.split(",")))
                                        .filter(cluster -> ! cluster.isBlank())
                                        .collect(toUnmodifiableList());
    List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream()
                                         .flatMap(types -> Stream.of(types.split(",")))
                                         .filter(type -> ! type.isBlank())
                                         .collect(toUnmodifiableList());
    controller.applications().reindex(id, zone, clusterNames, documentTypes, request.getBooleanProperty("indexedOnly"));
    // Report clusters and types independently: a misplaced parenthesis previously nested the type
    // clause inside the cluster clause, silently omitting types whenever no clusters were given.
    return new MessageResponse("Requested reindexing of " + id + " in " + zone +
                               (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames)) +
                               (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)));
}
/** Gets reindexing status of an application in a zone. */
private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setBool("enabled", reindexing.enabled());
    // Clusters, and the pending/ready entries within each, are emitted in key-sorted order.
    Cursor clustersArray = root.setArray("clusters");
    reindexing.clusters().entrySet().stream().sorted(comparingByKey())
              .forEach(cluster -> {
                  Cursor clusterObject = clustersArray.addObject();
                  clusterObject.setString("name", cluster.getKey());
                  // Document types with reindexing pending, and the config generation that requires it.
                  Cursor pendingArray = clusterObject.setArray("pending");
                  cluster.getValue().pending().entrySet().stream().sorted(comparingByKey())
                         .forEach(pending -> {
                             Cursor pendingObject = pendingArray.addObject();
                             pendingObject.setString("type", pending.getKey());
                             pendingObject.setLong("requiredGeneration", pending.getValue());
                         });
                  // Document types whose reindexing is ready, with detailed status for each.
                  Cursor readyArray = clusterObject.setArray("ready");
                  cluster.getValue().ready().entrySet().stream().sorted(comparingByKey())
                         .forEach(ready -> {
                             Cursor readyObject = readyArray.addObject();
                             readyObject.setString("type", ready.getKey());
                             setStatus(readyObject, ready.getValue());
                         });
              });
    return new SlimeJsonResponse(slime);
}
/** Serializes the present fields of the given reindexing status onto the given cursor; absent fields are omitted. */
void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
    status.readyAt().map(Instant::toEpochMilli).ifPresent(millis -> statusObject.setLong("readyAtMillis", millis));
    status.startedAt().map(Instant::toEpochMilli).ifPresent(millis -> statusObject.setLong("startedAtMillis", millis));
    status.endedAt().map(Instant::toEpochMilli).ifPresent(millis -> statusObject.setLong("endedAtMillis", millis));
    status.state().map(ApplicationApiHandler::toString).ifPresent(stateName -> statusObject.setString("state", stateName));
    status.message().ifPresent(text -> statusObject.setString("message", text));
    status.progress().ifPresent(fraction -> statusObject.setDouble("progress", fraction));
}
/** Maps a reindexing state to its wire name, or null for states with no wire representation. */
private static String toString(ApplicationReindexing.State state) {
    if (state == ApplicationReindexing.State.PENDING)    return "pending";
    if (state == ApplicationReindexing.State.RUNNING)    return "running";
    if (state == ApplicationReindexing.State.FAILED)     return "failed";
    if (state == ApplicationReindexing.State.SUCCESSFUL) return "successful";
    return null; // unknown states yield null, which callers turn into an absent field
}
/** Enables reindexing of an application in a zone. */
private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    controller.applications().enableReindexing(id, zone);
    return new MessageResponse("Enabled reindexing of " + id + " in " + zone);
}
/** Disables reindexing of an application in a zone. */
private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    controller.applications().disableReindexing(id, zone);
    return new MessageResponse("Disabled reindexing of " + id + " in " + zone);
}
/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deploymentId = new DeploymentId(application, requireZone(environment, region));
    // All three filter properties are optional; an absent property means "do not filter on it".
    Optional<HostName> hostname = Optional.ofNullable(request.getProperty("hostname")).map(HostName::from);
    Optional<ClusterSpec.Type> clusterType = Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from);
    Optional<ClusterSpec.Id> clusterId = Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from);
    RestartFilter restartFilter = new RestartFilter().withHostName(hostname)
                                                     .withClusterType(clusterType)
                                                     .withClusterId(clusterId);
    controller.applications().restart(deploymentId, restartFilter);
    return new MessageResponse("Requested restart of " + deploymentId);
}
/** Set suspension status of the given deployment. */
private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
    ZoneId zone = requireZone(environment, region);
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), zone);
    controller.applications().setSuspension(deploymentId, suspend);
    String verb = suspend ? "Suspended" : "Resumed";
    return new MessageResponse(verb + " orchestration of " + deploymentId);
}
/**
 * Deploys an application package directly to the zone of the given job type.
 * Only manually deployed environments are allowed, unless the caller is an operator.
 *
 * @throws IllegalArgumentException if the environment disallows direct deployment, or the zip part is missing
 */
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
    if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");

    Map<String, byte[]> dataParts = parseDataParts(request);
    // Use the shared constant for the form-part name both when checking and reading, so the two cannot drift apart.
    if ( ! dataParts.containsKey(EnvironmentResource.APPLICATION_ZIP))
        throw new IllegalArgumentException("Missing required form part '" + EnvironmentResource.APPLICATION_ZIP + "'");

    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                     Optional.of(id.instance()),
                                                                     Optional.of(type.zone(controller.system())),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));
    // An optional "deployOptions" JSON part may pin the Vespa version to deploy on.
    Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions"))
                                        .map(json -> SlimeUtils.jsonToSlime(json).get())
                                        .flatMap(options -> optional("vespaVersion", options))
                                        .map(Version::fromString);

    controller.jobController().deploy(id, type, version, applicationPackage);
    // The run was just triggered above, so it must exist; fail loudly instead of an unchecked Optional.get().
    RunId runId = controller.jobController().last(id, type)
                            .orElseThrow(() -> new IllegalStateException("No run found for " + id + " after triggering " + type))
                            .id();
    Slime slime = new Slime();
    Cursor rootObject = slime.setObject();
    rootObject.setString("message", "Deployment started in " + runId +
                                    ". This may take about 15 minutes the first time.");
    rootObject.setLong("run", runId.number());
    return new SlimeJsonResponse(slime);
}
/**
 * Deploys an application to the given zone, driven by the "deployOptions" form part.
 * Handles three cases: the system proxy application, a redeploy of the current deployment, and a normal deploy.
 */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( ! dataParts.containsKey("deployOptions"))
        return ErrorResponse.badRequest("Missing required form part 'deployOptions'");

    Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();

    /*
     * Special handling of the proxy application (the only system application with an application package)
     * Setting any other deployOptions here is not supported for now (e.g. specifying version), but
     * this might be handy later to handle emergency downgrades.
     */
    boolean isZoneApplication = SystemApplication.proxy.id().equals(applicationId);
    if (isZoneApplication) {
        // Explicit versions are rejected: system applications always deploy on the current system version.
        String versionStr = deployOptions.field("vespaVersion").asString();
        boolean versionPresent = !versionStr.isEmpty() && !versionStr.equals("null");
        if (versionPresent) {
            throw new RuntimeException("Version not supported for system applications");
        }
        // Refuse deployment while the system itself is upgrading, or before its version is known.
        VersionStatus versionStatus = controller.readVersionStatus();
        if (versionStatus.isUpgrading()) {
            throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
        }
        Optional<VespaVersion> systemVersion = versionStatus.systemVersion();
        if (systemVersion.isEmpty()) {
            throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
        }
        ActivateResult result = controller.applications()
                                          .deploySystemApplicationPackage(SystemApplication.proxy, zone, systemVersion.get().versionNumber());
        return new SlimeJsonResponse(toSlime(result));
    }

    /*
     * Normal applications from here
     */
    Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip"))
                                                              .map(ApplicationPackage::new);
    // NOTE(review): 'application' is assigned but never read below — looks like dead code; confirm before removing.
    Optional<com.yahoo.vespa.hosted.controller.Application> application = controller.applications().getApplication(TenantAndApplicationId.from(applicationId));

    // An application version may be referenced by source revision + build number, instead of sending a package.
    Inspector sourceRevision = deployOptions.field("sourceRevision");
    Inspector buildNumber = deployOptions.field("buildNumber");
    if (sourceRevision.valid() != buildNumber.valid())
        throw new IllegalArgumentException("Source revision and build number must both be provided, or not");

    Optional<ApplicationVersion> applicationVersion = Optional.empty();
    if (sourceRevision.valid()) {
        if (applicationPackage.isPresent())
            throw new IllegalArgumentException("Application version and application package can't both be provided.");
        applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision),
                                                                 buildNumber.asLong()));
        applicationPackage = Optional.of(controller.applications().getApplicationPackage(applicationId,
                                                                                         applicationVersion.get()));
    }

    boolean deployDirectly = deployOptions.field("deployDirectly").asBool();
    Optional<Version> vespaVersion = optional("vespaVersion", deployOptions).map(Version::new);

    // With no package, version, or platform given, a direct deploy means: redeploy what is currently deployed.
    if (deployDirectly && applicationPackage.isEmpty() && applicationVersion.isEmpty() && vespaVersion.isEmpty()) {
        Optional<Deployment> deployment = controller.applications().getInstance(applicationId)
                .map(Instance::deployments)
                .flatMap(deployments -> Optional.ofNullable(deployments.get(zone)));
        if(deployment.isEmpty())
            throw new IllegalArgumentException("Can't redeploy application, no deployment currently exist");

        ApplicationVersion version = deployment.get().applicationVersion();
        if(version.isUnknown())
            throw new IllegalArgumentException("Can't redeploy application, application version is unknown");

        applicationVersion = Optional.of(version);
        vespaVersion = Optional.of(deployment.get().version());
        applicationPackage = Optional.of(controller.applications().getApplicationPackage(applicationId,
                                                                                         applicationVersion.get()));
    }

    DeployOptions deployOptionsJsonClass = new DeployOptions(deployDirectly,
                                                             vespaVersion,
                                                             deployOptions.field("ignoreValidationErrors").asBool(),
                                                             deployOptions.field("deployCurrentVersion").asBool());

    applicationPackage.ifPresent(aPackage -> controller.applications().verifyApplicationIdentityConfiguration(applicationId.tenant(),
                                                                                                              Optional.of(applicationId.instance()),
                                                                                                              Optional.of(zone),
                                                                                                              aPackage,
                                                                                                              Optional.of(requireUserPrincipal(request))));

    ActivateResult result = controller.applications().deploy(applicationId,
                                                             zone,
                                                             applicationPackage,
                                                             applicationVersion,
                                                             deployOptionsJsonClass);

    return new SlimeJsonResponse(toSlime(result));
}
/** Deletes the given tenant, or responds 404 when it does not exist. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");

    Tenant existing = tenant.get();
    Credentials credentials = accessControlRequests.credentials(existing.name(),
                                                                toSlime(request.getData()).get(),
                                                                request.getJDiscRequest());
    controller.tenants().delete(existing.name(), credentials);

    // Respond with the last known state of the now-deleted tenant.
    return tenant(existing, request);
}
/** Deletes the given application, authorized by the credentials in the request body. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(),
                                                                toSlime(request.getData()).get(),
                                                                request.getJDiscRequest());
    controller.applications().deleteApplication(id, credentials);
    return new MessageResponse("Deleted application " + id);
}
/** Deletes the given instance, and the whole application if this was its last instance. */
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    ApplicationId instanceId = id.instance(instanceName);
    controller.applications().deleteInstance(instanceId);
    // When no instances remain, delete the application itself, which requires credentials.
    if (controller.applications().requireApplication(id).instances().isEmpty()) {
        Credentials credentials = accessControlRequests.credentials(id.tenant(),
                                                                    toSlime(request.getData()).get(),
                                                                    request.getJDiscRequest());
        controller.applications().deleteApplication(id, credentials);
    }
    return new MessageResponse("Deleted instance " + instanceId.toFullString());
}
/** Deactivates the deployment of the given instance in the given zone. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId id = new DeploymentId(application, requireZone(environment, region));
    controller.applications().deactivate(id.applicationId(), id.zoneId());
    return new MessageResponse("Deactivated " + id);
}
/** Returns test config for indicated job, with production deployments of the default instance. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
    ApplicationId defaultInstanceId = TenantAndApplicationId.from(id).defaultInstance();
    HashSet<DeploymentId> deployments = new HashSet<>();
    // Collect all production deployments of the default instance, if it exists.
    controller.applications().getInstance(defaultInstanceId)
              .ifPresent(instance -> instance.productionDeployments().keySet()
                                             .forEach(zone -> deployments.add(new DeploymentId(defaultInstanceId, zone))));
    var testedZone = type.zone(controller.system());

    // Non-production jobs also test the declared instance's deployment in the tested zone.
    if ( ! type.isProduction())
        deployments.add(new DeploymentId(id, testedZone));

    return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
                                                                  type,
                                                                  false,
                                                                  controller.routing().zoneEndpointsOf(deployments),
                                                                  controller.applications().reachableContentClustersByZone(deployments)));
}
/**
 * Parses a source revision from the given JSON object.
 *
 * @throws IllegalArgumentException if any of "repository", "branch" or "commit" is missing
 */
private static SourceRevision toSourceRevision(Inspector object) {
    Inspector repository = object.field("repository");
    Inspector branch = object.field("branch");
    Inspector commit = object.field("commit");
    if ( ! (repository.valid() && branch.valid() && commit.valid())) {
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    }
    return new SourceRevision(repository.asString(), branch.asString(), commit.asString());
}
/** Returns the tenant with the given name, or throws NotExistsException when it does not exist. */
private Tenant getTenantOrThrow(String tenantName) {
    return controller.tenants().get(tenantName)
                     .orElseThrow(() -> new NotExistsException(new TenantId(tenantName)));
}
/** Serializes the given tenant, including its applications (and optionally their deployments), onto the given cursor. */
private void toSlime(Cursor object, Tenant tenant, HttpRequest request) {
    object.setString("tenant", tenant.name().value());
    object.setString("type", tenantType(tenant));
    List<com.yahoo.vespa.hosted.controller.Application> applications = controller.applications().asList(tenant.name());
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            object.setString("athensDomain", athenzTenant.domain().getName());
            object.setString("property", athenzTenant.property().id());
            athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
            // Contact info is only present when it has been fetched from the external contact service.
            athenzTenant.contact().ifPresent(c -> {
                object.setString("propertyUrl", c.propertyUrl().toString());
                object.setString("contactsUrl", c.url().toString());
                object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
                Cursor contactsArray = object.setArray("contacts");
                c.persons().forEach(persons -> {
                    Cursor personArray = contactsArray.addArray();
                    persons.forEach(personArray::addString);
                });
            });
            break;
        case cloud: {
            CloudTenant cloudTenant = (CloudTenant) tenant;
            cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName()));
            // Developer keys registered for this tenant, as PEM, with the owning user of each.
            Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
            cloudTenant.developerKeys().forEach((key, user) -> {
                Cursor keyObject = pemDeveloperKeysArray.addObject();
                keyObject.setString("key", KeyUtils.toPem(key));
                keyObject.setString("user", user.getName());
            });
            Cursor secretStore = object.setArray("secretStores");
            cloudTenant.tenantSecretStores().forEach(store -> {
                Cursor storeObject = secretStore.addObject();
                storeObject.setString("name", store.getName());
                storeObject.setString("awsId", store.getAwsId());
                storeObject.setString("role", store.getRole());
            });
            // Quota: the tenant's allowance, and the sum of usage over all its applications.
            var tenantQuota = controller.serviceRegistry().billingController().getQuota(tenant.name());
            var usedQuota = applications.stream()
                    .map(com.yahoo.vespa.hosted.controller.Application::quotaUsage)
                    .reduce(QuotaUsage.none, QuotaUsage::add);
            toSlime(tenantQuota, usedQuota, object.setObject("quota"));
            break;
        }
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    // Applications: either full recursive serialization, or just references, per the "recursive" property.
    Cursor applicationArray = object.setArray("applications");
    for (com.yahoo.vespa.hosted.controller.Application application : applications) {
        DeploymentStatus status = controller.jobController().deploymentStatus(application);
        for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                      : application.instances().values())
            if (recurseOverApplications(request))
                toSlime(applicationArray.addObject(), instance, status, request);
            else
                toSlime(instance.id(), applicationArray.addObject(), request);
    }
    tenantMetaDataToSlime(tenant, object.setObject("metaData"));
}
/** Serializes the tenant's quota allowance and current usage onto the given cursor. */
private void toSlime(Quota quota, QuotaUsage usage, Cursor object) {
    // An absent budget is serialized as an explicit null, not omitted.
    if (quota.budget().isPresent())
        object.setDouble("budget", quota.budget().get().doubleValue());
    else
        object.setNix("budget");
    object.setDouble("budgetUsed", usage.rate());
    quota.maxClusterSize().ifPresent(size -> object.setLong("clusterSize", size));
}
/** Serializes the given cluster resources, including an estimated cost, onto the given cursor. */
private void toSlime(ClusterResources resources, Cursor object) {
    object.setLong("nodes", resources.nodes());
    object.setLong("groups", resources.groups());
    toSlime(resources.nodeResources(), object.setObject("nodeResources"));
    // NOTE(review): non-public systems divide cost by 3.0 — presumably an internal pricing adjustment; confirm.
    double costDivisor = controller.zoneRegistry().system().isPublic() ? 1.0 : 3.0;
    // Round the cost to two decimals.
    object.setDouble("cost", Math.round(resources.nodes() * resources.nodeResources().cost() * 100.0 / costDivisor) / 100.0);
}
/** Serializes cpu, memory and disk utilization fractions onto the given cursor. */
private void utilizationToSlime(Cluster.Utilization utilization, Cursor utilizationObject) {
    utilizationObject.setDouble("cpu", utilization.cpu());
    utilizationObject.setDouble("memory", utilization.memory());
    utilizationObject.setDouble("disk", utilization.disk());
}
/** Serializes each scaling event — its from/to resources and timestamp — into the given array. */
private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) {
    scalingEvents.forEach(event -> {
        Cursor eventObject = scalingEventsArray.addObject();
        toSlime(event.from(), eventObject.setObject("from"));
        toSlime(event.to(), eventObject.setObject("to"));
        eventObject.setLong("at", event.at().toEpochMilli());
    });
}
/** Serializes the given node resources onto the given cursor. */
private void toSlime(NodeResources resources, Cursor object) {
    object.setDouble("vcpu", resources.vcpu());
    object.setDouble("memoryGb", resources.memoryGb());
    object.setDouble("diskGb", resources.diskGb());
    object.setDouble("bandwidthGbps", resources.bandwidthGbps());
    object.setString("diskSpeed", valueOf(resources.diskSpeed()));
    object.setString("storageType", valueOf(resources.storageType()));
}
/** Serializes a short reference to the given tenant — name, type metadata, and a URL — for tenant listings. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
    object.setString("tenant", tenant.name().value());
    Cursor metaData = object.setObject("metaData");
    metaData.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            metaData.setString("athensDomain", athenzTenant.domain().getName());
            metaData.setString("property", athenzTenant.property().id());
            break;
        case cloud: break; // cloud tenants carry no extra list metadata
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/** Serializes tenant activity metadata: creation time, latest dev deployment, latest submission, and last logins. */
private void tenantMetaDataToSlime(Tenant tenant, Cursor object) {
    List<com.yahoo.vespa.hosted.controller.Application> applications = controller.applications().asList(tenant.name());
    // Most recent start of any dev-environment job run across all this tenant's instances.
    Optional<Instant> lastDev = applications.stream()
                                            .flatMap(application -> application.instances().values().stream())
                                            .flatMap(instance -> controller.jobController().jobs(instance.id()).stream()
                                                                           .filter(jobType -> jobType.environment() == Environment.dev)
                                                                           .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream()))
                                            .map(Run::start)
                                            .max(Comparator.naturalOrder());
    // Most recent build time of any submitted application version.
    Optional<Instant> lastSubmission = applications.stream()
                                                   .flatMap(app -> app.latestVersion().flatMap(ApplicationVersion::buildTime).stream())
                                                   .max(Comparator.naturalOrder());
    object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli());
    lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli()));
    lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli()));
    // Last login per user level, when known.
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user)
            .ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer)
            .ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator)
            .ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli()));
}
/** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to given query*/
private URI withPathAndQuery(String newPath, String newQuery, URI uri) {
    try {
        return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null);
    }
    catch (URISyntaxException e) {
        // Components are taken from an already-valid URI, so this cannot occur in practice.
        throw new RuntimeException("Will not happen", e);
    }
}
/** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
private URI withPath(String newPath, URI uri) {
    return withPathAndQuery(newPath, null, uri);
}
/** Returns the application/v4 API path for the given deployment. */
private String toPath(DeploymentId id) {
    return path("/application", "v4",
                "tenant", id.applicationId().tenant(),
                "application", id.applicationId().application(),
                "instance", id.applicationId().instance(),
                "environment", id.zoneId().environment(),
                "region", id.zoneId().region());
}
/**
 * Parses the given string as a long, or returns the given default when the string is null.
 *
 * @throws IllegalArgumentException if the string is non-null but not a valid long
 */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null) return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    }
    catch (NumberFormatException e) {
        // Chain the cause so the original parse failure is not lost.
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'", e);
    }
}
/** Serializes a summary of the given job run onto the given cursor. */
private void toSlime(Run run, Cursor object) {
    object.setLong("id", run.id().number());
    object.setString("version", run.versions().targetPlatform().toFullString());
    // Only include the revision when the target application version is actually known.
    if ( ! run.versions().targetApplication().isUnknown())
        toSlime(run.versions().targetApplication(), object.setObject("revision"));
    // NOTE(review): the reason is hard-coded — no trigger-reason information is read from the run here; confirm intended.
    object.setString("reason", "unknown reason");
    // Use the end time when the run has finished; fall back to the start time for still-running jobs.
    object.setLong("at", run.end().orElse(run.start()).toEpochMilli());
}
/**
 * Reads the given stream (at most 1 MB) and parses its content as JSON.
 *
 * @throws RuntimeException wrapping the underlying IOException if reading the stream fails
 */
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        // Chain the cause: the original code threw a bare RuntimeException, losing all failure context.
        throw new RuntimeException(e);
    }
}
/** Returns the authenticated user principal of the given request, or throws when none is present. */
private static Principal requireUserPrincipal(HttpRequest request) {
    Principal principal = request.getJDiscRequest().getUserPrincipal();
    if (principal != null)
        return principal;
    // Authentication filters should always have set a principal; a missing one is a server-side error.
    throw new InternalServerErrorException("Expected a user principal");
}
/** Returns the field with the given key, or throws when the field is absent or invalid. */
private Inspector mandatory(String key, Inspector object) {
    Inspector field = object.field(key);
    if ( ! field.valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return field;
}
/** Returns the string value of the given field, or empty when the field is absent. */
private Optional<String> optional(String key, Inspector object) {
    return SlimeUtils.optionalString(object.field(key));
}
/** Joins the string representations of the given elements with '/' — note: does not add a leading slash. */
private static String path(Object... elements) {
    return Joiner.on("/").join(elements);
}
/** Serializes a reference to the given application — tenant, name, and API URL — onto the given cursor. */
private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    String apiPath = "/application/v4" +
                     "/tenant/" + id.tenant().value() +
                     "/application/" + id.application().value();
    object.setString("url", withPath(apiPath, request.getUri()).toString());
}
/** Serializes a reference to the given instance — tenant, application, instance, and API URL — onto the given cursor. */
private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    object.setString("instance", id.instance().value());
    String apiPath = "/application/v4" +
                     "/tenant/" + id.tenant().value() +
                     "/application/" + id.application().value() +
                     "/instance/" + id.instance().value();
    object.setString("url", withPath(apiPath, request.getUri()).toString());
}
/** Serializes the result of a deployment activation: prepare log messages and required config change actions. */
private Slime toSlime(ActivateResult result) {
    Slime slime = new Slime();
    Cursor object = slime.setObject();
    object.setString("revisionId", result.revisionId().id());
    object.setLong("applicationZipSize", result.applicationZipSizeBytes());
    Cursor logArray = object.setArray("prepareMessages");
    // The prepare log may be absent; in that case the array is left empty.
    if (result.prepareResponse().log != null) {
        for (Log logMessage : result.prepareResponse().log) {
            Cursor logObject = logArray.addObject();
            logObject.setLong("time", logMessage.time);
            logObject.setString("level", logMessage.level);
            logObject.setString("message", logMessage.message);
        }
    }

    Cursor changeObject = object.setObject("configChangeActions");

    // Services which must be restarted for the new config to take effect.
    Cursor restartActionsArray = changeObject.setArray("restart");
    for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
        Cursor restartActionObject = restartActionsArray.addObject();
        restartActionObject.setString("clusterName", restartAction.clusterName);
        restartActionObject.setString("clusterType", restartAction.clusterType);
        restartActionObject.setString("serviceType", restartAction.serviceType);
        serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
        stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
    }

    // Document types which must be re-fed because of the config change.
    Cursor refeedActionsArray = changeObject.setArray("refeed");
    for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
        Cursor refeedActionObject = refeedActionsArray.addObject();
        refeedActionObject.setString("name", refeedAction.name);
        refeedActionObject.setString("documentType", refeedAction.documentType);
        refeedActionObject.setString("clusterName", refeedAction.clusterName);
        serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
        stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
    }
    return slime;
}
/** Serializes each service info — name, type, config id and host — into the given array. */
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
    serviceInfoList.forEach(service -> {
        Cursor serviceObject = array.addObject();
        serviceObject.setString("serviceName", service.serviceName);
        serviceObject.setString("serviceType", service.serviceType);
        serviceObject.setString("configId", service.configId);
        serviceObject.setString("hostName", service.hostName);
    });
}
/** Appends each of the given strings to the given array. */
private void stringsToSlime(List<String> strings, Cursor array) {
    strings.forEach(array::addString);
}
/**
 * Reads the entire stream into a string, decoded as UTF-8, or returns null when the stream is empty.
 * The charset is now explicit: the previous implementation used the platform default, which
 * could mis-decode request bodies on hosts not configured for UTF-8.
 */
private String readToString(InputStream stream) {
    // "\\A" makes the scanner consume the whole input as a single token.
    Scanner scanner = new Scanner(stream, "UTF-8").useDelimiter("\\A");
    if ( ! scanner.hasNext()) return null;
    return scanner.next();
}
/** Returns whether the "recursive" property asks for at least tenant-level recursion. */
private static boolean recurseOverTenants(HttpRequest request) {
    return "tenant".equals(request.getProperty("recursive")) || recurseOverApplications(request);
}
/** Returns whether the "recursive" property asks for at least application-level recursion. */
private static boolean recurseOverApplications(HttpRequest request) {
    return "application".equals(request.getProperty("recursive")) || recurseOverDeployments(request);
}
/** Returns whether the "recursive" property asks for deployment-level recursion. */
private static boolean recurseOverDeployments(HttpRequest request) {
    String recursive = request.getProperty("recursive");
    // Null-safe equals-chain; the property may be absent.
    return "all".equals(recursive) || "true".equals(recursive) || "deployment".equals(recursive);
}
/** Returns whether the request asks to restrict listings to production instances only. */
private static boolean showOnlyProductionInstances(HttpRequest request) {
    return "true".equals(request.getProperty("production"));
}
/** Returns the API name for the given tenant's type. */
private static String tenantType(Tenant tenant) {
    switch (tenant.type()) {
        case athenz: return "ATHENS";
        case cloud: return "CLOUD";
        // Report the unmatched type value itself — the class simple name, used previously, did not
        // identify which switch value was missing.
        default: throw new IllegalArgumentException("Unknown tenant type: " + tenant.type());
    }
}
/** Extracts the application id from the "tenant", "application" and "instance" path segments. */
private static ApplicationId appIdFromPath(Path path) {
    return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
}
/** Extracts the job type from the "jobtype" path segment. */
private static JobType jobTypeFromPath(Path path) {
    return JobType.fromJobName(path.get("jobtype"));
}
/** Extracts a run id from the application, job type, and "number" path segments. */
private static RunId runIdFromPath(Path path) {
    return new RunId(appIdFromPath(path), jobTypeFromPath(path), Long.parseLong(path.get("number")));
}
/** Handles submission of a new application revision: parses the options, verifies identity, and registers the build. */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
    Map<String, byte[]> dataParts = parseDataParts(request);
    Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
    // Project ids below 1 are normalized to 1.
    long projectId = Math.max(1, submitOptions.field("projectId").asLong());
    Optional<String> repository = optional("repository", submitOptions);
    Optional<String> branch = optional("branch", submitOptions);
    Optional<String> commit = optional("commit", submitOptions);
    // The source revision is only used when all three of repository, branch and commit are given.
    Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
                                              ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
                                              : Optional.empty();
    Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
    Optional<String> authorEmail = optional("authorEmail", submitOptions);
    // A given source URL must be absolute.
    sourceUrl.map(URI::create).ifPresent(url -> {
        if (url.getHost() == null || url.getScheme() == null)
            throw new IllegalArgumentException("Source URL must include scheme and host");
    });

    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
    controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
                                                                     Optional.empty(),
                                                                     Optional.empty(),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));

    return JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
                                                        tenant,
                                                        application,
                                                        sourceRevision,
                                                        authorEmail,
                                                        sourceUrl,
                                                        projectId,
                                                        applicationPackage,
                                                        dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP));
}
/** Submits a synthetic deployment-removal package for the given application, removing all production deployments. */
private HttpResponse removeAllProdDeployments(String tenant, String application) {
    // NOTE(review): the response from submitResponse is discarded on purpose — only the side effect is wanted; confirm.
    JobControllerApiHandlerHelper.submitResponse(controller.jobController(), tenant, application,
                                                 Optional.empty(), Optional.empty(), Optional.empty(), 1,
                                                 ApplicationPackage.deploymentRemoval(), new byte[0]);
    return new MessageResponse("All deployments removed");
}
/**
 * Returns the zone for the given environment and region.
 *
 * @throws IllegalArgumentException if the zone is not known to this system
 */
private ZoneId requireZone(String environment, String region) {
    ZoneId zone = ZoneId.from(environment, region);
    // The synthetic prod "controller" zone is always accepted, even though it is not in the registry.
    boolean isControllerZone = zone.environment() == Environment.prod && zone.region().value().equals("controller");
    if ( ! isControllerZone && ! controller.zoneRegistry().hasZone(zone))
        throw new IllegalArgumentException("Zone " + zone + " does not exist in this system");
    return zone;
}
/**
 * Parses the multipart form data of the given request.
 * When an X-Content-Hash header is present, the body is digested while parsing and the
 * SHA-256 hash is verified against the (base64-encoded) header value.
 */
private static Map<String, byte[]> parseDataParts(HttpRequest request) {
    String contentHash = request.getHeader("X-Content-Hash");
    if (contentHash == null)
        return new MultipartParser().parse(request);

    DigestInputStream digester = Signatures.sha256Digester(request.getData());
    var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
    // The digest must be read only after the parser has consumed the whole stream.
    if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
        throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");

    return dataParts;
}
/**
 * Returns the rotation of the given instance matching the given endpoint id, or the sole rotation when no id is given.
 *
 * @throws NotExistsException if the instance has no rotations, or none matching the given endpoint id
 * @throws IllegalArgumentException if the instance has several rotations and no endpoint id was given
 */
private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
    if (instance.rotations().isEmpty()) {
        throw new NotExistsException("global rotation does not exist for " + instance);
    }

    if (endpointId.isPresent()) {
        return instance.rotations().stream()
                       .filter(r -> r.endpointId().id().equals(endpointId.get()))
                       .map(AssignedRotation::rotationId)
                       .findFirst()
                       .orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() +
                                                                 " does not exist for " + instance));
    } else if (instance.rotations().size() > 1) {
        throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
    }

    // Exactly one rotation and no endpoint id given: return it.
    return instance.rotations().get(0).rotationId();
}
/** Returns the wire name of the given rotation state; unrecognized states map to "UNKNOWN". */
private static String rotationStateString(RotationState state) {
    if (state == RotationState.in)  return "IN";
    if (state == RotationState.out) return "OUT";
    return "UNKNOWN";
}
/** Returns the wire name of the given endpoint scope. */
private static String endpointScopeString(Endpoint.Scope scope) {
    if (scope == Endpoint.Scope.region) return "region";
    if (scope == Endpoint.Scope.global) return "global";
    if (scope == Endpoint.Scope.zone)   return "zone";
    throw new IllegalArgumentException("Unknown endpoint scope " + scope);
}
/** Returns the wire name of the given routing method. */
private static String routingMethodString(RoutingMethod method) {
    if (method == RoutingMethod.exclusive)    return "exclusive";
    if (method == RoutingMethod.shared)       return "shared";
    if (method == RoutingMethod.sharedLayer4) return "sharedLayer4";
    throw new IllegalArgumentException("Unknown routing method " + method);
}
/**
 * Returns the request context attribute with the given name, cast to the given class.
 *
 * @throws IllegalArgumentException if the attribute is absent or not of the expected type
 */
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
    Object value = request.getJDiscRequest().context().get(attributeName);
    if (cls.isInstance(value))
        return cls.cast(value);
    throw new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request");
}
/** Returns whether given request is by an operator */
private static boolean isOperator(HttpRequest request) {
    var securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
    for (var role : securityContext.roles())
        if (role.definition() == RoleDefinition.hostedOperator)
            return true;
    return false;
}
} |
This should never happen? | List<Integer> provisionIndices(int count) {
if (count == 0) return List.of();
NodeType hostType = requestedNodes.type().hostType();
if (hostType == NodeType.host) return nodeRepository.database().readProvisionIndices(count);
int offset = 1;
Set<Integer> currentIndices = allNodes.nodeType(hostType)
.stream()
.map(node -> node.allocation().get().membership().index())
.map(index -> index + offset)
.collect(Collectors.toSet());
List<Integer> indices = new ArrayList<>(count);
for (int i = offset; indices.size() < count; i++) {
if (!currentIndices.contains(i)) {
indices.add(i);
}
}
return indices;
} | if (count == 0) return List.of(); | List<Integer> provisionIndices(int count) {
if (count < 1) throw new IllegalArgumentException("Count must be positive");
NodeType hostType = requestedNodes.type().hostType();
if (hostType == NodeType.host) return nodeRepository.database().readProvisionIndices(count);
Set<Integer> currentIndices = allNodes.nodeType(hostType)
.stream()
.map(Node::hostname)
.map(NodeAllocation::parseIndex)
.collect(Collectors.toSet());
List<Integer> indices = new ArrayList<>(count);
for (int i = 1; indices.size() < count; i++) {
if (!currentIndices.contains(i)) {
indices.add(i);
}
}
return indices;
} | class NodeAllocation {
/** List of all nodes in node-repository */
private final NodeList allNodes;
/** The application this list is for */
private final ApplicationId application;
/** The cluster this list is for */
private final ClusterSpec cluster;
/** The requested nodes of this list */
private final NodeSpec requestedNodes;
/** The node candidates this has accepted so far, keyed on hostname */
private final Map<String, NodeCandidate> nodes = new LinkedHashMap<>();
/** The number of already allocated nodes accepted and not retired */
private int accepted = 0;
/** The number of already allocated nodes accepted and not retired and not needing resize */
private int acceptedWithoutResizingRetired = 0;
/** The number of nodes rejected because of clashing parentHostname */
private int rejectedDueToClashingParentHost = 0;
/** The number of nodes rejected due to exclusivity constraints */
private int rejectedDueToExclusivity = 0;
private int rejectedDueToInsufficientRealResources = 0;
/** The number of nodes that just now was changed to retired */
private int wasRetiredJustNow = 0;
/** The node indexes to verify uniqueness of each members index */
private final Set<Integer> indexes = new HashSet<>();
/** The next membership index to assign to a new node */
private final Supplier<Integer> nextIndex;
private final NodeRepository nodeRepository;
private final NodeResourceLimits nodeResourceLimits;
/**
 * Creates an allocation session for one cluster of one application.
 *
 * @param allNodes       all nodes in the node repository, used for index and host checks
 * @param application    the application the requested nodes are for
 * @param cluster        the cluster the requested nodes belong to
 * @param requestedNodes the specification of the nodes requested
 * @param nextIndex      supplies the next membership index to assign to a newly allocated node
 * @param nodeRepository the node repository backing this allocation
 */
NodeAllocation(NodeList allNodes, ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes,
Supplier<Integer> nextIndex, NodeRepository nodeRepository) {
this.allNodes = allNodes;
this.application = application;
this.cluster = cluster;
this.requestedNodes = requestedNodes;
this.nextIndex = nextIndex;
this.nodeRepository = nodeRepository;
nodeResourceLimits = new NodeResourceLimits(nodeRepository);
}
/**
* Offer some nodes to this. The nodes may have an allocation to a different application or cluster,
* an allocation to this cluster, or no current allocation (in which case one is assigned).
*
* Note that if unallocated nodes are offered before allocated nodes, this will unnecessarily
* reject allocated nodes due to index duplicates.
*
* @param nodesPrioritized the nodes which are potentially on offer. These may belong to a different application etc.
* @return the subset of offeredNodes which was accepted, with the correct allocation assigned
*/
List<Node> offer(List<NodeCandidate> nodesPrioritized) {
List<Node> accepted = new ArrayList<>();
for (NodeCandidate candidate : nodesPrioritized) {
if (candidate.allocation().isPresent()) {
// Already allocated: usable only if it belongs to this application/cluster and its index is free
Allocation allocation = candidate.allocation().get();
ClusterMembership membership = allocation.membership();
if ( ! allocation.owner().equals(application)) continue;
if ( ! membership.cluster().satisfies(cluster)) continue;
// A surplus node may come from another group, but only while we still need more nodes
if ((! candidate.isSurplus || saturated()) && ! membership.cluster().group().equals(cluster.group())) continue;
// Active nodes already flagged as removable are skipped
if ( candidate.state() == Node.State.active && allocation.isRemovable()) continue;
// The membership index is already taken by an earlier accepted node
if ( indexes.contains(membership.index())) continue;
boolean resizeable = requestedNodes.considerRetiring() && candidate.isResizable;
boolean acceptToRetire = acceptToRetire(candidate);
// Accept either because it is wanted, or solely so it can be retired gracefully
if ((! saturated() && hasCompatibleFlavor(candidate) && requestedNodes.acceptable(candidate)) || acceptToRetire) {
candidate = candidate.withNode();
if (candidate.isValid())
accepted.add(acceptNode(candidate, shouldRetire(candidate, nodesPrioritized), resizeable));
}
}
else if (! saturated() && hasCompatibleFlavor(candidate)) {
// Unallocated: enforce placement constraints, then assign an allocation to this cluster
if (requestedNodes.type() == NodeType.tenant && ! nodeResourceLimits.isWithinRealLimits(candidate, cluster)) {
++rejectedDueToInsufficientRealResources;
continue;
}
if ( violatesParentHostPolicy(candidate)) {
++rejectedDueToClashingParentHost;
continue;
}
if ( violatesExclusivity(candidate)) {
++rejectedDueToExclusivity;
continue;
}
if (candidate.wantToRetire()) {
// never place a new allocation on a node marked for retirement
continue;
}
candidate = candidate.allocate(application,
ClusterMembership.from(cluster, nextIndex.get()),
requestedNodes.resources().orElse(candidate.resources()),
nodeRepository.clock().instant());
if (candidate.isValid())
accepted.add(acceptNode(candidate, false, false));
}
}
return accepted;
}
/**
 * Decides whether an already-allocated candidate should be (or remain) retired.
 * When retirement is not to be considered, the candidate's current retired status is kept.
 */
private boolean shouldRetire(NodeCandidate candidate, List<NodeCandidate> candidates) {
    if ( ! requestedNodes.considerRetiring())
        return candidate.allocation().map(a -> a.membership().retired()).orElse(false);
    return ! nodeResourceLimits.isWithinRealLimits(candidate, cluster)
           || violatesParentHostPolicy(candidate)
           || ! hasCompatibleFlavor(candidate)
           || candidate.wantToRetire()
           || (candidate.preferToRetire() && candidate.replacableBy(candidates))
           || violatesExclusivity(candidate);
}
// True when clash checking applies to this zone/instance and an already accepted node shares this candidate's parent host
private boolean violatesParentHostPolicy(NodeCandidate candidate) {
return checkForClashingParentHost() && offeredNodeHasParentHostnameAlreadyAccepted(candidate);
}
// Parent-host clash checking is enforced only for production in the main system, and never for tester instances
private boolean checkForClashingParentHost() {
return nodeRepository.zone().system() == SystemName.main &&
nodeRepository.zone().environment().isProduction() &&
! application.instance().isTester();
}
/** Returns whether any already accepted candidate shares this candidate's parent hostname. */
private boolean offeredNodeHasParentHostnameAlreadyAccepted(NodeCandidate candidate) {
    if (candidate.parentHostname().isEmpty()) return false;
    String parentHostname = candidate.parentHostname().get();
    return nodes.values().stream()
                .anyMatch(accepted -> accepted.parentHostname()
                                              .map(parentHostname::equals)
                                              .orElse(false));
}
// Returns whether allocating this candidate would break host exclusivity, i.e. this request is
// exclusive but the host serves (or may serve) another application, or the host already runs an
// exclusive cluster owned by another application. Candidates without a parent never violate.
private boolean violatesExclusivity(NodeCandidate candidate) {
if (candidate.parentHostname().isEmpty()) return false;
// Dynamically provisioned zones: hosts carry an explicit exclusive-to application tag
if (nodeRepository.zone().getCloud().dynamicProvisioning())
return requestedNodes.isExclusive() &&
! candidate.parent.flatMap(Node::exclusiveTo).map(application::equals).orElse(false);
// Otherwise: inspect the other allocated children on the same host
for (Node nodeOnHost : allNodes.childrenOf(candidate.parentHostname().get())) {
if (nodeOnHost.allocation().isEmpty()) continue;
if (requestedNodes.isExclusive() || nodeOnHost.allocation().get().membership().cluster().isExclusive()) {
if ( ! nodeOnHost.allocation().get().owner().equals(application)) return true;
}
}
return false;
}
/**
* Returns whether this node should be accepted into the cluster even if it is not currently desired
* (already enough nodes, or wrong flavor).
* Such nodes will be marked retired during finalization of the list of accepted nodes.
* The conditions for this are:
*
* This is a stateful node. These must always be retired before being removed to allow the cluster to
* migrate away data.
*
* This is a container node and it is not desired due to having the wrong flavor. In this case this
* will (normally) obtain for all the current nodes in the cluster and so retiring before removing must
* be used to avoid removing all the current nodes at once, before the newly allocated replacements are
* initialized. (In the other case, where a container node is not desired because we have enough nodes we
* do want to remove it immediately to get immediate feedback on how the size reduction works out.)
*/
/**
 * Returns whether an active node of this group should be accepted even though it is no longer
 * wanted, so that it can be retired gracefully rather than dropped outright.
 */
private boolean acceptToRetire(NodeCandidate candidate) {
    if (candidate.state() != Node.State.active) return false;
    ClusterMembership membership = candidate.allocation().get().membership();
    if ( ! membership.cluster().group().equals(cluster.group())) return false;
    if (membership.retired()) return true; // already retired: keep it in the list
    if ( ! requestedNodes.considerRetiring()) return false;
    if (cluster.isStateful()) return true;
    return cluster.type() == ClusterSpec.Type.container && ! hasCompatibleFlavor(candidate);
}
/** Returns whether the candidate's flavor satisfies the request; resizable candidates always count as compatible. */
private boolean hasCompatibleFlavor(NodeCandidate candidate) {
    if (candidate.isResizable) return true;
    return requestedNodes.isCompatible(candidate.flavor(), nodeRepository.flavors());
}
// Records the candidate as accepted, updating the node's allocation, the bookkeeping counters,
// the index set and the accepted-nodes map. Returns the (possibly modified) node.
private Node acceptNode(NodeCandidate candidate, boolean shouldRetire, boolean resizeable) {
Node node = candidate.toNode();
// Stamp the requested resources onto the allocation
if (node.allocation().isPresent())
node = node.with(node.allocation().get().withRequestedResources(requestedNodes.resources().orElse(node.resources())));
if (! shouldRetire) {
accepted++;
// Retired nodes that still need a resize do not count towards saturation
if (node.allocation().isEmpty()
|| ! ( requestedNodes.needsResize(node) && node.allocation().get().membership().retired()))
acceptedWithoutResizingRetired++;
// Resize now, unless the node is retired
if (resizeable && ! ( node.allocation().isPresent() && node.allocation().get().membership().retired()))
node = resize(node);
// A non-active node accepted here is wanted again: clear retirement and removability
if (node.state() != Node.State.active)
node = node.unretire().removable(false);
} else {
++wasRetiredJustNow;
node = node.retire(nodeRepository.clock().instant());
}
// Make sure the membership references this cluster spec
if ( ! node.allocation().get().membership().cluster().equals(cluster)) {
node = setCluster(cluster, node);
}
candidate = candidate.withNode(node);
indexes.add(node.allocation().get().membership().index());
nodes.put(node.hostname(), candidate);
return node;
}
/** Returns the node resized to the requested resources, keeping the parent host's disk speed and storage type. */
private Node resize(Node node) {
    NodeResources hostResources = allNodes.parentOf(node).get().flavor().resources();
    NodeResources resized = requestedNodes.resources().get()
                                          .with(hostResources.diskSpeed())
                                          .with(hostResources.storageType());
    return node.with(new Flavor(resized));
}
/** Returns a copy of the node whose membership references the given cluster spec. */
private Node setCluster(ClusterSpec cluster, Node node) {
    Allocation allocation = node.allocation().get();
    return node.with(allocation.with(allocation.membership().with(cluster)));
}
/** Returns true if no more nodes are needed in this list (retired nodes awaiting resize do not count) */
private boolean saturated() {
return requestedNodes.saturatedBy(acceptedWithoutResizingRetired);
}
/** Returns true if the content of this list is sufficient to meet the request */
boolean fulfilled() {
// Note: for non-tenant node types, accepted() counts all nodes of that type in the repository
return requestedNodes.fulfilledBy(accepted());
}
/** Returns true if this allocation was already fulfilled and resulted in no new changes */
public boolean fulfilledAndNoChanges() {
// Nothing to reserve and no new nodes created means this allocation is a no-op
return fulfilled() && reservableNodes().isEmpty() && newNodes().isEmpty();
}
/**
* Returns {@link FlavorCount} describing the node deficit for the given {@link NodeSpec}.
*
* @return empty if the requested spec is already fulfilled. Otherwise returns {@link FlavorCount} containing the
* flavor and node count required to cover the deficit.
*/
Optional<FlavorCount> nodeDeficit() {
// A deficit is only reported for config and tenant node types
if (nodeType() != NodeType.config && nodeType() != NodeType.tenant) {
return Optional.empty();
}
// Empty when the request is already fulfilled (deficit count is zero)
return Optional.of(new FlavorCount(requestedNodes.resources().orElseGet(NodeResources::unspecified),
requestedNodes.fulfilledDeficitCount(accepted())))
.filter(flavorCount -> flavorCount.getCount() > 0);
}
/** Returns the indices to use when provisioning hosts for this */
/** The node type this is allocating, as given by the requested node spec */
NodeType nodeType() {
return requestedNodes.type();
}
/**
* Make the number of <i>non-retired</i> nodes in the list equal to the requested number
* of nodes, and retire the rest of the list. Only retire currently active nodes.
* Prefer to retire nodes of the wrong flavor.
* Make as few changes to the retired set as possible.
*
* @return the final list of nodes
*/
List<Node> finalNodes() {
int currentRetiredCount = (int) nodes.values().stream().filter(node -> node.allocation().get().membership().retired()).count();
// Positive delta: too few retired; negative: too many retired
int deltaRetiredCount = requestedNodes.idealRetiredCount(nodes.size(), currentRetiredCount) - currentRetiredCount;
if (deltaRetiredCount > 0) { // retire more nodes, least wanted first; only active ones
for (NodeCandidate candidate : byRetiringPriority(nodes.values())) {
if ( ! candidate.allocation().get().membership().retired() && candidate.state() == Node.State.active) {
candidate = candidate.withNode();
candidate = candidate.withNode(candidate.toNode().retire(Agent.application, nodeRepository.clock().instant()));
nodes.put(candidate.toNode().hostname(), candidate);
if (--deltaRetiredCount == 0) break;
}
}
}
else if (deltaRetiredCount < 0) { // unretire nodes, most preferred first; only flavor-compatible ones
for (NodeCandidate candidate : byUnretiringPriority(nodes.values())) {
if ( candidate.allocation().get().membership().retired() && hasCompatibleFlavor(candidate) ) {
candidate = candidate.withNode();
if (candidate.isResizable)
candidate = candidate.withNode(resize(candidate.toNode()));
candidate = candidate.withNode(candidate.toNode().unretire());
nodes.put(candidate.toNode().hostname(), candidate);
if (++deltaRetiredCount == 0) break;
}
}
}
// Propagate the requested exclusivity flag onto every accepted node's cluster membership
for (NodeCandidate candidate : nodes.values()) {
candidate = candidate.withNode();
Allocation allocation = candidate.allocation().get();
candidate = candidate.withNode(candidate.toNode().with(allocation.with(allocation.membership()
.with(allocation.membership().cluster().exclusive(requestedNodes.isExclusive())))));
nodes.put(candidate.toNode().hostname(), candidate);
}
return nodes.values().stream().map(n -> n.toNode()).collect(Collectors.toList());
}
/** The accepted, pre-existing nodes currently in a state that allows (re-)reservation */
List<Node> reservableNodes() {
EnumSet<Node.State> reservableStates = EnumSet.of(Node.State.inactive, Node.State.ready, Node.State.reserved);
return nodesFilter(n -> ! n.isNew && reservableStates.contains(n.state()));
}
/** The accepted candidates which did not exist before this allocation */
List<Node> newNodes() {
return nodesFilter(n -> n.isNew);
}
/** Returns the accepted candidates matching the given predicate, converted to nodes. */
private List<Node> nodesFilter(Predicate<NodeCandidate> predicate) {
    List<Node> matching = new ArrayList<>();
    for (NodeCandidate candidate : nodes.values()) {
        if (predicate.test(candidate))
            matching.add(candidate.toNode());
    }
    return matching;
}
/** Returns the number of nodes accepted this far */
private int accepted() {
if (nodeType() == NodeType.tenant) return accepted;
// For other node types, count every node of that type in the repository
return allNodes.nodeType(nodeType()).size();
}
/** Prefer to retire nodes we want the least: descending natural order of candidates. */
private List<NodeCandidate> byRetiringPriority(Collection<NodeCandidate> candidates) {
    List<NodeCandidate> ordered = new ArrayList<>(candidates);
    ordered.sort(Comparator.reverseOrder());
    return ordered;
}
/** Prefer to unretire nodes which do not want to retire, breaking ties on lower membership index. */
private List<NodeCandidate> byUnretiringPriority(Collection<NodeCandidate> candidates) {
    Comparator<NodeCandidate> order =
            Comparator.comparing(NodeCandidate::wantToRetire)
                      .thenComparing(candidate -> candidate.allocation().get().membership().index());
    return candidates.stream().sorted(order).collect(Collectors.toList());
}
/** Returns a human-readable summary of why nodes were rejected, or the empty string if nothing was recorded. */
public String outOfCapacityDetails() {
    List<String> reasons = new ArrayList<>();
    if (rejectedDueToExclusivity > 0) reasons.add("host exclusivity constraints");
    if (rejectedDueToClashingParentHost > 0) reasons.add("insufficient nodes available on separate physical hosts");
    if (wasRetiredJustNow > 0) reasons.add("retirement of allocated nodes");
    if (rejectedDueToInsufficientRealResources > 0) reasons.add("insufficient real resources on hosts");
    return reasons.isEmpty() ? "" : ": Not enough nodes available due to " + String.join(", ", reasons);
}
/** A count of nodes of one resource specification, used to describe a provisioning deficit. */
static class FlavorCount {
// The resources each missing node should have
private final NodeResources flavor;
// How many nodes of this flavor are missing
private final int count;
private FlavorCount(NodeResources flavor, int count) {
this.flavor = flavor;
this.count = count;
}
NodeResources getFlavor() {
return flavor;
}
int getCount() {
return count;
}
}
} | class NodeAllocation {
/** List of all nodes in node-repository */
private final NodeList allNodes;
/** The application this list is for */
private final ApplicationId application;
/** The cluster this list is for */
private final ClusterSpec cluster;
/** The requested nodes of this list */
private final NodeSpec requestedNodes;
/** The node candidates this has accepted so far, keyed on hostname */
private final Map<String, NodeCandidate> nodes = new LinkedHashMap<>();
/** The number of already allocated nodes accepted and not retired */
private int accepted = 0;
/** The number of already allocated nodes accepted and not retired and not needing resize */
private int acceptedWithoutResizingRetired = 0;
/** The number of nodes rejected because of clashing parentHostname */
private int rejectedDueToClashingParentHost = 0;
/** The number of nodes rejected due to exclusivity constraints */
private int rejectedDueToExclusivity = 0;
private int rejectedDueToInsufficientRealResources = 0;
/** The number of nodes that just now was changed to retired */
private int wasRetiredJustNow = 0;
/** The node indexes to verify uniqueness of each members index */
private final Set<Integer> indexes = new HashSet<>();
/** The next membership index to assign to a new node */
private final Supplier<Integer> nextIndex;
private final NodeRepository nodeRepository;
private final NodeResourceLimits nodeResourceLimits;
NodeAllocation(NodeList allNodes, ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes,
Supplier<Integer> nextIndex, NodeRepository nodeRepository) {
this.allNodes = allNodes;
this.application = application;
this.cluster = cluster;
this.requestedNodes = requestedNodes;
this.nextIndex = nextIndex;
this.nodeRepository = nodeRepository;
nodeResourceLimits = new NodeResourceLimits(nodeRepository);
}
/**
* Offer some nodes to this. The nodes may have an allocation to a different application or cluster,
* an allocation to this cluster, or no current allocation (in which case one is assigned).
*
* Note that if unallocated nodes are offered before allocated nodes, this will unnecessarily
* reject allocated nodes due to index duplicates.
*
* @param nodesPrioritized the nodes which are potentially on offer. These may belong to a different application etc.
* @return the subset of offeredNodes which was accepted, with the correct allocation assigned
*/
List<Node> offer(List<NodeCandidate> nodesPrioritized) {
List<Node> accepted = new ArrayList<>();
for (NodeCandidate candidate : nodesPrioritized) {
if (candidate.allocation().isPresent()) {
Allocation allocation = candidate.allocation().get();
ClusterMembership membership = allocation.membership();
if ( ! allocation.owner().equals(application)) continue;
if ( ! membership.cluster().satisfies(cluster)) continue;
if ((! candidate.isSurplus || saturated()) && ! membership.cluster().group().equals(cluster.group())) continue;
if ( candidate.state() == Node.State.active && allocation.isRemovable()) continue;
if ( indexes.contains(membership.index())) continue;
boolean resizeable = requestedNodes.considerRetiring() && candidate.isResizable;
boolean acceptToRetire = acceptToRetire(candidate);
if ((! saturated() && hasCompatibleFlavor(candidate) && requestedNodes.acceptable(candidate)) || acceptToRetire) {
candidate = candidate.withNode();
if (candidate.isValid())
accepted.add(acceptNode(candidate, shouldRetire(candidate, nodesPrioritized), resizeable));
}
}
else if (! saturated() && hasCompatibleFlavor(candidate)) {
if (requestedNodes.type() == NodeType.tenant && ! nodeResourceLimits.isWithinRealLimits(candidate, cluster)) {
++rejectedDueToInsufficientRealResources;
continue;
}
if ( violatesParentHostPolicy(candidate)) {
++rejectedDueToClashingParentHost;
continue;
}
if ( violatesExclusivity(candidate)) {
++rejectedDueToExclusivity;
continue;
}
if (candidate.wantToRetire()) {
continue;
}
candidate = candidate.allocate(application,
ClusterMembership.from(cluster, nextIndex.get()),
requestedNodes.resources().orElse(candidate.resources()),
nodeRepository.clock().instant());
if (candidate.isValid())
accepted.add(acceptNode(candidate, false, false));
}
}
return accepted;
}
private boolean shouldRetire(NodeCandidate candidate, List<NodeCandidate> candidates) {
if ( ! requestedNodes.considerRetiring())
return candidate.allocation().map(a -> a.membership().retired()).orElse(false);
if ( ! nodeResourceLimits.isWithinRealLimits(candidate, cluster)) return true;
if (violatesParentHostPolicy(candidate)) return true;
if ( ! hasCompatibleFlavor(candidate)) return true;
if (candidate.wantToRetire()) return true;
if (candidate.preferToRetire() && candidate.replacableBy(candidates)) return true;
if (violatesExclusivity(candidate)) return true;
return false;
}
private boolean violatesParentHostPolicy(NodeCandidate candidate) {
return checkForClashingParentHost() && offeredNodeHasParentHostnameAlreadyAccepted(candidate);
}
private boolean checkForClashingParentHost() {
return nodeRepository.zone().system() == SystemName.main &&
nodeRepository.zone().environment().isProduction() &&
! application.instance().isTester();
}
private boolean offeredNodeHasParentHostnameAlreadyAccepted(NodeCandidate candidate) {
for (NodeCandidate acceptedNode : nodes.values()) {
if (acceptedNode.parentHostname().isPresent() && candidate.parentHostname().isPresent() &&
acceptedNode.parentHostname().get().equals(candidate.parentHostname().get())) {
return true;
}
}
return false;
}
private boolean violatesExclusivity(NodeCandidate candidate) {
if (candidate.parentHostname().isEmpty()) return false;
if (nodeRepository.zone().getCloud().dynamicProvisioning())
return requestedNodes.isExclusive() &&
! candidate.parent.flatMap(Node::exclusiveTo).map(application::equals).orElse(false);
for (Node nodeOnHost : allNodes.childrenOf(candidate.parentHostname().get())) {
if (nodeOnHost.allocation().isEmpty()) continue;
if (requestedNodes.isExclusive() || nodeOnHost.allocation().get().membership().cluster().isExclusive()) {
if ( ! nodeOnHost.allocation().get().owner().equals(application)) return true;
}
}
return false;
}
/**
* Returns whether this node should be accepted into the cluster even if it is not currently desired
* (already enough nodes, or wrong flavor).
* Such nodes will be marked retired during finalization of the list of accepted nodes.
* The conditions for this are:
*
* This is a stateful node. These must always be retired before being removed to allow the cluster to
* migrate away data.
*
* This is a container node and it is not desired due to having the wrong flavor. In this case this
* will (normally) obtain for all the current nodes in the cluster and so retiring before removing must
* be used to avoid removing all the current nodes at once, before the newly allocated replacements are
* initialized. (In the other case, where a container node is not desired because we have enough nodes we
* do want to remove it immediately to get immediate feedback on how the size reduction works out.)
*/
private boolean acceptToRetire(NodeCandidate candidate) {
if (candidate.state() != Node.State.active) return false;
if (! candidate.allocation().get().membership().cluster().group().equals(cluster.group())) return false;
if (candidate.allocation().get().membership().retired()) return true;
if (! requestedNodes.considerRetiring()) return false;
return cluster.isStateful() ||
(cluster.type() == ClusterSpec.Type.container && !hasCompatibleFlavor(candidate));
}
private boolean hasCompatibleFlavor(NodeCandidate candidate) {
return requestedNodes.isCompatible(candidate.flavor(), nodeRepository.flavors()) || candidate.isResizable;
}
private Node acceptNode(NodeCandidate candidate, boolean shouldRetire, boolean resizeable) {
Node node = candidate.toNode();
if (node.allocation().isPresent())
node = node.with(node.allocation().get().withRequestedResources(requestedNodes.resources().orElse(node.resources())));
if (! shouldRetire) {
accepted++;
if (node.allocation().isEmpty()
|| ! ( requestedNodes.needsResize(node) && node.allocation().get().membership().retired()))
acceptedWithoutResizingRetired++;
if (resizeable && ! ( node.allocation().isPresent() && node.allocation().get().membership().retired()))
node = resize(node);
if (node.state() != Node.State.active)
node = node.unretire().removable(false);
} else {
++wasRetiredJustNow;
node = node.retire(nodeRepository.clock().instant());
}
if ( ! node.allocation().get().membership().cluster().equals(cluster)) {
node = setCluster(cluster, node);
}
candidate = candidate.withNode(node);
indexes.add(node.allocation().get().membership().index());
nodes.put(node.hostname(), candidate);
return node;
}
private Node resize(Node node) {
NodeResources hostResources = allNodes.parentOf(node).get().flavor().resources();
return node.with(new Flavor(requestedNodes.resources().get()
.with(hostResources.diskSpeed())
.with(hostResources.storageType())));
}
private Node setCluster(ClusterSpec cluster, Node node) {
ClusterMembership membership = node.allocation().get().membership().with(cluster);
return node.with(node.allocation().get().with(membership));
}
/** Returns true if no more nodes are needed in this list */
private boolean saturated() {
return requestedNodes.saturatedBy(acceptedWithoutResizingRetired);
}
/** Returns true if the content of this list is sufficient to meet the request */
boolean fulfilled() {
return requestedNodes.fulfilledBy(accepted());
}
/** Returns true if this allocation was already fulfilled and resulted in no new changes */
public boolean fulfilledAndNoChanges() {
return fulfilled() && reservableNodes().isEmpty() && newNodes().isEmpty();
}
/**
* Returns {@link FlavorCount} describing the node deficit for the given {@link NodeSpec}.
*
* @return empty if the requested spec is already fulfilled. Otherwise returns {@link FlavorCount} containing the
* flavor and node count required to cover the deficit.
*/
Optional<FlavorCount> nodeDeficit() {
if (nodeType() != NodeType.config && nodeType() != NodeType.tenant) {
return Optional.empty();
}
return Optional.of(new FlavorCount(requestedNodes.resources().orElseGet(NodeResources::unspecified),
requestedNodes.fulfilledDeficitCount(accepted())))
.filter(flavorCount -> flavorCount.getCount() > 0);
}
/** Returns the indices to use when provisioning hosts for this */
/** The node type this is allocating */
NodeType nodeType() {
return requestedNodes.type();
}
/**
* Make the number of <i>non-retired</i> nodes in the list equal to the requested number
* of nodes, and retire the rest of the list. Only retire currently active nodes.
* Prefer to retire nodes of the wrong flavor.
* Make as few changes to the retired set as possible.
*
* @return the final list of nodes
*/
List<Node> finalNodes() {
int currentRetiredCount = (int) nodes.values().stream().filter(node -> node.allocation().get().membership().retired()).count();
int deltaRetiredCount = requestedNodes.idealRetiredCount(nodes.size(), currentRetiredCount) - currentRetiredCount;
if (deltaRetiredCount > 0) {
for (NodeCandidate candidate : byRetiringPriority(nodes.values())) {
if ( ! candidate.allocation().get().membership().retired() && candidate.state() == Node.State.active) {
candidate = candidate.withNode();
candidate = candidate.withNode(candidate.toNode().retire(Agent.application, nodeRepository.clock().instant()));
nodes.put(candidate.toNode().hostname(), candidate);
if (--deltaRetiredCount == 0) break;
}
}
}
else if (deltaRetiredCount < 0) {
for (NodeCandidate candidate : byUnretiringPriority(nodes.values())) {
if ( candidate.allocation().get().membership().retired() && hasCompatibleFlavor(candidate) ) {
candidate = candidate.withNode();
if (candidate.isResizable)
candidate = candidate.withNode(resize(candidate.toNode()));
candidate = candidate.withNode(candidate.toNode().unretire());
nodes.put(candidate.toNode().hostname(), candidate);
if (++deltaRetiredCount == 0) break;
}
}
}
for (NodeCandidate candidate : nodes.values()) {
candidate = candidate.withNode();
Allocation allocation = candidate.allocation().get();
candidate = candidate.withNode(candidate.toNode().with(allocation.with(allocation.membership()
.with(allocation.membership().cluster().exclusive(requestedNodes.isExclusive())))));
nodes.put(candidate.toNode().hostname(), candidate);
}
return nodes.values().stream().map(n -> n.toNode()).collect(Collectors.toList());
}
List<Node> reservableNodes() {
EnumSet<Node.State> reservableStates = EnumSet.of(Node.State.inactive, Node.State.ready, Node.State.reserved);
return nodesFilter(n -> ! n.isNew && reservableStates.contains(n.state()));
}
List<Node> newNodes() {
return nodesFilter(n -> n.isNew);
}
private List<Node> nodesFilter(Predicate<NodeCandidate> predicate) {
return nodes.values().stream()
.filter(predicate)
.map(n -> n.toNode())
.collect(Collectors.toList());
}
/** Returns the number of nodes accepted this far */
private int accepted() {
if (nodeType() == NodeType.tenant) return accepted;
return allNodes.nodeType(nodeType()).size();
}
/** Prefer to retire nodes we want the least */
private List<NodeCandidate> byRetiringPriority(Collection<NodeCandidate> candidates) {
return candidates.stream().sorted(Comparator.reverseOrder()).collect(Collectors.toList());
}
/** Prefer to unretire nodes we don't want to retire, and otherwise those with lower index */
private List<NodeCandidate> byUnretiringPriority(Collection<NodeCandidate> candidates) {
return candidates.stream()
.sorted(Comparator.comparing(NodeCandidate::wantToRetire)
.thenComparing(n -> n.allocation().get().membership().index()))
.collect(Collectors.toList());
}
public String outOfCapacityDetails() {
List<String> reasons = new ArrayList<>();
if (rejectedDueToExclusivity > 0)
reasons.add("host exclusivity constraints");
if (rejectedDueToClashingParentHost > 0)
reasons.add("insufficient nodes available on separate physical hosts");
if (wasRetiredJustNow > 0)
reasons.add("retirement of allocated nodes");
if (rejectedDueToInsufficientRealResources > 0)
reasons.add("insufficient real resources on hosts");
if (reasons.isEmpty()) return "";
return ": Not enough nodes available due to " + String.join(", ", reasons);
}
private static Integer parseIndex(String hostname) {
try {
return Integer.parseInt(hostname.replaceFirst("^\\D+(\\d+)\\..*", "$1"));
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Could not parse index from hostname '" + hostname + "'", e);
}
}
static class FlavorCount {
private final NodeResources flavor;
private final int count;
private FlavorCount(NodeResources flavor, int count) {
this.flavor = flavor;
this.count = count;
}
NodeResources getFlavor() {
return flavor;
}
int getCount() {
return count;
}
}
} |
It does look like the initial index maps to hostname index with the +1 relationship, but in zones that have replaced a cfg, their index is >= 3. We have not started filling index gaps, so I guess we will over time return to all `config` being in [0-2], but isn't it possible if they get reprovisioned in "wrong" order, this breaks? E.g.: Say cfg1 has index 5, cfg2 has 6, cfg3 has 2. If we attempt to reprovision cfg2, it will be given index 1, which will overwrite the original cfg1's DNS records? | List<Integer> provisionIndices(int count) {
if (count == 0) return List.of();
NodeType hostType = requestedNodes.type().hostType();
if (hostType == NodeType.host) return nodeRepository.database().readProvisionIndices(count);
int offset = 1;
Set<Integer> currentIndices = allNodes.nodeType(hostType)
.stream()
.map(node -> node.allocation().get().membership().index())
.map(index -> index + offset)
.collect(Collectors.toSet());
List<Integer> indices = new ArrayList<>(count);
for (int i = offset; indices.size() < count; i++) {
if (!currentIndices.contains(i)) {
indices.add(i);
}
}
return indices;
} | Set<Integer> currentIndices = allNodes.nodeType(hostType) | List<Integer> provisionIndices(int count) {
if (count < 1) throw new IllegalArgumentException("Count must be positive");
NodeType hostType = requestedNodes.type().hostType();
if (hostType == NodeType.host) return nodeRepository.database().readProvisionIndices(count);
Set<Integer> currentIndices = allNodes.nodeType(hostType)
.stream()
.map(Node::hostname)
.map(NodeAllocation::parseIndex)
.collect(Collectors.toSet());
List<Integer> indices = new ArrayList<>(count);
for (int i = 1; indices.size() < count; i++) {
if (!currentIndices.contains(i)) {
indices.add(i);
}
}
return indices;
} | class NodeAllocation {
/** List of all nodes in node-repository */
private final NodeList allNodes;
/** The application this list is for */
private final ApplicationId application;
/** The cluster this list is for */
private final ClusterSpec cluster;
/** The requested nodes of this list */
private final NodeSpec requestedNodes;
/** The node candidates this has accepted so far, keyed on hostname */
private final Map<String, NodeCandidate> nodes = new LinkedHashMap<>();
/** The number of already allocated nodes accepted and not retired */
private int accepted = 0;
/** The number of already allocated nodes accepted and not retired and not needing resize */
private int acceptedWithoutResizingRetired = 0;
/** The number of nodes rejected because of clashing parentHostname */
private int rejectedDueToClashingParentHost = 0;
/** The number of nodes rejected due to exclusivity constraints */
private int rejectedDueToExclusivity = 0;
private int rejectedDueToInsufficientRealResources = 0;
/** The number of nodes that just now was changed to retired */
private int wasRetiredJustNow = 0;
/** The node indexes to verify uniqueness of each members index */
private final Set<Integer> indexes = new HashSet<>();
/** The next membership index to assign to a new node */
private final Supplier<Integer> nextIndex;
private final NodeRepository nodeRepository;
private final NodeResourceLimits nodeResourceLimits;
/**
 * Creates a node allocation for one cluster of one application.
 *
 * @param allNodes all nodes in the node repository, used to check repository-wide constraints
 * @param application the application to allocate nodes for
 * @param cluster the cluster of that application to allocate nodes for
 * @param requestedNodes the specification of the nodes requested
 * @param nextIndex supplies the next membership index to assign to a newly allocated node
 * @param nodeRepository the node repository backing this allocation
 */
NodeAllocation(NodeList allNodes, ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes,
Supplier<Integer> nextIndex, NodeRepository nodeRepository) {
this.allNodes = allNodes;
this.application = application;
this.cluster = cluster;
this.requestedNodes = requestedNodes;
this.nextIndex = nextIndex;
this.nodeRepository = nodeRepository;
nodeResourceLimits = new NodeResourceLimits(nodeRepository);
}
/**
* Offer some nodes to this. The nodes may have an allocation to a different application or cluster,
* an allocation to this cluster, or no current allocation (in which case one is assigned).
*
* Note that if unallocated nodes are offered before allocated nodes, this will unnecessarily
* reject allocated nodes due to index duplicates.
*
* @param nodesPrioritized the nodes which are potentially on offer. These may belong to a different application etc.
* @return the subset of offeredNodes which was accepted, with the correct allocation assigned
*/
List<Node> offer(List<NodeCandidate> nodesPrioritized) {
List<Node> accepted = new ArrayList<>();
for (NodeCandidate candidate : nodesPrioritized) {
if (candidate.allocation().isPresent()) {
// Candidate already has an allocation: keep it only if it belongs to this application/cluster
Allocation allocation = candidate.allocation().get();
ClusterMembership membership = allocation.membership();
if ( ! allocation.owner().equals(application)) continue;
if ( ! membership.cluster().satisfies(cluster)) continue;
// Surplus nodes may fill any group; otherwise the candidate's group must match the requested group
if ((! candidate.isSurplus || saturated()) && ! membership.cluster().group().equals(cluster.group())) continue;
// Skip active nodes already marked for removal
if ( candidate.state() == Node.State.active && allocation.isRemovable()) continue;
// Reject duplicate membership indexes (see class javadoc on offering order)
if ( indexes.contains(membership.index())) continue;
boolean resizeable = requestedNodes.considerRetiring() && candidate.isResizable;
boolean acceptToRetire = acceptToRetire(candidate);
// Accept if still wanted (capacity and flavor permitting), or if it must be kept around as retired
if ((! saturated() && hasCompatibleFlavor(candidate) && requestedNodes.acceptable(candidate)) || acceptToRetire) {
candidate = candidate.withNode();
if (candidate.isValid())
accepted.add(acceptNode(candidate, shouldRetire(candidate, nodesPrioritized), resizeable));
}
}
else if (! saturated() && hasCompatibleFlavor(candidate)) {
// Unallocated candidate: check placement policies before assigning a new allocation
if (requestedNodes.type() == NodeType.tenant && ! nodeResourceLimits.isWithinRealLimits(candidate, cluster)) {
++rejectedDueToInsufficientRealResources;
continue;
}
if ( violatesParentHostPolicy(candidate)) {
++rejectedDueToClashingParentHost;
continue;
}
if ( violatesExclusivity(candidate)) {
++rejectedDueToExclusivity;
continue;
}
// Don't allocate new nodes onto hosts that want to retire
if (candidate.wantToRetire()) {
continue;
}
// Assign a fresh allocation with the next free membership index
candidate = candidate.allocate(application,
ClusterMembership.from(cluster, nextIndex.get()),
requestedNodes.resources().orElse(candidate.resources()),
nodeRepository.clock().instant());
if (candidate.isValid())
accepted.add(acceptNode(candidate, false, false));
}
}
return accepted;
}
/**
 * Returns whether the given candidate should be accepted in retired state:
 * kept in the cluster, but no longer wanted in its current form.
 */
private boolean shouldRetire(NodeCandidate candidate, List<NodeCandidate> candidates) {
// When retiring is not to be considered, only keep an already-retired allocation retired
if ( ! requestedNodes.considerRetiring())
return candidate.allocation().map(a -> a.membership().retired()).orElse(false);
// Retire when the candidate no longer satisfies this request or its placement policies
if ( ! nodeResourceLimits.isWithinRealLimits(candidate, cluster)) return true;
if (violatesParentHostPolicy(candidate)) return true;
if ( ! hasCompatibleFlavor(candidate)) return true;
if (candidate.wantToRetire()) return true;
// Retire by preference only when a replacement candidate is actually available
if (candidate.preferToRetire() && candidate.replacableBy(candidates)) return true;
if (violatesExclusivity(candidate)) return true;
return false;
}
private boolean violatesParentHostPolicy(NodeCandidate candidate) {
return checkForClashingParentHost() && offeredNodeHasParentHostnameAlreadyAccepted(candidate);
}
private boolean checkForClashingParentHost() {
return nodeRepository.zone().system() == SystemName.main &&
nodeRepository.zone().environment().isProduction() &&
! application.instance().isTester();
}
/** Returns whether some already-accepted candidate lives on the same parent host as the given candidate. */
private boolean offeredNodeHasParentHostnameAlreadyAccepted(NodeCandidate candidate) {
    if (candidate.parentHostname().isEmpty()) return false;
    // Optionals compare equal only when both are present with equal values,
    // so this matches exactly the "both present and equal" condition
    return nodes.values().stream()
                .anyMatch(accepted -> accepted.parentHostname().equals(candidate.parentHostname()));
}
private boolean violatesExclusivity(NodeCandidate candidate) {
if (candidate.parentHostname().isEmpty()) return false;
if (nodeRepository.zone().getCloud().dynamicProvisioning())
return requestedNodes.isExclusive() &&
! candidate.parent.flatMap(Node::exclusiveTo).map(application::equals).orElse(false);
for (Node nodeOnHost : allNodes.childrenOf(candidate.parentHostname().get())) {
if (nodeOnHost.allocation().isEmpty()) continue;
if (requestedNodes.isExclusive() || nodeOnHost.allocation().get().membership().cluster().isExclusive()) {
if ( ! nodeOnHost.allocation().get().owner().equals(application)) return true;
}
}
return false;
}
/**
* Returns whether this node should be accepted into the cluster even if it is not currently desired
* (already enough nodes, or wrong flavor).
* Such nodes will be marked retired during finalization of the list of accepted nodes.
* The conditions for this are:
*
* This is a stateful node. These must always be retired before being removed to allow the cluster to
* migrate away data.
*
* This is a container node and it is not desired due to having the wrong flavor. In this case this
* will (normally) obtain for all the current nodes in the cluster and so retiring before removing must
* be used to avoid removing all the current nodes at once, before the newly allocated replacements are
* initialized. (In the other case, where a container node is not desired because we have enough nodes we
* do want to remove it immediately to get immediate feedback on how the size reduction works out.)
*/
private boolean acceptToRetire(NodeCandidate candidate) {
if (candidate.state() != Node.State.active) return false;
if (! candidate.allocation().get().membership().cluster().group().equals(cluster.group())) return false;
if (candidate.allocation().get().membership().retired()) return true;
if (! requestedNodes.considerRetiring()) return false;
return cluster.isStateful() ||
(cluster.type() == ClusterSpec.Type.container && !hasCompatibleFlavor(candidate));
}
// A candidate is flavor-compatible if its flavor satisfies the request, or it can be resized into compliance
private boolean hasCompatibleFlavor(NodeCandidate candidate) {
return requestedNodes.isCompatible(candidate.flavor(), nodeRepository.flavors()) || candidate.isResizable;
}
private Node acceptNode(NodeCandidate candidate, boolean shouldRetire, boolean resizeable) {
Node node = candidate.toNode();
if (node.allocation().isPresent())
node = node.with(node.allocation().get().withRequestedResources(requestedNodes.resources().orElse(node.resources())));
if (! shouldRetire) {
accepted++;
if (node.allocation().isEmpty()
|| ! ( requestedNodes.needsResize(node) && node.allocation().get().membership().retired()))
acceptedWithoutResizingRetired++;
if (resizeable && ! ( node.allocation().isPresent() && node.allocation().get().membership().retired()))
node = resize(node);
if (node.state() != Node.State.active)
node = node.unretire().removable(false);
} else {
++wasRetiredJustNow;
node = node.retire(nodeRepository.clock().instant());
}
if ( ! node.allocation().get().membership().cluster().equals(cluster)) {
node = setCluster(cluster, node);
}
candidate = candidate.withNode(node);
indexes.add(node.allocation().get().membership().index());
nodes.put(node.hostname(), candidate);
return node;
}
private Node resize(Node node) {
NodeResources hostResources = allNodes.parentOf(node).get().flavor().resources();
return node.with(new Flavor(requestedNodes.resources().get()
.with(hostResources.diskSpeed())
.with(hostResources.storageType())));
}
/** Returns a copy of the given node with its cluster membership rewritten to the given cluster spec. */
private Node setCluster(ClusterSpec cluster, Node node) {
    Allocation allocation = node.allocation().get();
    return node.with(allocation.with(allocation.membership().with(cluster)));
}
/** Returns true if no more nodes are needed in this list */
private boolean saturated() {
return requestedNodes.saturatedBy(acceptedWithoutResizingRetired);
}
/** Returns true if the content of this list is sufficient to meet the request */
boolean fulfilled() {
return requestedNodes.fulfilledBy(accepted());
}
/** Returns true if this allocation was already fulfilled and resulted in no new changes */
public boolean fulfilledAndNoChanges() {
return fulfilled() && reservableNodes().isEmpty() && newNodes().isEmpty();
}
/**
* Returns {@link FlavorCount} describing the node deficit for the given {@link NodeSpec}.
*
* @return empty if the requested spec is already fulfilled. Otherwise returns {@link FlavorCount} containing the
* flavor and node count required to cover the deficit.
*/
Optional<FlavorCount> nodeDeficit() {
if (nodeType() != NodeType.config && nodeType() != NodeType.tenant) {
return Optional.empty();
}
return Optional.of(new FlavorCount(requestedNodes.resources().orElseGet(NodeResources::unspecified),
requestedNodes.fulfilledDeficitCount(accepted())))
.filter(flavorCount -> flavorCount.getCount() > 0);
}
/** Returns the indices to use when provisioning hosts for this */
/** The node type this is allocating */
NodeType nodeType() {
return requestedNodes.type();
}
/**
* Make the number of <i>non-retired</i> nodes in the list equal to the requested number
* of nodes, and retire the rest of the list. Only retire currently active nodes.
* Prefer to retire nodes of the wrong flavor.
* Make as few changes to the retired set as possible.
*
* @return the final list of nodes
*/
List<Node> finalNodes() {
int currentRetiredCount = (int) nodes.values().stream().filter(node -> node.allocation().get().membership().retired()).count();
int deltaRetiredCount = requestedNodes.idealRetiredCount(nodes.size(), currentRetiredCount) - currentRetiredCount;
if (deltaRetiredCount > 0) {
for (NodeCandidate candidate : byRetiringPriority(nodes.values())) {
if ( ! candidate.allocation().get().membership().retired() && candidate.state() == Node.State.active) {
candidate = candidate.withNode();
candidate = candidate.withNode(candidate.toNode().retire(Agent.application, nodeRepository.clock().instant()));
nodes.put(candidate.toNode().hostname(), candidate);
if (--deltaRetiredCount == 0) break;
}
}
}
else if (deltaRetiredCount < 0) {
for (NodeCandidate candidate : byUnretiringPriority(nodes.values())) {
if ( candidate.allocation().get().membership().retired() && hasCompatibleFlavor(candidate) ) {
candidate = candidate.withNode();
if (candidate.isResizable)
candidate = candidate.withNode(resize(candidate.toNode()));
candidate = candidate.withNode(candidate.toNode().unretire());
nodes.put(candidate.toNode().hostname(), candidate);
if (++deltaRetiredCount == 0) break;
}
}
}
for (NodeCandidate candidate : nodes.values()) {
candidate = candidate.withNode();
Allocation allocation = candidate.allocation().get();
candidate = candidate.withNode(candidate.toNode().with(allocation.with(allocation.membership()
.with(allocation.membership().cluster().exclusive(requestedNodes.isExclusive())))));
nodes.put(candidate.toNode().hostname(), candidate);
}
return nodes.values().stream().map(n -> n.toNode()).collect(Collectors.toList());
}
List<Node> reservableNodes() {
EnumSet<Node.State> reservableStates = EnumSet.of(Node.State.inactive, Node.State.ready, Node.State.reserved);
return nodesFilter(n -> ! n.isNew && reservableStates.contains(n.state()));
}
List<Node> newNodes() {
return nodesFilter(n -> n.isNew);
}
/** Returns the nodes of the accepted candidates matching the given predicate. */
private List<Node> nodesFilter(Predicate<NodeCandidate> predicate) {
    List<Node> matching = new ArrayList<>();
    for (NodeCandidate candidate : nodes.values()) {
        if (predicate.test(candidate))
            matching.add(candidate.toNode());
    }
    return matching;
}
/** Returns the number of nodes accepted this far */
private int accepted() {
if (nodeType() == NodeType.tenant) return accepted;
return allNodes.nodeType(nodeType()).size();
}
/** Prefer to retire nodes we want the least */
private List<NodeCandidate> byRetiringPriority(Collection<NodeCandidate> candidates) {
return candidates.stream().sorted(Comparator.reverseOrder()).collect(Collectors.toList());
}
/** Prefer to unretire nodes we don't want to retire, and otherwise those with lower index */
private List<NodeCandidate> byUnretiringPriority(Collection<NodeCandidate> candidates) {
return candidates.stream()
.sorted(Comparator.comparing(NodeCandidate::wantToRetire)
.thenComparing(n -> n.allocation().get().membership().index()))
.collect(Collectors.toList());
}
public String outOfCapacityDetails() {
List<String> reasons = new ArrayList<>();
if (rejectedDueToExclusivity > 0)
reasons.add("host exclusivity constraints");
if (rejectedDueToClashingParentHost > 0)
reasons.add("insufficient nodes available on separate physical hosts");
if (wasRetiredJustNow > 0)
reasons.add("retirement of allocated nodes");
if (rejectedDueToInsufficientRealResources > 0)
reasons.add("insufficient real resources on hosts");
if (reasons.isEmpty()) return "";
return ": Not enough nodes available due to " + String.join(", ", reasons);
}
static class FlavorCount {
private final NodeResources flavor;
private final int count;
private FlavorCount(NodeResources flavor, int count) {
this.flavor = flavor;
this.count = count;
}
NodeResources getFlavor() {
return flavor;
}
int getCount() {
return count;
}
}
} | class NodeAllocation {
/** List of all nodes in node-repository */
private final NodeList allNodes;
/** The application this list is for */
private final ApplicationId application;
/** The cluster this list is for */
private final ClusterSpec cluster;
/** The requested nodes of this list */
private final NodeSpec requestedNodes;
/** The node candidates this has accepted so far, keyed on hostname */
private final Map<String, NodeCandidate> nodes = new LinkedHashMap<>();
/** The number of already allocated nodes accepted and not retired */
private int accepted = 0;
/** The number of already allocated nodes accepted and not retired and not needing resize */
private int acceptedWithoutResizingRetired = 0;
/** The number of nodes rejected because of clashing parentHostname */
private int rejectedDueToClashingParentHost = 0;
/** The number of nodes rejected due to exclusivity constraints */
private int rejectedDueToExclusivity = 0;
private int rejectedDueToInsufficientRealResources = 0;
/** The number of nodes that just now was changed to retired */
private int wasRetiredJustNow = 0;
/** The node indexes to verify uniqueness of each members index */
private final Set<Integer> indexes = new HashSet<>();
/** The next membership index to assign to a new node */
private final Supplier<Integer> nextIndex;
private final NodeRepository nodeRepository;
private final NodeResourceLimits nodeResourceLimits;
NodeAllocation(NodeList allNodes, ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes,
Supplier<Integer> nextIndex, NodeRepository nodeRepository) {
this.allNodes = allNodes;
this.application = application;
this.cluster = cluster;
this.requestedNodes = requestedNodes;
this.nextIndex = nextIndex;
this.nodeRepository = nodeRepository;
nodeResourceLimits = new NodeResourceLimits(nodeRepository);
}
/**
* Offer some nodes to this. The nodes may have an allocation to a different application or cluster,
* an allocation to this cluster, or no current allocation (in which case one is assigned).
*
* Note that if unallocated nodes are offered before allocated nodes, this will unnecessarily
* reject allocated nodes due to index duplicates.
*
* @param nodesPrioritized the nodes which are potentially on offer. These may belong to a different application etc.
* @return the subset of offeredNodes which was accepted, with the correct allocation assigned
*/
List<Node> offer(List<NodeCandidate> nodesPrioritized) {
List<Node> accepted = new ArrayList<>();
for (NodeCandidate candidate : nodesPrioritized) {
if (candidate.allocation().isPresent()) {
Allocation allocation = candidate.allocation().get();
ClusterMembership membership = allocation.membership();
if ( ! allocation.owner().equals(application)) continue;
if ( ! membership.cluster().satisfies(cluster)) continue;
if ((! candidate.isSurplus || saturated()) && ! membership.cluster().group().equals(cluster.group())) continue;
if ( candidate.state() == Node.State.active && allocation.isRemovable()) continue;
if ( indexes.contains(membership.index())) continue;
boolean resizeable = requestedNodes.considerRetiring() && candidate.isResizable;
boolean acceptToRetire = acceptToRetire(candidate);
if ((! saturated() && hasCompatibleFlavor(candidate) && requestedNodes.acceptable(candidate)) || acceptToRetire) {
candidate = candidate.withNode();
if (candidate.isValid())
accepted.add(acceptNode(candidate, shouldRetire(candidate, nodesPrioritized), resizeable));
}
}
else if (! saturated() && hasCompatibleFlavor(candidate)) {
if (requestedNodes.type() == NodeType.tenant && ! nodeResourceLimits.isWithinRealLimits(candidate, cluster)) {
++rejectedDueToInsufficientRealResources;
continue;
}
if ( violatesParentHostPolicy(candidate)) {
++rejectedDueToClashingParentHost;
continue;
}
if ( violatesExclusivity(candidate)) {
++rejectedDueToExclusivity;
continue;
}
if (candidate.wantToRetire()) {
continue;
}
candidate = candidate.allocate(application,
ClusterMembership.from(cluster, nextIndex.get()),
requestedNodes.resources().orElse(candidate.resources()),
nodeRepository.clock().instant());
if (candidate.isValid())
accepted.add(acceptNode(candidate, false, false));
}
}
return accepted;
}
private boolean shouldRetire(NodeCandidate candidate, List<NodeCandidate> candidates) {
if ( ! requestedNodes.considerRetiring())
return candidate.allocation().map(a -> a.membership().retired()).orElse(false);
if ( ! nodeResourceLimits.isWithinRealLimits(candidate, cluster)) return true;
if (violatesParentHostPolicy(candidate)) return true;
if ( ! hasCompatibleFlavor(candidate)) return true;
if (candidate.wantToRetire()) return true;
if (candidate.preferToRetire() && candidate.replacableBy(candidates)) return true;
if (violatesExclusivity(candidate)) return true;
return false;
}
private boolean violatesParentHostPolicy(NodeCandidate candidate) {
return checkForClashingParentHost() && offeredNodeHasParentHostnameAlreadyAccepted(candidate);
}
private boolean checkForClashingParentHost() {
return nodeRepository.zone().system() == SystemName.main &&
nodeRepository.zone().environment().isProduction() &&
! application.instance().isTester();
}
private boolean offeredNodeHasParentHostnameAlreadyAccepted(NodeCandidate candidate) {
for (NodeCandidate acceptedNode : nodes.values()) {
if (acceptedNode.parentHostname().isPresent() && candidate.parentHostname().isPresent() &&
acceptedNode.parentHostname().get().equals(candidate.parentHostname().get())) {
return true;
}
}
return false;
}
private boolean violatesExclusivity(NodeCandidate candidate) {
if (candidate.parentHostname().isEmpty()) return false;
if (nodeRepository.zone().getCloud().dynamicProvisioning())
return requestedNodes.isExclusive() &&
! candidate.parent.flatMap(Node::exclusiveTo).map(application::equals).orElse(false);
for (Node nodeOnHost : allNodes.childrenOf(candidate.parentHostname().get())) {
if (nodeOnHost.allocation().isEmpty()) continue;
if (requestedNodes.isExclusive() || nodeOnHost.allocation().get().membership().cluster().isExclusive()) {
if ( ! nodeOnHost.allocation().get().owner().equals(application)) return true;
}
}
return false;
}
/**
* Returns whether this node should be accepted into the cluster even if it is not currently desired
* (already enough nodes, or wrong flavor).
* Such nodes will be marked retired during finalization of the list of accepted nodes.
* The conditions for this are:
*
* This is a stateful node. These must always be retired before being removed to allow the cluster to
* migrate away data.
*
* This is a container node and it is not desired due to having the wrong flavor. In this case this
* will (normally) obtain for all the current nodes in the cluster and so retiring before removing must
* be used to avoid removing all the current nodes at once, before the newly allocated replacements are
* initialized. (In the other case, where a container node is not desired because we have enough nodes we
* do want to remove it immediately to get immediate feedback on how the size reduction works out.)
*/
private boolean acceptToRetire(NodeCandidate candidate) {
if (candidate.state() != Node.State.active) return false;
if (! candidate.allocation().get().membership().cluster().group().equals(cluster.group())) return false;
if (candidate.allocation().get().membership().retired()) return true;
if (! requestedNodes.considerRetiring()) return false;
return cluster.isStateful() ||
(cluster.type() == ClusterSpec.Type.container && !hasCompatibleFlavor(candidate));
}
private boolean hasCompatibleFlavor(NodeCandidate candidate) {
return requestedNodes.isCompatible(candidate.flavor(), nodeRepository.flavors()) || candidate.isResizable;
}
private Node acceptNode(NodeCandidate candidate, boolean shouldRetire, boolean resizeable) {
Node node = candidate.toNode();
if (node.allocation().isPresent())
node = node.with(node.allocation().get().withRequestedResources(requestedNodes.resources().orElse(node.resources())));
if (! shouldRetire) {
accepted++;
if (node.allocation().isEmpty()
|| ! ( requestedNodes.needsResize(node) && node.allocation().get().membership().retired()))
acceptedWithoutResizingRetired++;
if (resizeable && ! ( node.allocation().isPresent() && node.allocation().get().membership().retired()))
node = resize(node);
if (node.state() != Node.State.active)
node = node.unretire().removable(false);
} else {
++wasRetiredJustNow;
node = node.retire(nodeRepository.clock().instant());
}
if ( ! node.allocation().get().membership().cluster().equals(cluster)) {
node = setCluster(cluster, node);
}
candidate = candidate.withNode(node);
indexes.add(node.allocation().get().membership().index());
nodes.put(node.hostname(), candidate);
return node;
}
private Node resize(Node node) {
NodeResources hostResources = allNodes.parentOf(node).get().flavor().resources();
return node.with(new Flavor(requestedNodes.resources().get()
.with(hostResources.diskSpeed())
.with(hostResources.storageType())));
}
private Node setCluster(ClusterSpec cluster, Node node) {
ClusterMembership membership = node.allocation().get().membership().with(cluster);
return node.with(node.allocation().get().with(membership));
}
/** Returns true if no more nodes are needed in this list */
private boolean saturated() {
return requestedNodes.saturatedBy(acceptedWithoutResizingRetired);
}
/** Returns true if the content of this list is sufficient to meet the request */
boolean fulfilled() {
return requestedNodes.fulfilledBy(accepted());
}
/** Returns true if this allocation was already fulfilled and resulted in no new changes */
public boolean fulfilledAndNoChanges() {
return fulfilled() && reservableNodes().isEmpty() && newNodes().isEmpty();
}
/**
* Returns {@link FlavorCount} describing the node deficit for the given {@link NodeSpec}.
*
* @return empty if the requested spec is already fulfilled. Otherwise returns {@link FlavorCount} containing the
* flavor and node count required to cover the deficit.
*/
Optional<FlavorCount> nodeDeficit() {
if (nodeType() != NodeType.config && nodeType() != NodeType.tenant) {
return Optional.empty();
}
return Optional.of(new FlavorCount(requestedNodes.resources().orElseGet(NodeResources::unspecified),
requestedNodes.fulfilledDeficitCount(accepted())))
.filter(flavorCount -> flavorCount.getCount() > 0);
}
/** Returns the indices to use when provisioning hosts for this */
/** The node type this is allocating */
NodeType nodeType() {
return requestedNodes.type();
}
/**
* Make the number of <i>non-retired</i> nodes in the list equal to the requested number
* of nodes, and retire the rest of the list. Only retire currently active nodes.
* Prefer to retire nodes of the wrong flavor.
* Make as few changes to the retired set as possible.
*
* @return the final list of nodes
*/
List<Node> finalNodes() {
int currentRetiredCount = (int) nodes.values().stream().filter(node -> node.allocation().get().membership().retired()).count();
int deltaRetiredCount = requestedNodes.idealRetiredCount(nodes.size(), currentRetiredCount) - currentRetiredCount;
if (deltaRetiredCount > 0) {
for (NodeCandidate candidate : byRetiringPriority(nodes.values())) {
if ( ! candidate.allocation().get().membership().retired() && candidate.state() == Node.State.active) {
candidate = candidate.withNode();
candidate = candidate.withNode(candidate.toNode().retire(Agent.application, nodeRepository.clock().instant()));
nodes.put(candidate.toNode().hostname(), candidate);
if (--deltaRetiredCount == 0) break;
}
}
}
else if (deltaRetiredCount < 0) {
for (NodeCandidate candidate : byUnretiringPriority(nodes.values())) {
if ( candidate.allocation().get().membership().retired() && hasCompatibleFlavor(candidate) ) {
candidate = candidate.withNode();
if (candidate.isResizable)
candidate = candidate.withNode(resize(candidate.toNode()));
candidate = candidate.withNode(candidate.toNode().unretire());
nodes.put(candidate.toNode().hostname(), candidate);
if (++deltaRetiredCount == 0) break;
}
}
}
for (NodeCandidate candidate : nodes.values()) {
candidate = candidate.withNode();
Allocation allocation = candidate.allocation().get();
candidate = candidate.withNode(candidate.toNode().with(allocation.with(allocation.membership()
.with(allocation.membership().cluster().exclusive(requestedNodes.isExclusive())))));
nodes.put(candidate.toNode().hostname(), candidate);
}
return nodes.values().stream().map(n -> n.toNode()).collect(Collectors.toList());
}
List<Node> reservableNodes() {
EnumSet<Node.State> reservableStates = EnumSet.of(Node.State.inactive, Node.State.ready, Node.State.reserved);
return nodesFilter(n -> ! n.isNew && reservableStates.contains(n.state()));
}
List<Node> newNodes() {
return nodesFilter(n -> n.isNew);
}
private List<Node> nodesFilter(Predicate<NodeCandidate> predicate) {
return nodes.values().stream()
.filter(predicate)
.map(n -> n.toNode())
.collect(Collectors.toList());
}
/** Returns the number of nodes accepted this far */
private int accepted() {
if (nodeType() == NodeType.tenant) return accepted;
return allNodes.nodeType(nodeType()).size();
}
/** Prefer to retire nodes we want the least */
private List<NodeCandidate> byRetiringPriority(Collection<NodeCandidate> candidates) {
return candidates.stream().sorted(Comparator.reverseOrder()).collect(Collectors.toList());
}
/** Prefer to unretire nodes we don't want to retire, and otherwise those with lower index */
private List<NodeCandidate> byUnretiringPriority(Collection<NodeCandidate> candidates) {
return candidates.stream()
.sorted(Comparator.comparing(NodeCandidate::wantToRetire)
.thenComparing(n -> n.allocation().get().membership().index()))
.collect(Collectors.toList());
}
public String outOfCapacityDetails() {
List<String> reasons = new ArrayList<>();
if (rejectedDueToExclusivity > 0)
reasons.add("host exclusivity constraints");
if (rejectedDueToClashingParentHost > 0)
reasons.add("insufficient nodes available on separate physical hosts");
if (wasRetiredJustNow > 0)
reasons.add("retirement of allocated nodes");
if (rejectedDueToInsufficientRealResources > 0)
reasons.add("insufficient real resources on hosts");
if (reasons.isEmpty()) return "";
return ": Not enough nodes available due to " + String.join(", ", reasons);
}
/** Extracts the numeric index embedded in a host name, e.g. 2 from {@code cfg2.example.com}. */
private static Integer parseIndex(String hostname) {
    String digits = hostname.replaceFirst("^\\D+(\\d+)\\..*", "$1");
    try {
        return Integer.valueOf(digits);
    } catch (NumberFormatException e) {
        throw new IllegalArgumentException("Could not parse index from hostname '" + hostname + "'", e);
    }
}
static class FlavorCount {
private final NodeResources flavor;
private final int count;
private FlavorCount(NodeResources flavor, int count) {
this.flavor = flavor;
this.count = count;
}
NodeResources getFlavor() {
return flavor;
}
int getCount() {
return count;
}
}
} |
True, it can throw instead. | List<Integer> provisionIndices(int count) {
if (count == 0) return List.of();
NodeType hostType = requestedNodes.type().hostType();
if (hostType == NodeType.host) return nodeRepository.database().readProvisionIndices(count);
int offset = 1;
Set<Integer> currentIndices = allNodes.nodeType(hostType)
.stream()
.map(node -> node.allocation().get().membership().index())
.map(index -> index + offset)
.collect(Collectors.toSet());
List<Integer> indices = new ArrayList<>(count);
for (int i = offset; indices.size() < count; i++) {
if (!currentIndices.contains(i)) {
indices.add(i);
}
}
return indices;
} | if (count == 0) return List.of(); | List<Integer> provisionIndices(int count) {
if (count < 1) throw new IllegalArgumentException("Count must be positive");
NodeType hostType = requestedNodes.type().hostType();
if (hostType == NodeType.host) return nodeRepository.database().readProvisionIndices(count);
Set<Integer> currentIndices = allNodes.nodeType(hostType)
.stream()
.map(Node::hostname)
.map(NodeAllocation::parseIndex)
.collect(Collectors.toSet());
List<Integer> indices = new ArrayList<>(count);
for (int i = 1; indices.size() < count; i++) {
if (!currentIndices.contains(i)) {
indices.add(i);
}
}
return indices;
} | class NodeAllocation {
/** List of all nodes in node-repository */
private final NodeList allNodes;
/** The application this list is for */
private final ApplicationId application;
/** The cluster this list is for */
private final ClusterSpec cluster;
/** The requested nodes of this list */
private final NodeSpec requestedNodes;
/** The node candidates this has accepted so far, keyed on hostname */
private final Map<String, NodeCandidate> nodes = new LinkedHashMap<>();
/** The number of already allocated nodes accepted and not retired */
private int accepted = 0;
/** The number of already allocated nodes accepted and not retired and not needing resize */
private int acceptedWithoutResizingRetired = 0;
/** The number of nodes rejected because of clashing parentHostname */
private int rejectedDueToClashingParentHost = 0;
/** The number of nodes rejected due to exclusivity constraints */
private int rejectedDueToExclusivity = 0;
private int rejectedDueToInsufficientRealResources = 0;
/** The number of nodes that just now was changed to retired */
private int wasRetiredJustNow = 0;
/** The node indexes to verify uniqueness of each members index */
private final Set<Integer> indexes = new HashSet<>();
/** The next membership index to assign to a new node */
private final Supplier<Integer> nextIndex;
private final NodeRepository nodeRepository;
private final NodeResourceLimits nodeResourceLimits;
NodeAllocation(NodeList allNodes, ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes,
Supplier<Integer> nextIndex, NodeRepository nodeRepository) {
this.allNodes = allNodes;
this.application = application;
this.cluster = cluster;
this.requestedNodes = requestedNodes;
this.nextIndex = nextIndex;
this.nodeRepository = nodeRepository;
nodeResourceLimits = new NodeResourceLimits(nodeRepository);
}
/**
* Offer some nodes to this. The nodes may have an allocation to a different application or cluster,
* an allocation to this cluster, or no current allocation (in which case one is assigned).
*
* Note that if unallocated nodes are offered before allocated nodes, this will unnecessarily
* reject allocated nodes due to index duplicates.
*
* @param nodesPrioritized the nodes which are potentially on offer. These may belong to a different application etc.
* @return the subset of offeredNodes which was accepted, with the correct allocation assigned
*/
List<Node> offer(List<NodeCandidate> nodesPrioritized) {
List<Node> accepted = new ArrayList<>();
for (NodeCandidate candidate : nodesPrioritized) {
if (candidate.allocation().isPresent()) {
Allocation allocation = candidate.allocation().get();
ClusterMembership membership = allocation.membership();
if ( ! allocation.owner().equals(application)) continue;
if ( ! membership.cluster().satisfies(cluster)) continue;
if ((! candidate.isSurplus || saturated()) && ! membership.cluster().group().equals(cluster.group())) continue;
if ( candidate.state() == Node.State.active && allocation.isRemovable()) continue;
if ( indexes.contains(membership.index())) continue;
boolean resizeable = requestedNodes.considerRetiring() && candidate.isResizable;
boolean acceptToRetire = acceptToRetire(candidate);
if ((! saturated() && hasCompatibleFlavor(candidate) && requestedNodes.acceptable(candidate)) || acceptToRetire) {
candidate = candidate.withNode();
if (candidate.isValid())
accepted.add(acceptNode(candidate, shouldRetire(candidate, nodesPrioritized), resizeable));
}
}
else if (! saturated() && hasCompatibleFlavor(candidate)) {
if (requestedNodes.type() == NodeType.tenant && ! nodeResourceLimits.isWithinRealLimits(candidate, cluster)) {
++rejectedDueToInsufficientRealResources;
continue;
}
if ( violatesParentHostPolicy(candidate)) {
++rejectedDueToClashingParentHost;
continue;
}
if ( violatesExclusivity(candidate)) {
++rejectedDueToExclusivity;
continue;
}
if (candidate.wantToRetire()) {
continue;
}
candidate = candidate.allocate(application,
ClusterMembership.from(cluster, nextIndex.get()),
requestedNodes.resources().orElse(candidate.resources()),
nodeRepository.clock().instant());
if (candidate.isValid())
accepted.add(acceptNode(candidate, false, false));
}
}
return accepted;
}
private boolean shouldRetire(NodeCandidate candidate, List<NodeCandidate> candidates) {
if ( ! requestedNodes.considerRetiring())
return candidate.allocation().map(a -> a.membership().retired()).orElse(false);
if ( ! nodeResourceLimits.isWithinRealLimits(candidate, cluster)) return true;
if (violatesParentHostPolicy(candidate)) return true;
if ( ! hasCompatibleFlavor(candidate)) return true;
if (candidate.wantToRetire()) return true;
if (candidate.preferToRetire() && candidate.replacableBy(candidates)) return true;
if (violatesExclusivity(candidate)) return true;
return false;
}
private boolean violatesParentHostPolicy(NodeCandidate candidate) {
return checkForClashingParentHost() && offeredNodeHasParentHostnameAlreadyAccepted(candidate);
}
private boolean checkForClashingParentHost() {
return nodeRepository.zone().system() == SystemName.main &&
nodeRepository.zone().environment().isProduction() &&
! application.instance().isTester();
}
private boolean offeredNodeHasParentHostnameAlreadyAccepted(NodeCandidate candidate) {
for (NodeCandidate acceptedNode : nodes.values()) {
if (acceptedNode.parentHostname().isPresent() && candidate.parentHostname().isPresent() &&
acceptedNode.parentHostname().get().equals(candidate.parentHostname().get())) {
return true;
}
}
return false;
}
private boolean violatesExclusivity(NodeCandidate candidate) {
if (candidate.parentHostname().isEmpty()) return false;
if (nodeRepository.zone().getCloud().dynamicProvisioning())
return requestedNodes.isExclusive() &&
! candidate.parent.flatMap(Node::exclusiveTo).map(application::equals).orElse(false);
for (Node nodeOnHost : allNodes.childrenOf(candidate.parentHostname().get())) {
if (nodeOnHost.allocation().isEmpty()) continue;
if (requestedNodes.isExclusive() || nodeOnHost.allocation().get().membership().cluster().isExclusive()) {
if ( ! nodeOnHost.allocation().get().owner().equals(application)) return true;
}
}
return false;
}
/**
* Returns whether this node should be accepted into the cluster even if it is not currently desired
* (already enough nodes, or wrong flavor).
* Such nodes will be marked retired during finalization of the list of accepted nodes.
* The conditions for this are:
*
* This is a stateful node. These must always be retired before being removed to allow the cluster to
* migrate away data.
*
* This is a container node and it is not desired due to having the wrong flavor. In this case this
* will (normally) obtain for all the current nodes in the cluster and so retiring before removing must
* be used to avoid removing all the current nodes at once, before the newly allocated replacements are
* initialized. (In the other case, where a container node is not desired because we have enough nodes we
* do want to remove it immediately to get immediate feedback on how the size reduction works out.)
*/
private boolean acceptToRetire(NodeCandidate candidate) {
if (candidate.state() != Node.State.active) return false;
if (! candidate.allocation().get().membership().cluster().group().equals(cluster.group())) return false;
if (candidate.allocation().get().membership().retired()) return true;
if (! requestedNodes.considerRetiring()) return false;
return cluster.isStateful() ||
(cluster.type() == ClusterSpec.Type.container && !hasCompatibleFlavor(candidate));
}
private boolean hasCompatibleFlavor(NodeCandidate candidate) {
return requestedNodes.isCompatible(candidate.flavor(), nodeRepository.flavors()) || candidate.isResizable;
}
private Node acceptNode(NodeCandidate candidate, boolean shouldRetire, boolean resizeable) {
Node node = candidate.toNode();
if (node.allocation().isPresent())
node = node.with(node.allocation().get().withRequestedResources(requestedNodes.resources().orElse(node.resources())));
if (! shouldRetire) {
accepted++;
if (node.allocation().isEmpty()
|| ! ( requestedNodes.needsResize(node) && node.allocation().get().membership().retired()))
acceptedWithoutResizingRetired++;
if (resizeable && ! ( node.allocation().isPresent() && node.allocation().get().membership().retired()))
node = resize(node);
if (node.state() != Node.State.active)
node = node.unretire().removable(false);
} else {
++wasRetiredJustNow;
node = node.retire(nodeRepository.clock().instant());
}
if ( ! node.allocation().get().membership().cluster().equals(cluster)) {
node = setCluster(cluster, node);
}
candidate = candidate.withNode(node);
indexes.add(node.allocation().get().membership().index());
nodes.put(node.hostname(), candidate);
return node;
}
private Node resize(Node node) {
NodeResources hostResources = allNodes.parentOf(node).get().flavor().resources();
return node.with(new Flavor(requestedNodes.resources().get()
.with(hostResources.diskSpeed())
.with(hostResources.storageType())));
}
private Node setCluster(ClusterSpec cluster, Node node) {
ClusterMembership membership = node.allocation().get().membership().with(cluster);
return node.with(node.allocation().get().with(membership));
}
/** Returns true if no more nodes are needed in this list */
private boolean saturated() {
return requestedNodes.saturatedBy(acceptedWithoutResizingRetired);
}
/** Returns true if the content of this list is sufficient to meet the request */
boolean fulfilled() {
return requestedNodes.fulfilledBy(accepted());
}
/** Returns true if this allocation was already fulfilled and resulted in no new changes */
public boolean fulfilledAndNoChanges() {
return fulfilled() && reservableNodes().isEmpty() && newNodes().isEmpty();
}
/**
* Returns {@link FlavorCount} describing the node deficit for the given {@link NodeSpec}.
*
* @return empty if the requested spec is already fulfilled. Otherwise returns {@link FlavorCount} containing the
* flavor and node count required to cover the deficit.
*/
Optional<FlavorCount> nodeDeficit() {
if (nodeType() != NodeType.config && nodeType() != NodeType.tenant) {
return Optional.empty();
}
return Optional.of(new FlavorCount(requestedNodes.resources().orElseGet(NodeResources::unspecified),
requestedNodes.fulfilledDeficitCount(accepted())))
.filter(flavorCount -> flavorCount.getCount() > 0);
}
/** Returns the indices to use when provisioning hosts for this */
/** The node type this is allocating */
NodeType nodeType() {
return requestedNodes.type();
}
/**
* Make the number of <i>non-retired</i> nodes in the list equal to the requested number
* of nodes, and retire the rest of the list. Only retire currently active nodes.
* Prefer to retire nodes of the wrong flavor.
* Make as few changes to the retired set as possible.
*
* @return the final list of nodes
*/
List<Node> finalNodes() {
int currentRetiredCount = (int) nodes.values().stream().filter(node -> node.allocation().get().membership().retired()).count();
int deltaRetiredCount = requestedNodes.idealRetiredCount(nodes.size(), currentRetiredCount) - currentRetiredCount;
if (deltaRetiredCount > 0) {
for (NodeCandidate candidate : byRetiringPriority(nodes.values())) {
if ( ! candidate.allocation().get().membership().retired() && candidate.state() == Node.State.active) {
candidate = candidate.withNode();
candidate = candidate.withNode(candidate.toNode().retire(Agent.application, nodeRepository.clock().instant()));
nodes.put(candidate.toNode().hostname(), candidate);
if (--deltaRetiredCount == 0) break;
}
}
}
else if (deltaRetiredCount < 0) {
for (NodeCandidate candidate : byUnretiringPriority(nodes.values())) {
if ( candidate.allocation().get().membership().retired() && hasCompatibleFlavor(candidate) ) {
candidate = candidate.withNode();
if (candidate.isResizable)
candidate = candidate.withNode(resize(candidate.toNode()));
candidate = candidate.withNode(candidate.toNode().unretire());
nodes.put(candidate.toNode().hostname(), candidate);
if (++deltaRetiredCount == 0) break;
}
}
}
for (NodeCandidate candidate : nodes.values()) {
candidate = candidate.withNode();
Allocation allocation = candidate.allocation().get();
candidate = candidate.withNode(candidate.toNode().with(allocation.with(allocation.membership()
.with(allocation.membership().cluster().exclusive(requestedNodes.isExclusive())))));
nodes.put(candidate.toNode().hostname(), candidate);
}
return nodes.values().stream().map(n -> n.toNode()).collect(Collectors.toList());
}
List<Node> reservableNodes() {
EnumSet<Node.State> reservableStates = EnumSet.of(Node.State.inactive, Node.State.ready, Node.State.reserved);
return nodesFilter(n -> ! n.isNew && reservableStates.contains(n.state()));
}
List<Node> newNodes() {
return nodesFilter(n -> n.isNew);
}
private List<Node> nodesFilter(Predicate<NodeCandidate> predicate) {
return nodes.values().stream()
.filter(predicate)
.map(n -> n.toNode())
.collect(Collectors.toList());
}
/** Returns the number of nodes accepted this far */
private int accepted() {
if (nodeType() == NodeType.tenant) return accepted;
return allNodes.nodeType(nodeType()).size();
}
/** Prefer to retire nodes we want the least */
private List<NodeCandidate> byRetiringPriority(Collection<NodeCandidate> candidates) {
return candidates.stream().sorted(Comparator.reverseOrder()).collect(Collectors.toList());
}
/** Prefer to unretire nodes we don't want to retire, and otherwise those with lower index */
private List<NodeCandidate> byUnretiringPriority(Collection<NodeCandidate> candidates) {
return candidates.stream()
.sorted(Comparator.comparing(NodeCandidate::wantToRetire)
.thenComparing(n -> n.allocation().get().membership().index()))
.collect(Collectors.toList());
}
public String outOfCapacityDetails() {
List<String> reasons = new ArrayList<>();
if (rejectedDueToExclusivity > 0)
reasons.add("host exclusivity constraints");
if (rejectedDueToClashingParentHost > 0)
reasons.add("insufficient nodes available on separate physical hosts");
if (wasRetiredJustNow > 0)
reasons.add("retirement of allocated nodes");
if (rejectedDueToInsufficientRealResources > 0)
reasons.add("insufficient real resources on hosts");
if (reasons.isEmpty()) return "";
return ": Not enough nodes available due to " + String.join(", ", reasons);
}
static class FlavorCount {
private final NodeResources flavor;
private final int count;
private FlavorCount(NodeResources flavor, int count) {
this.flavor = flavor;
this.count = count;
}
NodeResources getFlavor() {
return flavor;
}
int getCount() {
return count;
}
}
} | class NodeAllocation {
/** List of all nodes in node-repository */
private final NodeList allNodes;
/** The application this list is for */
private final ApplicationId application;
/** The cluster this list is for */
private final ClusterSpec cluster;
/** The requested nodes of this list */
private final NodeSpec requestedNodes;
/** The node candidates this has accepted so far, keyed on hostname */
private final Map<String, NodeCandidate> nodes = new LinkedHashMap<>();
/** The number of already allocated nodes accepted and not retired */
private int accepted = 0;
/** The number of already allocated nodes accepted and not retired and not needing resize */
private int acceptedWithoutResizingRetired = 0;
/** The number of nodes rejected because of clashing parentHostname */
private int rejectedDueToClashingParentHost = 0;
/** The number of nodes rejected due to exclusivity constraints */
private int rejectedDueToExclusivity = 0;
private int rejectedDueToInsufficientRealResources = 0;
/** The number of nodes that just now was changed to retired */
private int wasRetiredJustNow = 0;
/** The node indexes to verify uniqueness of each members index */
private final Set<Integer> indexes = new HashSet<>();
/** The next membership index to assign to a new node */
private final Supplier<Integer> nextIndex;
private final NodeRepository nodeRepository;
private final NodeResourceLimits nodeResourceLimits;
NodeAllocation(NodeList allNodes, ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes,
Supplier<Integer> nextIndex, NodeRepository nodeRepository) {
this.allNodes = allNodes;
this.application = application;
this.cluster = cluster;
this.requestedNodes = requestedNodes;
this.nextIndex = nextIndex;
this.nodeRepository = nodeRepository;
nodeResourceLimits = new NodeResourceLimits(nodeRepository);
}
/**
* Offer some nodes to this. The nodes may have an allocation to a different application or cluster,
* an allocation to this cluster, or no current allocation (in which case one is assigned).
*
* Note that if unallocated nodes are offered before allocated nodes, this will unnecessarily
* reject allocated nodes due to index duplicates.
*
* @param nodesPrioritized the nodes which are potentially on offer. These may belong to a different application etc.
* @return the subset of offeredNodes which was accepted, with the correct allocation assigned
*/
List<Node> offer(List<NodeCandidate> nodesPrioritized) {
List<Node> accepted = new ArrayList<>();
for (NodeCandidate candidate : nodesPrioritized) {
if (candidate.allocation().isPresent()) {
Allocation allocation = candidate.allocation().get();
ClusterMembership membership = allocation.membership();
if ( ! allocation.owner().equals(application)) continue;
if ( ! membership.cluster().satisfies(cluster)) continue;
if ((! candidate.isSurplus || saturated()) && ! membership.cluster().group().equals(cluster.group())) continue;
if ( candidate.state() == Node.State.active && allocation.isRemovable()) continue;
if ( indexes.contains(membership.index())) continue;
boolean resizeable = requestedNodes.considerRetiring() && candidate.isResizable;
boolean acceptToRetire = acceptToRetire(candidate);
if ((! saturated() && hasCompatibleFlavor(candidate) && requestedNodes.acceptable(candidate)) || acceptToRetire) {
candidate = candidate.withNode();
if (candidate.isValid())
accepted.add(acceptNode(candidate, shouldRetire(candidate, nodesPrioritized), resizeable));
}
}
else if (! saturated() && hasCompatibleFlavor(candidate)) {
if (requestedNodes.type() == NodeType.tenant && ! nodeResourceLimits.isWithinRealLimits(candidate, cluster)) {
++rejectedDueToInsufficientRealResources;
continue;
}
if ( violatesParentHostPolicy(candidate)) {
++rejectedDueToClashingParentHost;
continue;
}
if ( violatesExclusivity(candidate)) {
++rejectedDueToExclusivity;
continue;
}
if (candidate.wantToRetire()) {
continue;
}
candidate = candidate.allocate(application,
ClusterMembership.from(cluster, nextIndex.get()),
requestedNodes.resources().orElse(candidate.resources()),
nodeRepository.clock().instant());
if (candidate.isValid())
accepted.add(acceptNode(candidate, false, false));
}
}
return accepted;
}
private boolean shouldRetire(NodeCandidate candidate, List<NodeCandidate> candidates) {
if ( ! requestedNodes.considerRetiring())
return candidate.allocation().map(a -> a.membership().retired()).orElse(false);
if ( ! nodeResourceLimits.isWithinRealLimits(candidate, cluster)) return true;
if (violatesParentHostPolicy(candidate)) return true;
if ( ! hasCompatibleFlavor(candidate)) return true;
if (candidate.wantToRetire()) return true;
if (candidate.preferToRetire() && candidate.replacableBy(candidates)) return true;
if (violatesExclusivity(candidate)) return true;
return false;
}
private boolean violatesParentHostPolicy(NodeCandidate candidate) {
return checkForClashingParentHost() && offeredNodeHasParentHostnameAlreadyAccepted(candidate);
}
private boolean checkForClashingParentHost() {
return nodeRepository.zone().system() == SystemName.main &&
nodeRepository.zone().environment().isProduction() &&
! application.instance().isTester();
}
private boolean offeredNodeHasParentHostnameAlreadyAccepted(NodeCandidate candidate) {
for (NodeCandidate acceptedNode : nodes.values()) {
if (acceptedNode.parentHostname().isPresent() && candidate.parentHostname().isPresent() &&
acceptedNode.parentHostname().get().equals(candidate.parentHostname().get())) {
return true;
}
}
return false;
}
private boolean violatesExclusivity(NodeCandidate candidate) {
if (candidate.parentHostname().isEmpty()) return false;
if (nodeRepository.zone().getCloud().dynamicProvisioning())
return requestedNodes.isExclusive() &&
! candidate.parent.flatMap(Node::exclusiveTo).map(application::equals).orElse(false);
for (Node nodeOnHost : allNodes.childrenOf(candidate.parentHostname().get())) {
if (nodeOnHost.allocation().isEmpty()) continue;
if (requestedNodes.isExclusive() || nodeOnHost.allocation().get().membership().cluster().isExclusive()) {
if ( ! nodeOnHost.allocation().get().owner().equals(application)) return true;
}
}
return false;
}
/**
* Returns whether this node should be accepted into the cluster even if it is not currently desired
* (already enough nodes, or wrong flavor).
* Such nodes will be marked retired during finalization of the list of accepted nodes.
* The conditions for this are:
*
* This is a stateful node. These must always be retired before being removed to allow the cluster to
* migrate away data.
*
* This is a container node and it is not desired due to having the wrong flavor. In this case this
* will (normally) obtain for all the current nodes in the cluster and so retiring before removing must
* be used to avoid removing all the current nodes at once, before the newly allocated replacements are
* initialized. (In the other case, where a container node is not desired because we have enough nodes we
* do want to remove it immediately to get immediate feedback on how the size reduction works out.)
*/
private boolean acceptToRetire(NodeCandidate candidate) {
if (candidate.state() != Node.State.active) return false;
if (! candidate.allocation().get().membership().cluster().group().equals(cluster.group())) return false;
if (candidate.allocation().get().membership().retired()) return true;
if (! requestedNodes.considerRetiring()) return false;
return cluster.isStateful() ||
(cluster.type() == ClusterSpec.Type.container && !hasCompatibleFlavor(candidate));
}
private boolean hasCompatibleFlavor(NodeCandidate candidate) {
return requestedNodes.isCompatible(candidate.flavor(), nodeRepository.flavors()) || candidate.isResizable;
}
private Node acceptNode(NodeCandidate candidate, boolean shouldRetire, boolean resizeable) {
Node node = candidate.toNode();
if (node.allocation().isPresent())
node = node.with(node.allocation().get().withRequestedResources(requestedNodes.resources().orElse(node.resources())));
if (! shouldRetire) {
accepted++;
if (node.allocation().isEmpty()
|| ! ( requestedNodes.needsResize(node) && node.allocation().get().membership().retired()))
acceptedWithoutResizingRetired++;
if (resizeable && ! ( node.allocation().isPresent() && node.allocation().get().membership().retired()))
node = resize(node);
if (node.state() != Node.State.active)
node = node.unretire().removable(false);
} else {
++wasRetiredJustNow;
node = node.retire(nodeRepository.clock().instant());
}
if ( ! node.allocation().get().membership().cluster().equals(cluster)) {
node = setCluster(cluster, node);
}
candidate = candidate.withNode(node);
indexes.add(node.allocation().get().membership().index());
nodes.put(node.hostname(), candidate);
return node;
}
private Node resize(Node node) {
NodeResources hostResources = allNodes.parentOf(node).get().flavor().resources();
return node.with(new Flavor(requestedNodes.resources().get()
.with(hostResources.diskSpeed())
.with(hostResources.storageType())));
}
private Node setCluster(ClusterSpec cluster, Node node) {
ClusterMembership membership = node.allocation().get().membership().with(cluster);
return node.with(node.allocation().get().with(membership));
}
/** Returns true if no more nodes are needed in this list */
private boolean saturated() {
return requestedNodes.saturatedBy(acceptedWithoutResizingRetired);
}
/** Returns true if the content of this list is sufficient to meet the request */
boolean fulfilled() {
return requestedNodes.fulfilledBy(accepted());
}
/** Returns true if this allocation was already fulfilled and resulted in no new changes */
public boolean fulfilledAndNoChanges() {
return fulfilled() && reservableNodes().isEmpty() && newNodes().isEmpty();
}
/**
* Returns {@link FlavorCount} describing the node deficit for the given {@link NodeSpec}.
*
* @return empty if the requested spec is already fulfilled. Otherwise returns {@link FlavorCount} containing the
* flavor and node count required to cover the deficit.
*/
Optional<FlavorCount> nodeDeficit() {
if (nodeType() != NodeType.config && nodeType() != NodeType.tenant) {
return Optional.empty();
}
return Optional.of(new FlavorCount(requestedNodes.resources().orElseGet(NodeResources::unspecified),
requestedNodes.fulfilledDeficitCount(accepted())))
.filter(flavorCount -> flavorCount.getCount() > 0);
}
/** Returns the indices to use when provisioning hosts for this */
/** The node type this is allocating */
NodeType nodeType() {
return requestedNodes.type();
}
/**
* Make the number of <i>non-retired</i> nodes in the list equal to the requested number
* of nodes, and retire the rest of the list. Only retire currently active nodes.
* Prefer to retire nodes of the wrong flavor.
* Make as few changes to the retired set as possible.
*
* @return the final list of nodes
*/
List<Node> finalNodes() {
int currentRetiredCount = (int) nodes.values().stream().filter(node -> node.allocation().get().membership().retired()).count();
int deltaRetiredCount = requestedNodes.idealRetiredCount(nodes.size(), currentRetiredCount) - currentRetiredCount;
if (deltaRetiredCount > 0) {
for (NodeCandidate candidate : byRetiringPriority(nodes.values())) {
if ( ! candidate.allocation().get().membership().retired() && candidate.state() == Node.State.active) {
candidate = candidate.withNode();
candidate = candidate.withNode(candidate.toNode().retire(Agent.application, nodeRepository.clock().instant()));
nodes.put(candidate.toNode().hostname(), candidate);
if (--deltaRetiredCount == 0) break;
}
}
}
else if (deltaRetiredCount < 0) {
for (NodeCandidate candidate : byUnretiringPriority(nodes.values())) {
if ( candidate.allocation().get().membership().retired() && hasCompatibleFlavor(candidate) ) {
candidate = candidate.withNode();
if (candidate.isResizable)
candidate = candidate.withNode(resize(candidate.toNode()));
candidate = candidate.withNode(candidate.toNode().unretire());
nodes.put(candidate.toNode().hostname(), candidate);
if (++deltaRetiredCount == 0) break;
}
}
}
for (NodeCandidate candidate : nodes.values()) {
candidate = candidate.withNode();
Allocation allocation = candidate.allocation().get();
candidate = candidate.withNode(candidate.toNode().with(allocation.with(allocation.membership()
.with(allocation.membership().cluster().exclusive(requestedNodes.isExclusive())))));
nodes.put(candidate.toNode().hostname(), candidate);
}
return nodes.values().stream().map(n -> n.toNode()).collect(Collectors.toList());
}
List<Node> reservableNodes() {
EnumSet<Node.State> reservableStates = EnumSet.of(Node.State.inactive, Node.State.ready, Node.State.reserved);
return nodesFilter(n -> ! n.isNew && reservableStates.contains(n.state()));
}
List<Node> newNodes() {
return nodesFilter(n -> n.isNew);
}
private List<Node> nodesFilter(Predicate<NodeCandidate> predicate) {
return nodes.values().stream()
.filter(predicate)
.map(n -> n.toNode())
.collect(Collectors.toList());
}
/** Returns the number of nodes accepted this far */
private int accepted() {
if (nodeType() == NodeType.tenant) return accepted;
return allNodes.nodeType(nodeType()).size();
}
/** Prefer to retire nodes we want the least */
private List<NodeCandidate> byRetiringPriority(Collection<NodeCandidate> candidates) {
return candidates.stream().sorted(Comparator.reverseOrder()).collect(Collectors.toList());
}
/** Prefer to unretire nodes we don't want to retire, and otherwise those with lower index */
private List<NodeCandidate> byUnretiringPriority(Collection<NodeCandidate> candidates) {
return candidates.stream()
.sorted(Comparator.comparing(NodeCandidate::wantToRetire)
.thenComparing(n -> n.allocation().get().membership().index()))
.collect(Collectors.toList());
}
public String outOfCapacityDetails() {
List<String> reasons = new ArrayList<>();
if (rejectedDueToExclusivity > 0)
reasons.add("host exclusivity constraints");
if (rejectedDueToClashingParentHost > 0)
reasons.add("insufficient nodes available on separate physical hosts");
if (wasRetiredJustNow > 0)
reasons.add("retirement of allocated nodes");
if (rejectedDueToInsufficientRealResources > 0)
reasons.add("insufficient real resources on hosts");
if (reasons.isEmpty()) return "";
return ": Not enough nodes available due to " + String.join(", ", reasons);
}
private static Integer parseIndex(String hostname) {
try {
return Integer.parseInt(hostname.replaceFirst("^\\D+(\\d+)\\..*", "$1"));
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Could not parse index from hostname '" + hostname + "'", e);
}
}
static class FlavorCount {
private final NodeResources flavor;
private final int count;
private FlavorCount(NodeResources flavor, int count) {
this.flavor = flavor;
this.count = count;
}
NodeResources getFlavor() {
return flavor;
}
int getCount() {
return count;
}
}
} |
Good point, we have to ensure current hosts have 0, 1, 2 or find another way to decide indices. Will look into it. | List<Integer> provisionIndices(int count) {
if (count == 0) return List.of();
NodeType hostType = requestedNodes.type().hostType();
if (hostType == NodeType.host) return nodeRepository.database().readProvisionIndices(count);
int offset = 1;
Set<Integer> currentIndices = allNodes.nodeType(hostType)
.stream()
.map(node -> node.allocation().get().membership().index())
.map(index -> index + offset)
.collect(Collectors.toSet());
List<Integer> indices = new ArrayList<>(count);
for (int i = offset; indices.size() < count; i++) {
if (!currentIndices.contains(i)) {
indices.add(i);
}
}
return indices;
} | Set<Integer> currentIndices = allNodes.nodeType(hostType) | List<Integer> provisionIndices(int count) {
if (count < 1) throw new IllegalArgumentException("Count must be positive");
NodeType hostType = requestedNodes.type().hostType();
if (hostType == NodeType.host) return nodeRepository.database().readProvisionIndices(count);
Set<Integer> currentIndices = allNodes.nodeType(hostType)
.stream()
.map(Node::hostname)
.map(NodeAllocation::parseIndex)
.collect(Collectors.toSet());
List<Integer> indices = new ArrayList<>(count);
for (int i = 1; indices.size() < count; i++) {
if (!currentIndices.contains(i)) {
indices.add(i);
}
}
return indices;
} | class NodeAllocation {
/** List of all nodes in node-repository */
private final NodeList allNodes;
/** The application this list is for */
private final ApplicationId application;
/** The cluster this list is for */
private final ClusterSpec cluster;
/** The requested nodes of this list */
private final NodeSpec requestedNodes;
/** The node candidates this has accepted so far, keyed on hostname */
private final Map<String, NodeCandidate> nodes = new LinkedHashMap<>();
/** The number of already allocated nodes accepted and not retired */
private int accepted = 0;
/** The number of already allocated nodes accepted and not retired and not needing resize */
private int acceptedWithoutResizingRetired = 0;
/** The number of nodes rejected because of clashing parentHostname */
private int rejectedDueToClashingParentHost = 0;
/** The number of nodes rejected due to exclusivity constraints */
private int rejectedDueToExclusivity = 0;
private int rejectedDueToInsufficientRealResources = 0;
/** The number of nodes that just now was changed to retired */
private int wasRetiredJustNow = 0;
/** The node indexes to verify uniqueness of each members index */
private final Set<Integer> indexes = new HashSet<>();
/** The next membership index to assign to a new node */
private final Supplier<Integer> nextIndex;
private final NodeRepository nodeRepository;
private final NodeResourceLimits nodeResourceLimits;
NodeAllocation(NodeList allNodes, ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes,
Supplier<Integer> nextIndex, NodeRepository nodeRepository) {
this.allNodes = allNodes;
this.application = application;
this.cluster = cluster;
this.requestedNodes = requestedNodes;
this.nextIndex = nextIndex;
this.nodeRepository = nodeRepository;
nodeResourceLimits = new NodeResourceLimits(nodeRepository);
}
/**
* Offer some nodes to this. The nodes may have an allocation to a different application or cluster,
* an allocation to this cluster, or no current allocation (in which case one is assigned).
*
* Note that if unallocated nodes are offered before allocated nodes, this will unnecessarily
* reject allocated nodes due to index duplicates.
*
* @param nodesPrioritized the nodes which are potentially on offer. These may belong to a different application etc.
* @return the subset of offeredNodes which was accepted, with the correct allocation assigned
*/
List<Node> offer(List<NodeCandidate> nodesPrioritized) {
List<Node> accepted = new ArrayList<>();
for (NodeCandidate candidate : nodesPrioritized) {
if (candidate.allocation().isPresent()) {
Allocation allocation = candidate.allocation().get();
ClusterMembership membership = allocation.membership();
if ( ! allocation.owner().equals(application)) continue;
if ( ! membership.cluster().satisfies(cluster)) continue;
if ((! candidate.isSurplus || saturated()) && ! membership.cluster().group().equals(cluster.group())) continue;
if ( candidate.state() == Node.State.active && allocation.isRemovable()) continue;
if ( indexes.contains(membership.index())) continue;
boolean resizeable = requestedNodes.considerRetiring() && candidate.isResizable;
boolean acceptToRetire = acceptToRetire(candidate);
if ((! saturated() && hasCompatibleFlavor(candidate) && requestedNodes.acceptable(candidate)) || acceptToRetire) {
candidate = candidate.withNode();
if (candidate.isValid())
accepted.add(acceptNode(candidate, shouldRetire(candidate, nodesPrioritized), resizeable));
}
}
else if (! saturated() && hasCompatibleFlavor(candidate)) {
if (requestedNodes.type() == NodeType.tenant && ! nodeResourceLimits.isWithinRealLimits(candidate, cluster)) {
++rejectedDueToInsufficientRealResources;
continue;
}
if ( violatesParentHostPolicy(candidate)) {
++rejectedDueToClashingParentHost;
continue;
}
if ( violatesExclusivity(candidate)) {
++rejectedDueToExclusivity;
continue;
}
if (candidate.wantToRetire()) {
continue;
}
candidate = candidate.allocate(application,
ClusterMembership.from(cluster, nextIndex.get()),
requestedNodes.resources().orElse(candidate.resources()),
nodeRepository.clock().instant());
if (candidate.isValid())
accepted.add(acceptNode(candidate, false, false));
}
}
return accepted;
}
private boolean shouldRetire(NodeCandidate candidate, List<NodeCandidate> candidates) {
if ( ! requestedNodes.considerRetiring())
return candidate.allocation().map(a -> a.membership().retired()).orElse(false);
if ( ! nodeResourceLimits.isWithinRealLimits(candidate, cluster)) return true;
if (violatesParentHostPolicy(candidate)) return true;
if ( ! hasCompatibleFlavor(candidate)) return true;
if (candidate.wantToRetire()) return true;
if (candidate.preferToRetire() && candidate.replacableBy(candidates)) return true;
if (violatesExclusivity(candidate)) return true;
return false;
}
private boolean violatesParentHostPolicy(NodeCandidate candidate) {
return checkForClashingParentHost() && offeredNodeHasParentHostnameAlreadyAccepted(candidate);
}
private boolean checkForClashingParentHost() {
return nodeRepository.zone().system() == SystemName.main &&
nodeRepository.zone().environment().isProduction() &&
! application.instance().isTester();
}
private boolean offeredNodeHasParentHostnameAlreadyAccepted(NodeCandidate candidate) {
for (NodeCandidate acceptedNode : nodes.values()) {
if (acceptedNode.parentHostname().isPresent() && candidate.parentHostname().isPresent() &&
acceptedNode.parentHostname().get().equals(candidate.parentHostname().get())) {
return true;
}
}
return false;
}
private boolean violatesExclusivity(NodeCandidate candidate) {
if (candidate.parentHostname().isEmpty()) return false;
if (nodeRepository.zone().getCloud().dynamicProvisioning())
return requestedNodes.isExclusive() &&
! candidate.parent.flatMap(Node::exclusiveTo).map(application::equals).orElse(false);
for (Node nodeOnHost : allNodes.childrenOf(candidate.parentHostname().get())) {
if (nodeOnHost.allocation().isEmpty()) continue;
if (requestedNodes.isExclusive() || nodeOnHost.allocation().get().membership().cluster().isExclusive()) {
if ( ! nodeOnHost.allocation().get().owner().equals(application)) return true;
}
}
return false;
}
/**
* Returns whether this node should be accepted into the cluster even if it is not currently desired
* (already enough nodes, or wrong flavor).
* Such nodes will be marked retired during finalization of the list of accepted nodes.
* The conditions for this are:
*
* This is a stateful node. These must always be retired before being removed to allow the cluster to
* migrate away data.
*
* This is a container node and it is not desired due to having the wrong flavor. In this case this
* will (normally) obtain for all the current nodes in the cluster and so retiring before removing must
* be used to avoid removing all the current nodes at once, before the newly allocated replacements are
* initialized. (In the other case, where a container node is not desired because we have enough nodes we
* do want to remove it immediately to get immediate feedback on how the size reduction works out.)
*/
private boolean acceptToRetire(NodeCandidate candidate) {
if (candidate.state() != Node.State.active) return false;
if (! candidate.allocation().get().membership().cluster().group().equals(cluster.group())) return false;
if (candidate.allocation().get().membership().retired()) return true;
if (! requestedNodes.considerRetiring()) return false;
return cluster.isStateful() ||
(cluster.type() == ClusterSpec.Type.container && !hasCompatibleFlavor(candidate));
}
private boolean hasCompatibleFlavor(NodeCandidate candidate) {
return requestedNodes.isCompatible(candidate.flavor(), nodeRepository.flavors()) || candidate.isResizable;
}
private Node acceptNode(NodeCandidate candidate, boolean shouldRetire, boolean resizeable) {
Node node = candidate.toNode();
if (node.allocation().isPresent())
node = node.with(node.allocation().get().withRequestedResources(requestedNodes.resources().orElse(node.resources())));
if (! shouldRetire) {
accepted++;
if (node.allocation().isEmpty()
|| ! ( requestedNodes.needsResize(node) && node.allocation().get().membership().retired()))
acceptedWithoutResizingRetired++;
if (resizeable && ! ( node.allocation().isPresent() && node.allocation().get().membership().retired()))
node = resize(node);
if (node.state() != Node.State.active)
node = node.unretire().removable(false);
} else {
++wasRetiredJustNow;
node = node.retire(nodeRepository.clock().instant());
}
if ( ! node.allocation().get().membership().cluster().equals(cluster)) {
node = setCluster(cluster, node);
}
candidate = candidate.withNode(node);
indexes.add(node.allocation().get().membership().index());
nodes.put(node.hostname(), candidate);
return node;
}
private Node resize(Node node) {
NodeResources hostResources = allNodes.parentOf(node).get().flavor().resources();
return node.with(new Flavor(requestedNodes.resources().get()
.with(hostResources.diskSpeed())
.with(hostResources.storageType())));
}
private Node setCluster(ClusterSpec cluster, Node node) {
ClusterMembership membership = node.allocation().get().membership().with(cluster);
return node.with(node.allocation().get().with(membership));
}
/** Returns true if no more nodes are needed in this list */
private boolean saturated() {
return requestedNodes.saturatedBy(acceptedWithoutResizingRetired);
}
/** Returns true if the content of this list is sufficient to meet the request */
boolean fulfilled() {
return requestedNodes.fulfilledBy(accepted());
}
/** Returns true if this allocation was already fulfilled and resulted in no new changes */
public boolean fulfilledAndNoChanges() {
return fulfilled() && reservableNodes().isEmpty() && newNodes().isEmpty();
}
/**
* Returns {@link FlavorCount} describing the node deficit for the given {@link NodeSpec}.
*
* @return empty if the requested spec is already fulfilled. Otherwise returns {@link FlavorCount} containing the
* flavor and node count required to cover the deficit.
*/
Optional<FlavorCount> nodeDeficit() {
if (nodeType() != NodeType.config && nodeType() != NodeType.tenant) {
return Optional.empty();
}
return Optional.of(new FlavorCount(requestedNodes.resources().orElseGet(NodeResources::unspecified),
requestedNodes.fulfilledDeficitCount(accepted())))
.filter(flavorCount -> flavorCount.getCount() > 0);
}
/** Returns the indices to use when provisioning hosts for this */
/** The node type this is allocating */
NodeType nodeType() {
return requestedNodes.type();
}
/**
* Make the number of <i>non-retired</i> nodes in the list equal to the requested number
* of nodes, and retire the rest of the list. Only retire currently active nodes.
* Prefer to retire nodes of the wrong flavor.
* Make as few changes to the retired set as possible.
*
* @return the final list of nodes
*/
List<Node> finalNodes() {
int currentRetiredCount = (int) nodes.values().stream().filter(node -> node.allocation().get().membership().retired()).count();
int deltaRetiredCount = requestedNodes.idealRetiredCount(nodes.size(), currentRetiredCount) - currentRetiredCount;
if (deltaRetiredCount > 0) {
for (NodeCandidate candidate : byRetiringPriority(nodes.values())) {
if ( ! candidate.allocation().get().membership().retired() && candidate.state() == Node.State.active) {
candidate = candidate.withNode();
candidate = candidate.withNode(candidate.toNode().retire(Agent.application, nodeRepository.clock().instant()));
nodes.put(candidate.toNode().hostname(), candidate);
if (--deltaRetiredCount == 0) break;
}
}
}
else if (deltaRetiredCount < 0) {
for (NodeCandidate candidate : byUnretiringPriority(nodes.values())) {
if ( candidate.allocation().get().membership().retired() && hasCompatibleFlavor(candidate) ) {
candidate = candidate.withNode();
if (candidate.isResizable)
candidate = candidate.withNode(resize(candidate.toNode()));
candidate = candidate.withNode(candidate.toNode().unretire());
nodes.put(candidate.toNode().hostname(), candidate);
if (++deltaRetiredCount == 0) break;
}
}
}
for (NodeCandidate candidate : nodes.values()) {
candidate = candidate.withNode();
Allocation allocation = candidate.allocation().get();
candidate = candidate.withNode(candidate.toNode().with(allocation.with(allocation.membership()
.with(allocation.membership().cluster().exclusive(requestedNodes.isExclusive())))));
nodes.put(candidate.toNode().hostname(), candidate);
}
return nodes.values().stream().map(n -> n.toNode()).collect(Collectors.toList());
}
List<Node> reservableNodes() {
EnumSet<Node.State> reservableStates = EnumSet.of(Node.State.inactive, Node.State.ready, Node.State.reserved);
return nodesFilter(n -> ! n.isNew && reservableStates.contains(n.state()));
}
List<Node> newNodes() {
return nodesFilter(n -> n.isNew);
}
private List<Node> nodesFilter(Predicate<NodeCandidate> predicate) {
return nodes.values().stream()
.filter(predicate)
.map(n -> n.toNode())
.collect(Collectors.toList());
}
/** Returns the number of nodes accepted this far */
private int accepted() {
if (nodeType() == NodeType.tenant) return accepted;
return allNodes.nodeType(nodeType()).size();
}
/** Prefer to retire nodes we want the least */
private List<NodeCandidate> byRetiringPriority(Collection<NodeCandidate> candidates) {
return candidates.stream().sorted(Comparator.reverseOrder()).collect(Collectors.toList());
}
/** Prefer to unretire nodes we don't want to retire, and otherwise those with lower index */
private List<NodeCandidate> byUnretiringPriority(Collection<NodeCandidate> candidates) {
return candidates.stream()
.sorted(Comparator.comparing(NodeCandidate::wantToRetire)
.thenComparing(n -> n.allocation().get().membership().index()))
.collect(Collectors.toList());
}
public String outOfCapacityDetails() {
List<String> reasons = new ArrayList<>();
if (rejectedDueToExclusivity > 0)
reasons.add("host exclusivity constraints");
if (rejectedDueToClashingParentHost > 0)
reasons.add("insufficient nodes available on separate physical hosts");
if (wasRetiredJustNow > 0)
reasons.add("retirement of allocated nodes");
if (rejectedDueToInsufficientRealResources > 0)
reasons.add("insufficient real resources on hosts");
if (reasons.isEmpty()) return "";
return ": Not enough nodes available due to " + String.join(", ", reasons);
}
static class FlavorCount {
private final NodeResources flavor;
private final int count;
private FlavorCount(NodeResources flavor, int count) {
this.flavor = flavor;
this.count = count;
}
NodeResources getFlavor() {
return flavor;
}
int getCount() {
return count;
}
}
} | class NodeAllocation {
/** List of all nodes in node-repository */
private final NodeList allNodes;
/** The application this list is for */
private final ApplicationId application;
/** The cluster this list is for */
private final ClusterSpec cluster;
/** The requested nodes of this list */
private final NodeSpec requestedNodes;
/** The node candidates this has accepted so far, keyed on hostname */
private final Map<String, NodeCandidate> nodes = new LinkedHashMap<>();
/** The number of already allocated nodes accepted and not retired */
private int accepted = 0;
/** The number of already allocated nodes accepted and not retired and not needing resize */
private int acceptedWithoutResizingRetired = 0;
/** The number of nodes rejected because of clashing parentHostname */
private int rejectedDueToClashingParentHost = 0;
/** The number of nodes rejected due to exclusivity constraints */
private int rejectedDueToExclusivity = 0;
private int rejectedDueToInsufficientRealResources = 0;
/** The number of nodes that just now was changed to retired */
private int wasRetiredJustNow = 0;
/** The node indexes to verify uniqueness of each members index */
private final Set<Integer> indexes = new HashSet<>();
/** The next membership index to assign to a new node */
private final Supplier<Integer> nextIndex;
private final NodeRepository nodeRepository;
private final NodeResourceLimits nodeResourceLimits;
NodeAllocation(NodeList allNodes, ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes,
Supplier<Integer> nextIndex, NodeRepository nodeRepository) {
this.allNodes = allNodes;
this.application = application;
this.cluster = cluster;
this.requestedNodes = requestedNodes;
this.nextIndex = nextIndex;
this.nodeRepository = nodeRepository;
nodeResourceLimits = new NodeResourceLimits(nodeRepository);
}
/**
* Offer some nodes to this. The nodes may have an allocation to a different application or cluster,
* an allocation to this cluster, or no current allocation (in which case one is assigned).
*
* Note that if unallocated nodes are offered before allocated nodes, this will unnecessarily
* reject allocated nodes due to index duplicates.
*
* @param nodesPrioritized the nodes which are potentially on offer. These may belong to a different application etc.
* @return the subset of offeredNodes which was accepted, with the correct allocation assigned
*/
List<Node> offer(List<NodeCandidate> nodesPrioritized) {
List<Node> accepted = new ArrayList<>();
for (NodeCandidate candidate : nodesPrioritized) {
if (candidate.allocation().isPresent()) {
Allocation allocation = candidate.allocation().get();
ClusterMembership membership = allocation.membership();
if ( ! allocation.owner().equals(application)) continue;
if ( ! membership.cluster().satisfies(cluster)) continue;
if ((! candidate.isSurplus || saturated()) && ! membership.cluster().group().equals(cluster.group())) continue;
if ( candidate.state() == Node.State.active && allocation.isRemovable()) continue;
if ( indexes.contains(membership.index())) continue;
boolean resizeable = requestedNodes.considerRetiring() && candidate.isResizable;
boolean acceptToRetire = acceptToRetire(candidate);
if ((! saturated() && hasCompatibleFlavor(candidate) && requestedNodes.acceptable(candidate)) || acceptToRetire) {
candidate = candidate.withNode();
if (candidate.isValid())
accepted.add(acceptNode(candidate, shouldRetire(candidate, nodesPrioritized), resizeable));
}
}
else if (! saturated() && hasCompatibleFlavor(candidate)) {
if (requestedNodes.type() == NodeType.tenant && ! nodeResourceLimits.isWithinRealLimits(candidate, cluster)) {
++rejectedDueToInsufficientRealResources;
continue;
}
if ( violatesParentHostPolicy(candidate)) {
++rejectedDueToClashingParentHost;
continue;
}
if ( violatesExclusivity(candidate)) {
++rejectedDueToExclusivity;
continue;
}
if (candidate.wantToRetire()) {
continue;
}
candidate = candidate.allocate(application,
ClusterMembership.from(cluster, nextIndex.get()),
requestedNodes.resources().orElse(candidate.resources()),
nodeRepository.clock().instant());
if (candidate.isValid())
accepted.add(acceptNode(candidate, false, false));
}
}
return accepted;
}
private boolean shouldRetire(NodeCandidate candidate, List<NodeCandidate> candidates) {
if ( ! requestedNodes.considerRetiring())
return candidate.allocation().map(a -> a.membership().retired()).orElse(false);
if ( ! nodeResourceLimits.isWithinRealLimits(candidate, cluster)) return true;
if (violatesParentHostPolicy(candidate)) return true;
if ( ! hasCompatibleFlavor(candidate)) return true;
if (candidate.wantToRetire()) return true;
if (candidate.preferToRetire() && candidate.replacableBy(candidates)) return true;
if (violatesExclusivity(candidate)) return true;
return false;
}
private boolean violatesParentHostPolicy(NodeCandidate candidate) {
return checkForClashingParentHost() && offeredNodeHasParentHostnameAlreadyAccepted(candidate);
}
private boolean checkForClashingParentHost() {
return nodeRepository.zone().system() == SystemName.main &&
nodeRepository.zone().environment().isProduction() &&
! application.instance().isTester();
}
private boolean offeredNodeHasParentHostnameAlreadyAccepted(NodeCandidate candidate) {
for (NodeCandidate acceptedNode : nodes.values()) {
if (acceptedNode.parentHostname().isPresent() && candidate.parentHostname().isPresent() &&
acceptedNode.parentHostname().get().equals(candidate.parentHostname().get())) {
return true;
}
}
return false;
}
private boolean violatesExclusivity(NodeCandidate candidate) {
if (candidate.parentHostname().isEmpty()) return false;
if (nodeRepository.zone().getCloud().dynamicProvisioning())
return requestedNodes.isExclusive() &&
! candidate.parent.flatMap(Node::exclusiveTo).map(application::equals).orElse(false);
for (Node nodeOnHost : allNodes.childrenOf(candidate.parentHostname().get())) {
if (nodeOnHost.allocation().isEmpty()) continue;
if (requestedNodes.isExclusive() || nodeOnHost.allocation().get().membership().cluster().isExclusive()) {
if ( ! nodeOnHost.allocation().get().owner().equals(application)) return true;
}
}
return false;
}
/**
* Returns whether this node should be accepted into the cluster even if it is not currently desired
* (already enough nodes, or wrong flavor).
* Such nodes will be marked retired during finalization of the list of accepted nodes.
* The conditions for this are:
*
* This is a stateful node. These must always be retired before being removed to allow the cluster to
* migrate away data.
*
* This is a container node and it is not desired due to having the wrong flavor. In this case this
* will (normally) obtain for all the current nodes in the cluster and so retiring before removing must
* be used to avoid removing all the current nodes at once, before the newly allocated replacements are
* initialized. (In the other case, where a container node is not desired because we have enough nodes we
* do want to remove it immediately to get immediate feedback on how the size reduction works out.)
*/
private boolean acceptToRetire(NodeCandidate candidate) {
if (candidate.state() != Node.State.active) return false;
if (! candidate.allocation().get().membership().cluster().group().equals(cluster.group())) return false;
if (candidate.allocation().get().membership().retired()) return true;
if (! requestedNodes.considerRetiring()) return false;
return cluster.isStateful() ||
(cluster.type() == ClusterSpec.Type.container && !hasCompatibleFlavor(candidate));
}
private boolean hasCompatibleFlavor(NodeCandidate candidate) {
return requestedNodes.isCompatible(candidate.flavor(), nodeRepository.flavors()) || candidate.isResizable;
}
private Node acceptNode(NodeCandidate candidate, boolean shouldRetire, boolean resizeable) {
Node node = candidate.toNode();
if (node.allocation().isPresent())
node = node.with(node.allocation().get().withRequestedResources(requestedNodes.resources().orElse(node.resources())));
if (! shouldRetire) {
accepted++;
if (node.allocation().isEmpty()
|| ! ( requestedNodes.needsResize(node) && node.allocation().get().membership().retired()))
acceptedWithoutResizingRetired++;
if (resizeable && ! ( node.allocation().isPresent() && node.allocation().get().membership().retired()))
node = resize(node);
if (node.state() != Node.State.active)
node = node.unretire().removable(false);
} else {
++wasRetiredJustNow;
node = node.retire(nodeRepository.clock().instant());
}
if ( ! node.allocation().get().membership().cluster().equals(cluster)) {
node = setCluster(cluster, node);
}
candidate = candidate.withNode(node);
indexes.add(node.allocation().get().membership().index());
nodes.put(node.hostname(), candidate);
return node;
}
private Node resize(Node node) {
NodeResources hostResources = allNodes.parentOf(node).get().flavor().resources();
return node.with(new Flavor(requestedNodes.resources().get()
.with(hostResources.diskSpeed())
.with(hostResources.storageType())));
}
private Node setCluster(ClusterSpec cluster, Node node) {
ClusterMembership membership = node.allocation().get().membership().with(cluster);
return node.with(node.allocation().get().with(membership));
}
/** Returns true if no more nodes are needed in this list */
private boolean saturated() {
return requestedNodes.saturatedBy(acceptedWithoutResizingRetired);
}
/** Returns true if the content of this list is sufficient to meet the request */
boolean fulfilled() {
return requestedNodes.fulfilledBy(accepted());
}
/** Returns true if this allocation was already fulfilled and resulted in no new changes */
public boolean fulfilledAndNoChanges() {
return fulfilled() && reservableNodes().isEmpty() && newNodes().isEmpty();
}
/**
* Returns {@link FlavorCount} describing the node deficit for the given {@link NodeSpec}.
*
* @return empty if the requested spec is already fulfilled. Otherwise returns {@link FlavorCount} containing the
* flavor and node count required to cover the deficit.
*/
Optional<FlavorCount> nodeDeficit() {
if (nodeType() != NodeType.config && nodeType() != NodeType.tenant) {
return Optional.empty();
}
return Optional.of(new FlavorCount(requestedNodes.resources().orElseGet(NodeResources::unspecified),
requestedNodes.fulfilledDeficitCount(accepted())))
.filter(flavorCount -> flavorCount.getCount() > 0);
}
/** Returns the indices to use when provisioning hosts for this */
/** The node type this is allocating */
NodeType nodeType() {
return requestedNodes.type();
}
/**
* Make the number of <i>non-retired</i> nodes in the list equal to the requested number
* of nodes, and retire the rest of the list. Only retire currently active nodes.
* Prefer to retire nodes of the wrong flavor.
* Make as few changes to the retired set as possible.
*
* @return the final list of nodes
*/
List<Node> finalNodes() {
int currentRetiredCount = (int) nodes.values().stream().filter(node -> node.allocation().get().membership().retired()).count();
int deltaRetiredCount = requestedNodes.idealRetiredCount(nodes.size(), currentRetiredCount) - currentRetiredCount;
if (deltaRetiredCount > 0) {
for (NodeCandidate candidate : byRetiringPriority(nodes.values())) {
if ( ! candidate.allocation().get().membership().retired() && candidate.state() == Node.State.active) {
candidate = candidate.withNode();
candidate = candidate.withNode(candidate.toNode().retire(Agent.application, nodeRepository.clock().instant()));
nodes.put(candidate.toNode().hostname(), candidate);
if (--deltaRetiredCount == 0) break;
}
}
}
else if (deltaRetiredCount < 0) {
for (NodeCandidate candidate : byUnretiringPriority(nodes.values())) {
if ( candidate.allocation().get().membership().retired() && hasCompatibleFlavor(candidate) ) {
candidate = candidate.withNode();
if (candidate.isResizable)
candidate = candidate.withNode(resize(candidate.toNode()));
candidate = candidate.withNode(candidate.toNode().unretire());
nodes.put(candidate.toNode().hostname(), candidate);
if (++deltaRetiredCount == 0) break;
}
}
}
for (NodeCandidate candidate : nodes.values()) {
candidate = candidate.withNode();
Allocation allocation = candidate.allocation().get();
candidate = candidate.withNode(candidate.toNode().with(allocation.with(allocation.membership()
.with(allocation.membership().cluster().exclusive(requestedNodes.isExclusive())))));
nodes.put(candidate.toNode().hostname(), candidate);
}
return nodes.values().stream().map(n -> n.toNode()).collect(Collectors.toList());
}
List<Node> reservableNodes() {
EnumSet<Node.State> reservableStates = EnumSet.of(Node.State.inactive, Node.State.ready, Node.State.reserved);
return nodesFilter(n -> ! n.isNew && reservableStates.contains(n.state()));
}
List<Node> newNodes() {
return nodesFilter(n -> n.isNew);
}
private List<Node> nodesFilter(Predicate<NodeCandidate> predicate) {
return nodes.values().stream()
.filter(predicate)
.map(n -> n.toNode())
.collect(Collectors.toList());
}
/** Returns the number of nodes accepted this far */
private int accepted() {
if (nodeType() == NodeType.tenant) return accepted;
return allNodes.nodeType(nodeType()).size();
}
/** Prefer to retire nodes we want the least */
private List<NodeCandidate> byRetiringPriority(Collection<NodeCandidate> candidates) {
return candidates.stream().sorted(Comparator.reverseOrder()).collect(Collectors.toList());
}
/** Prefer to unretire nodes we don't want to retire, and otherwise those with lower index */
private List<NodeCandidate> byUnretiringPriority(Collection<NodeCandidate> candidates) {
return candidates.stream()
.sorted(Comparator.comparing(NodeCandidate::wantToRetire)
.thenComparing(n -> n.allocation().get().membership().index()))
.collect(Collectors.toList());
}
public String outOfCapacityDetails() {
List<String> reasons = new ArrayList<>();
if (rejectedDueToExclusivity > 0)
reasons.add("host exclusivity constraints");
if (rejectedDueToClashingParentHost > 0)
reasons.add("insufficient nodes available on separate physical hosts");
if (wasRetiredJustNow > 0)
reasons.add("retirement of allocated nodes");
if (rejectedDueToInsufficientRealResources > 0)
reasons.add("insufficient real resources on hosts");
if (reasons.isEmpty()) return "";
return ": Not enough nodes available due to " + String.join(", ", reasons);
}
private static Integer parseIndex(String hostname) {
try {
return Integer.parseInt(hostname.replaceFirst("^\\D+(\\d+)\\..*", "$1"));
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Could not parse index from hostname '" + hostname + "'", e);
}
}
static class FlavorCount {
private final NodeResources flavor;
private final int count;
private FlavorCount(NodeResources flavor, int count) {
this.flavor = flavor;
this.count = count;
}
NodeResources getFlavor() {
return flavor;
}
int getCount() {
return count;
}
}
} |
```suggestion tester.assertResources("Advice to scale up since we assume we need 2x cpu for growth when no data", ``` | public void test_autoscaling_considers_growth_rate() {
NodeResources resources = new NodeResources(3, 100, 100, 1);
ClusterResources min = new ClusterResources( 1, 1, resources);
ClusterResources max = new ClusterResources(10, 1, resources);
AutoscalingTester tester = new AutoscalingTester(resources.withVcpu(resources.vcpu() * 2));
ApplicationId application1 = tester.applicationId("application1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.container, "cluster1");
tester.deploy(application1, cluster1, 5, 1, resources);
tester.addCpuMeasurements(0.25f, 1f, 120, application1);
tester.assertResources("Advice to scale up sine we assume we need 2x cpu for growth when no data",
7, 1, 3, 100, 100,
tester.autoscale(application1, cluster1.id(), min, max).target());
tester.setScalingDuration(application1, cluster1.id(), Duration.ofMinutes(5));
tester.addQueryRateMeasurements(application1, cluster1.id(),
100,
t -> 10.0 + (t < 50 ? t : 100 - t));
tester.assertResources("Advice to scale down since observed growth is much slower than scaling time",
4, 1, 3, 100, 100,
tester.autoscale(application1, cluster1.id(), min, max).target());
tester.clearQueryRateMeasurements(application1, cluster1.id());
System.out.println("The fast growth one");
tester.setScalingDuration(application1, cluster1.id(), Duration.ofMinutes(60));
tester.addQueryRateMeasurements(application1, cluster1.id(),
100,
t -> 10.0 + (t < 50 ? t * t * t : 125000 - (t - 49) * (t - 49) * (t - 49)));
tester.assertResources("Advice to scale up since observed growth is much faster than scaling time",
10, 1, 3, 100, 100,
tester.autoscale(application1, cluster1.id(), min, max).target());
} | tester.assertResources("Advice to scale up sine we assume we need 2x cpu for growth when no data", | public void test_autoscaling_considers_growth_rate() {
NodeResources resources = new NodeResources(3, 100, 100, 1);
ClusterResources min = new ClusterResources( 1, 1, resources);
ClusterResources max = new ClusterResources(10, 1, resources);
AutoscalingTester tester = new AutoscalingTester(resources.withVcpu(resources.vcpu() * 2));
ApplicationId application1 = tester.applicationId("application1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.container, "cluster1");
tester.deploy(application1, cluster1, 5, 1, resources);
tester.addCpuMeasurements(0.25f, 1f, 120, application1);
tester.assertResources("Advice to scale up since we assume we need 2x cpu for growth when no data",
7, 1, 3, 100, 100,
tester.autoscale(application1, cluster1.id(), min, max).target());
tester.setScalingDuration(application1, cluster1.id(), Duration.ofMinutes(5));
tester.addQueryRateMeasurements(application1, cluster1.id(),
100,
t -> 10.0 + (t < 50 ? t : 100 - t));
tester.assertResources("Advice to scale down since observed growth is much slower than scaling time",
4, 1, 3, 100, 100,
tester.autoscale(application1, cluster1.id(), min, max).target());
tester.clearQueryRateMeasurements(application1, cluster1.id());
System.out.println("The fast growth one");
tester.setScalingDuration(application1, cluster1.id(), Duration.ofMinutes(60));
tester.addQueryRateMeasurements(application1, cluster1.id(),
100,
t -> 10.0 + (t < 50 ? t * t * t : 125000 - (t - 49) * (t - 49) * (t - 49)));
tester.assertResources("Advice to scale up since observed growth is much faster than scaling time",
10, 1, 3, 100, 100,
tester.autoscale(application1, cluster1.id(), min, max).target());
} | class AutoscalingTest {
/**
 * Exercises the full scaling lifecycle of a single-group content cluster:
 * no data -> no advice, too little data -> no advice, scale up, no advice while in flux,
 * small load change -> no advice, then scale down to minimum.
 */
@Test
public void test_autoscaling_single_content_group() {
    NodeResources hostResources = new NodeResources(3, 100, 100, 1);
    ClusterResources min = new ClusterResources( 2, 1,
                                                 new NodeResources(1, 1, 1, 1, NodeResources.DiskSpeed.any));
    ClusterResources max = new ClusterResources(20, 1,
                                                new NodeResources(100, 1000, 1000, 1, NodeResources.DiskSpeed.any));
    AutoscalingTester tester = new AutoscalingTester(hostResources);

    ApplicationId application1 = tester.applicationId("application1");
    ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.content, "cluster1");

    tester.deploy(application1, cluster1, 5, 1, hostResources);

    tester.clock().advance(Duration.ofDays(1));
    assertTrue("No measurements -> No change", tester.autoscale(application1, cluster1.id(), min, max).isEmpty());

    tester.addCpuMeasurements(0.25f, 1f, 59, application1);
    assertTrue("Too few measurements -> No change", tester.autoscale(application1, cluster1.id(), min, max).isEmpty());

    tester.clock().advance(Duration.ofDays(1));
    tester.addCpuMeasurements(0.25f, 1f, 120, application1);
    ClusterResources scaledResources = tester.assertResources("Scaling up since resource usage is too high",
                                                              14, 1, 1.4, 30.8, 30.8,
                                                              tester.autoscale(application1, cluster1.id(), min, max).target());

    tester.deploy(application1, cluster1, scaledResources);
    assertTrue("Cluster in flux -> No further change", tester.autoscale(application1, cluster1.id(), min, max).isEmpty());

    tester.deactivateRetired(application1, cluster1, scaledResources);
    tester.clock().advance(Duration.ofDays(2));
    tester.addCpuMeasurements(0.8f, 1f, 3, application1);
    assertTrue("Load change is large, but insufficient measurements for new config -> No change",
               tester.autoscale(application1, cluster1.id(), min, max).isEmpty());

    tester.addCpuMeasurements(0.19f, 1f, 100, application1);
    assertEquals("Load change is small -> No change", Optional.empty(), tester.autoscale(application1, cluster1.id(), min, max).target());

    tester.addCpuMeasurements(0.1f, 1f, 120, application1);
    tester.assertResources("Scaling down to minimum since usage has gone down significantly",
                           15, 1, 1.0, 28.6, 28.6,
                           tester.autoscale(application1, cluster1.id(), min, max).target());

    // Fix: the scaling-event list was fetched into an unused local ('events') and then discarded.
    // Actually verify that the rescaling performed above was recorded as scaling events.
    assertTrue("Scaling events were recorded",
               ! tester.nodeRepository().applications().get(application1).get()
                       .cluster(cluster1.id()).get().scalingEvents().isEmpty());
}
/** We prefer fewer nodes for container clusters as (we assume) they all use the same disk and memory */
// Verifies scale-up on high cpu load and scale-down once cpu load drops again, for a
// single-group container cluster.
@Test
public void test_autoscaling_single_container_group() {
NodeResources resources = new NodeResources(3, 100, 100, 1);
ClusterResources min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1));
ClusterResources max = new ClusterResources(20, 1, new NodeResources(100, 1000, 1000, 1));
// Hosts get double vcpu so there is room to grow into.
AutoscalingTester tester = new AutoscalingTester(resources.withVcpu(resources.vcpu() * 2));
ApplicationId application1 = tester.applicationId("application1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.container, "cluster1");
tester.deploy(application1, cluster1, 5, 1, resources);
tester.addCpuMeasurements(0.25f, 1f, 120, application1);
ClusterResources scaledResources = tester.assertResources("Scaling up since cpu usage is too high",
7, 1, 2.5, 80.0, 80.0,
tester.autoscale(application1, cluster1.id(), min, max).target());
tester.deploy(application1, cluster1, scaledResources);
tester.deactivateRetired(application1, cluster1, scaledResources);
// Lower cpu load after the rescale should produce scale-down advice.
tester.addCpuMeasurements(0.1f, 1f, 120, application1);
tester.assertResources("Scaling down since cpu usage has gone down",
4, 1, 2.5, 68.6, 68.6,
tester.autoscale(application1, cluster1.id(), min, max).target());
}
/**
 * Verifies that a cluster deployed with a concrete disk speed (slow) can be rescaled to
 * limits declared with DiskSpeed.any, and that the 'any' setting is what ends up requested.
 */
@Test
public void autoscaling_handles_disk_setting_changes() {
    NodeResources hostResources = new NodeResources(3, 100, 100, 1, NodeResources.DiskSpeed.slow);
    AutoscalingTester tester = new AutoscalingTester(hostResources);

    ApplicationId application1 = tester.applicationId("application1");
    ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.content, "cluster1");

    tester.deploy(application1, cluster1, 5, 1, hostResources);
    // Fix: the allMatch(...) result was computed and silently discarded, so this check never
    // asserted anything. Wrap it in assertTrue so a mismatch actually fails the test.
    assertTrue(tester.nodeRepository().nodes().list().owner(application1).stream()
                     .allMatch(n -> n.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.slow));

    tester.clock().advance(Duration.ofDays(2));
    tester.addCpuMeasurements(0.25f, 1f, 120, application1);

    // Scaling limits use 'any' disk speed.
    ClusterResources min = new ClusterResources( 2, 1,
                                                 new NodeResources(1, 1, 1, 1, NodeResources.DiskSpeed.any));
    ClusterResources max = new ClusterResources(20, 1,
                                                new NodeResources(100, 1000, 1000, 1, NodeResources.DiskSpeed.any));
    ClusterResources scaledResources = tester.assertResources("Scaling up since resource usage is too high",
                                                              14, 1, 1.4, 30.8, 30.8,
                                                              tester.autoscale(application1, cluster1.id(), min, max).target());
    assertEquals("Disk speed from min/max is used",
                 NodeResources.DiskSpeed.any, scaledResources.nodeResources().diskSpeed());

    tester.deploy(application1, cluster1, scaledResources);
    // Fix: same silently-discarded allMatch(...) as above.
    assertTrue(tester.nodeRepository().nodes().list().owner(application1).stream()
                     .allMatch(n -> n.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.any));
}
// Verifies that autoscaling advice preserves DiskSpeed.any from the configured limits
// rather than replacing it with a concrete disk speed.
@Test
public void autoscaling_target_preserves_any() {
NodeResources hostResources = new NodeResources(3, 100, 100, 1);
AutoscalingTester tester = new AutoscalingTester(hostResources);
ApplicationId application1 = tester.applicationId("application1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.content, "cluster1");
// Limits declare 'any' disk speed.
NodeResources resources = new NodeResources(1, 10, 10, 1);
var min = new ClusterResources( 2, 1, resources.with(NodeResources.DiskSpeed.any));
var max = new ClusterResources( 10, 1, resources.with(NodeResources.DiskSpeed.any));
tester.deploy(application1, cluster1, Capacity.from(min, max));
assertTrue(tester.nodeRepository().applications().get(application1).get().cluster(cluster1.id()).get().targetResources().isEmpty());
// Redeploying should still request 'any'.
tester.deploy(application1, cluster1, Capacity.from(min, max));
assertEquals(NodeResources.DiskSpeed.any,
tester.nodeRepository().nodes().list().owner(application1).cluster(cluster1.id()).first().get()
.allocation().get().requestedResources().diskSpeed());
tester.clock().advance(Duration.ofDays(2));
tester.addCpuMeasurements(0.8f, 1f, 120, application1);
// High cpu load produces a target; the target must keep 'any' disk speed.
Autoscaler.Advice advice = tester.autoscale(application1, cluster1.id(), min, max);
assertEquals(NodeResources.DiskSpeed.any, advice.target().get().nodeResources().diskSpeed());
}
// Verifies that scale-up advice is capped at the configured max resources even when
// measured load would justify going higher.
@Test
public void autoscaling_respects_upper_limit() {
NodeResources hostResources = new NodeResources(6, 100, 100, 1);
ClusterResources min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1));
ClusterResources max = new ClusterResources( 6, 1, new NodeResources(2.4, 78, 79, 1));
AutoscalingTester tester = new AutoscalingTester(hostResources);
ApplicationId application1 = tester.applicationId("application1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.container, "cluster1");
tester.deploy(application1, cluster1, 5, 1,
new NodeResources(1.9, 70, 70, 1));
// Very high memory/disk load: the advice below equals max exactly.
tester.addMeasurements(0.25f, 0.95f, 0.95f, 0, 120, application1);
tester.assertResources("Scaling up to limit since resource usage is too high",
6, 1, 2.4, 78.0, 79.0,
tester.autoscale(application1, cluster1.id(), min, max).target());
}
// Verifies that scale-down advice never goes below the configured min resources.
@Test
public void autoscaling_respects_lower_limit() {
NodeResources resources = new NodeResources(3, 100, 100, 1);
ClusterResources min = new ClusterResources( 4, 1, new NodeResources(1.8, 7.4, 8.5, 1));
ClusterResources max = new ClusterResources( 6, 1, new NodeResources(2.4, 78, 79, 1));
AutoscalingTester tester = new AutoscalingTester(resources.withVcpu(resources.vcpu() * 2));
ApplicationId application1 = tester.applicationId("application1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.container, "cluster1");
tester.deploy(application1, cluster1, 5, 1, resources);
// Very low load on all dimensions: advice is clamped to min (node count, cpu, memory).
tester.addMeasurements(0.05f, 0.05f, 0.05f, 0, 120, application1);
tester.assertResources("Scaling down to limit since resource usage is low",
4, 1, 1.8, 7.4, 10.0,
tester.autoscale(application1, cluster1.id(), min, max).target());
}
// Verifies that the advised group count stays within the configured min/max group limits.
@Test
public void autoscaling_respects_group_limit() {
NodeResources hostResources = new NodeResources(30.0, 100, 100, 1);
ClusterResources min = new ClusterResources( 2, 2, new NodeResources(1, 1, 1, 1));
ClusterResources max = new ClusterResources(18, 6, new NodeResources(100, 1000, 1000, 1));
AutoscalingTester tester = new AutoscalingTester(hostResources);
ApplicationId application1 = tester.applicationId("application1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.container, "cluster1");
// Deploy 5 nodes in 5 groups, then feed high cpu load.
tester.deploy(application1, cluster1, 5, 5, new NodeResources(3.0, 10, 10, 1));
tester.addCpuMeasurements( 0.3f, 1f, 240, application1);
tester.assertResources("Scaling up since resource usage is too high",
6, 6, 3.6, 8.0, 10.0,
tester.autoscale(application1, cluster1.id(), min, max).target());
}
// When min == max there is no room to scale, so no advice should be produced
// regardless of load.
@Test
public void test_autoscaling_limits_when_min_equals_max() {
NodeResources resources = new NodeResources(3, 100, 100, 1);
ClusterResources min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1));
ClusterResources max = min;
AutoscalingTester tester = new AutoscalingTester(resources);
ApplicationId application1 = tester.applicationId("application1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.container, "cluster1");
tester.deploy(application1, cluster1, 5, 1, resources);
tester.clock().advance(Duration.ofDays(1));
tester.addCpuMeasurements(0.25f, 1f, 120, application1);
assertTrue(tester.autoscale(application1, cluster1.id(), min, max).isEmpty());
}
// Suggestion mode (tester.suggest) should propose resources outside the configured
// min/max limits, unlike autoscaling which is clamped to them.
@Test
public void suggestions_ignores_limits() {
NodeResources resources = new NodeResources(3, 100, 100, 1);
ClusterResources min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1));
// min == max, yet the suggestion below exceeds it.
ClusterResources max = min;
AutoscalingTester tester = new AutoscalingTester(resources.withVcpu(resources.vcpu() * 2));
ApplicationId application1 = tester.applicationId("application1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.container, "cluster1");
tester.deploy(application1, cluster1, 5, 1, resources);
tester.addCpuMeasurements(0.25f, 1f, 120, application1);
tester.assertResources("Scaling up since resource usage is too high",
7, 1, 2.5, 80.0, 80.0,
tester.suggest(application1, cluster1.id(), min, max).target());
}
/**
 * Metrics collected while nodes were out of service must not drive scaling decisions.
 * NOTE(review): the two boolean arguments to addMeasurements are presumed to be
 * (inService, stable) — here inService=false, stable=true; confirm against AutoscalingTester.
 */
@Test
public void not_using_out_of_service_measurements() {
    NodeResources resources = new NodeResources(3, 100, 100, 1);
    ClusterResources min = new ClusterResources(2, 1, new NodeResources(1, 1, 1, 1));
    ClusterResources max = new ClusterResources(5, 1, new NodeResources(100, 1000, 1000, 1));
    AutoscalingTester tester = new AutoscalingTester(resources.withVcpu(resources.vcpu() * 2));
    ApplicationId application1 = tester.applicationId("application1");
    ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.container, "cluster1");

    tester.deploy(application1, cluster1, 2, 1, resources);

    // High load, but the nodes report out of service.
    tester.addMeasurements(0.5f, 0.6f, 0.7f, 1, false, true, 120, application1);
    // Fix: the assertion message was copy-pasted from not_using_unstable_measurements;
    // this test is about out-of-service measurements, not an unstable cluster.
    assertTrue("Not scaling up since nodes were measured while out of service",
               tester.autoscale(application1, cluster1.id(), min, max).isEmpty());
}
// Metrics collected while the cluster was unstable must not drive scaling decisions.
// NOTE(review): the boolean arguments to addMeasurements are presumed to be
// (inService, stable) — here inService=true, stable=false; confirm against AutoscalingTester.
@Test
public void not_using_unstable_measurements() {
NodeResources resources = new NodeResources(3, 100, 100, 1);
ClusterResources min = new ClusterResources(2, 1, new NodeResources(1, 1, 1, 1));
ClusterResources max = new ClusterResources(5, 1, new NodeResources(100, 1000, 1000, 1));
AutoscalingTester tester = new AutoscalingTester(resources.withVcpu(resources.vcpu() * 2));
ApplicationId application1 = tester.applicationId("application1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.container, "cluster1");
tester.deploy(application1, cluster1, 2, 1, resources);
tester.addMeasurements(0.5f, 0.6f, 0.7f, 1, true, false, 120, application1);
assertTrue("Not scaling up since nodes were measured while cluster was unstable",
tester.autoscale(application1, cluster1.id(), min, max).isEmpty());
}
// Clusters deployed with one node per group should keep node count == group count
// when scaling up.
@Test
public void test_autoscaling_group_size_1() {
NodeResources resources = new NodeResources(3, 100, 100, 1);
ClusterResources min = new ClusterResources( 2, 2, new NodeResources(1, 1, 1, 1));
ClusterResources max = new ClusterResources(20, 20, new NodeResources(100, 1000, 1000, 1));
AutoscalingTester tester = new AutoscalingTester(resources.withVcpu(resources.vcpu() * 2));
ApplicationId application1 = tester.applicationId("application1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.container, "cluster1");
// 5 nodes in 5 groups -> advice keeps groups of size 1 (7 nodes, 7 groups).
tester.deploy(application1, cluster1, 5, 5, resources);
tester.addCpuMeasurements(0.25f, 1f, 120, application1);
tester.assertResources("Scaling up since resource usage is too high",
7, 7, 2.5, 80.0, 80.0,
tester.autoscale(application1, cluster1.id(), min, max).target());
}
// When cpu is the bottleneck the autoscaler may change the group topology; here
// collapsing from 2 groups to 1 is the cheaper configuration.
@Test
public void test_autoscaling_groupsize_by_cpu() {
NodeResources resources = new NodeResources(3, 100, 100, 1);
ClusterResources min = new ClusterResources( 3, 1, new NodeResources(1, 1, 1, 1));
ClusterResources max = new ClusterResources(21, 7, new NodeResources(100, 1000, 1000, 1));
AutoscalingTester tester = new AutoscalingTester(resources.withVcpu(resources.vcpu() * 2));
ApplicationId application1 = tester.applicationId("application1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.container, "cluster1");
tester.deploy(application1, cluster1, 6, 2, resources);
tester.addCpuMeasurements(0.25f, 1f, 120, application1);
tester.assertResources("Scaling up since resource usage is too high, changing to 1 group is cheaper",
8, 1, 2.7, 83.3, 83.3,
tester.autoscale(application1, cluster1.id(), min, max).target());
}
// When memory is the bottleneck, increasing the group size (nodes per group) is
// preferred over adding groups.
@Test
public void test_autoscaling_group_size() {
NodeResources hostResources = new NodeResources(100, 1000, 1000, 100);
ClusterResources min = new ClusterResources( 3, 2, new NodeResources(1, 1, 1, 1));
ClusterResources max = new ClusterResources(30, 30, new NodeResources(100, 100, 1000, 1));
AutoscalingTester tester = new AutoscalingTester(hostResources);
ApplicationId application1 = tester.applicationId("application1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.content, "cluster1");
tester.deploy(application1, cluster1, 6, 2, new NodeResources(10, 100, 100, 1));
tester.clock().advance(Duration.ofDays(1));
// Saturated memory drives the advice below.
tester.addMemMeasurements(1.0f, 1f, 1000, application1);
tester.assertResources("Increase group size to reduce memory load",
8, 2, 13.6, 89.3, 62.5,
tester.autoscale(application1, cluster1.id(), min, max).target());
}
// Scaling down on low memory load must still land on a configuration that is
// actually deployable (no "illegal" in-between resource combinations).
@Test
public void autoscaling_avoids_illegal_configurations() {
NodeResources hostResources = new NodeResources(6, 100, 100, 1);
ClusterResources min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1));
ClusterResources max = new ClusterResources(20, 1, new NodeResources(100, 1000, 1000, 1));
AutoscalingTester tester = new AutoscalingTester(hostResources);
ApplicationId application1 = tester.applicationId("application1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.content, "cluster1");
tester.deploy(application1, cluster1, 6, 1, hostResources.withVcpu(hostResources.vcpu() / 2));
// Advance time so the scale-down delay (see scaling_down_only_after_delay) has passed.
tester.clock().advance(Duration.ofDays(2));
tester.addMemMeasurements(0.02f, 0.95f, 120, application1);
tester.assertResources("Scaling down",
6, 1, 2.9, 4.0, 95.0,
tester.autoscale(application1, cluster1.id(), min, max).target());
}
// Scale-down advice is withheld until a waiting period has passed, even when low
// load would already justify it.
@Test
public void scaling_down_only_after_delay() {
NodeResources hostResources = new NodeResources(6, 100, 100, 1);
ClusterResources min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1));
ClusterResources max = new ClusterResources(20, 1, new NodeResources(100, 1000, 1000, 1));
AutoscalingTester tester = new AutoscalingTester(hostResources);
ApplicationId application1 = tester.applicationId("application1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.content, "cluster1");
tester.deploy(application1, cluster1, 6, 1, hostResources.withVcpu(hostResources.vcpu() / 2));
// Low load immediately after deployment: no advice yet.
tester.addMemMeasurements(0.02f, 0.95f, 120, application1);
assertTrue(tester.autoscale(application1, cluster1.id(), min, max).target().isEmpty());
// After the delay (2 days here) the same low load does produce scale-down advice.
tester.clock().advance(Duration.ofDays(2));
tester.addMemMeasurements(0.02f, 0.95f, 120, application1);
tester.assertResources("Scaling down",
6, 1, 2.9, 4.0, 95.0,
tester.autoscale(application1, cluster1.id(), min, max).target());
}
// Autoscaling must reason about *real* (post-overhead) resources: with a 15Gb memory
// tax the advised memory is higher (34 vs 20) while the rest of the advice is unchanged.
@Test
public void real_resources_are_taken_into_account() {
NodeResources hostResources = new NodeResources(60, 100, 1000, 10);
ClusterResources min = new ClusterResources(2, 1, new NodeResources( 2, 20, 200, 1));
ClusterResources max = new ClusterResources(4, 1, new NodeResources(60, 100, 1000, 1));
// No memory tax.
{
AutoscalingTester tester = new AutoscalingTester(hostResources, new OnlySubtractingWhenForecastingCalculator(0));
ApplicationId application1 = tester.applicationId("app1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.content, "cluster1");
tester.deploy(application1, cluster1, min);
tester.addMeasurements(1.0f, 1.0f, 0.7f, 0, 1000, application1);
tester.assertResources("Scaling up",
4, 1, 7.4, 20, 200,
tester.autoscale(application1, cluster1.id(), min, max).target());
}
// 15Gb memory tax.
{
AutoscalingTester tester = new AutoscalingTester(hostResources, new OnlySubtractingWhenForecastingCalculator(15));
ApplicationId application1 = tester.applicationId("app1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.content, "cluster1");
tester.deploy(application1, cluster1, min);
tester.addMeasurements(1.0f, 1.0f, 0.7f, 0, 1000, application1);
tester.assertResources("Scaling up",
4, 1, 7.4, 34, 200,
tester.autoscale(application1, cluster1.id(), min, max).target());
}
}
// With dynamic provisioning the advice must land on an actually available flavor
// (here the aws-* flavors), both when scaling up and when scaling back down.
@Test
public void test_autoscaling_with_dynamic_provisioning() {
ClusterResources min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1));
ClusterResources max = new ClusterResources(20, 1, new NodeResources(100, 1000, 1000, 1));
// Flavors differ only in memory (200/150/100/80 Gb).
List<Flavor> flavors = new ArrayList<>();
flavors.add(new Flavor("aws-xlarge", new NodeResources(3, 200, 100, 1, NodeResources.DiskSpeed.fast, NodeResources.StorageType.remote)));
flavors.add(new Flavor("aws-large", new NodeResources(3, 150, 100, 1, NodeResources.DiskSpeed.fast, NodeResources.StorageType.remote)));
flavors.add(new Flavor("aws-medium", new NodeResources(3, 100, 100, 1, NodeResources.DiskSpeed.fast, NodeResources.StorageType.remote)));
flavors.add(new Flavor("aws-small", new NodeResources(3, 80, 100, 1, NodeResources.DiskSpeed.fast, NodeResources.StorageType.remote)));
AutoscalingTester tester = new AutoscalingTester(new Zone(Cloud.builder()
.dynamicProvisioning(true)
.build(),
SystemName.main,
Environment.prod, RegionName.from("us-east")),
flavors);
ApplicationId application1 = tester.applicationId("application1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.content, "cluster1");
tester.deploy(application1, cluster1, 5, 1, new NodeResources(3, 103, 100, 1));
tester.clock().advance(Duration.ofDays(2));
// High memory load -> scale up.
tester.addMemMeasurements(0.9f, 0.6f, 120, application1);
ClusterResources scaledResources = tester.assertResources("Scaling up since resource usage is too high.",
8, 1, 3, 83, 34.3,
tester.autoscale(application1, cluster1.id(), min, max).target());
tester.deploy(application1, cluster1, scaledResources);
tester.deactivateRetired(application1, cluster1, scaledResources);
tester.clock().advance(Duration.ofDays(2));
// Lower memory load -> scale down again.
tester.addMemMeasurements(0.3f, 0.6f, 1000, application1);
tester.assertResources("Scaling down since resource usage has gone down",
6, 1, 3, 83, 28.8,
tester.autoscale(application1, cluster1.id(), min, max).target());
}
// The autoscaler sets capacity aside for bcp based on this cluster's share of global
// reads: the default assumption, an explicit half share, and a full share are compared.
@Test
public void test_autoscaling_considers_read_share() {
NodeResources resources = new NodeResources(3, 100, 100, 1);
ClusterResources min = new ClusterResources( 1, 1, resources);
ClusterResources max = new ClusterResources(10, 1, resources);
AutoscalingTester tester = new AutoscalingTester(resources.withVcpu(resources.vcpu() * 2));
ApplicationId application1 = tester.applicationId("application1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.container, "cluster1");
tester.deploy(application1, cluster1, 5, 1, resources);
tester.addCpuMeasurements(0.25f, 1f, 120, application1);
// No read-share info stored: default bcp headroom applies.
tester.assertResources("Advice to scale up since we set aside for bcp by default",
7, 1, 3, 100, 100,
tester.autoscale(application1, cluster1.id(), min, max).target());
// Current share 0.25 of max 0.5 -> same headroom as the default above.
tester.storeReadShare(0.25, 0.5, application1);
tester.assertResources("Half of global share is the same as the default assumption used above",
7, 1, 3, 100, 100,
tester.autoscale(application1, cluster1.id(), min, max).target());
// Already at the max share -> no bcp headroom needed, so scale down.
tester.storeReadShare(0.5, 0.5, application1);
tester.assertResources("Advice to scale down since we don't need room for bcp",
4, 1, 3, 100, 100,
tester.autoscale(application1, cluster1.id(), min, max).target());
}
// NOTE(review): removed the stray @Test annotation that preceded this class. JUnit's @Test
// targets methods only (@Target(METHOD)), so annotating a class with it is invalid; it was
// most likely left behind by a deleted test method.
/**
 * This calculator subtracts the memory tax when forecasting overhead, but not when actually
 * returning information about nodes. This is allowed because the forecast is a *worst case*.
 * It is useful here because it ensures that we end up with the same real (and therefore target)
 * resources regardless of tax which makes it easier to compare behavior with different tax levels.
 */
private static class OnlySubtractingWhenForecastingCalculator implements HostResourcesCalculator {

    /** The amount of memory (Gb) assumed consumed as overhead on each node. */
    private final int memoryTaxGb;

    public OnlySubtractingWhenForecastingCalculator(int memoryTaxGb) {
        this.memoryTaxGb = memoryTaxGb;
    }

    /** Returns the node's resources unchanged — no tax is subtracted when reporting. */
    @Override
    public NodeResources realResourcesOf(Nodelike node, NodeRepository nodeRepository, boolean exclusive) {
        return node.resources();
    }

    /** Returns the flavor's resources unchanged. */
    @Override
    public NodeResources advertisedResourcesOf(Flavor flavor) {
        return flavor.resources();
    }

    /** Subtracts the memory tax when forecasting the real resources behind a request. */
    @Override
    public NodeResources requestToReal(NodeResources resources, boolean exclusive) {
        return resources.withMemoryGb(resources.memoryGb() - memoryTaxGb);
    }

    /** Adds the memory tax back when converting real resources to a request. */
    @Override
    public NodeResources realToRequest(NodeResources resources, boolean exclusive) {
        return resources.withMemoryGb(resources.memoryGb() + memoryTaxGb);
    }

    @Override
    public long thinPoolSizeInBase2Gb(NodeType nodeType, boolean sharedHost) { return 0; }

}
} | class AutoscalingTest {
/**
 * Exercises the full scaling lifecycle of a single-group content cluster:
 * no data -> no advice, too little data -> no advice, scale up, no advice while in flux,
 * small load change -> no advice, then scale down to minimum.
 */
@Test
public void test_autoscaling_single_content_group() {
    NodeResources hostResources = new NodeResources(3, 100, 100, 1);
    ClusterResources min = new ClusterResources( 2, 1,
                                                 new NodeResources(1, 1, 1, 1, NodeResources.DiskSpeed.any));
    ClusterResources max = new ClusterResources(20, 1,
                                                new NodeResources(100, 1000, 1000, 1, NodeResources.DiskSpeed.any));
    AutoscalingTester tester = new AutoscalingTester(hostResources);

    ApplicationId application1 = tester.applicationId("application1");
    ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.content, "cluster1");

    tester.deploy(application1, cluster1, 5, 1, hostResources);

    tester.clock().advance(Duration.ofDays(1));
    assertTrue("No measurements -> No change", tester.autoscale(application1, cluster1.id(), min, max).isEmpty());

    tester.addCpuMeasurements(0.25f, 1f, 59, application1);
    assertTrue("Too few measurements -> No change", tester.autoscale(application1, cluster1.id(), min, max).isEmpty());

    tester.clock().advance(Duration.ofDays(1));
    tester.addCpuMeasurements(0.25f, 1f, 120, application1);
    ClusterResources scaledResources = tester.assertResources("Scaling up since resource usage is too high",
                                                              14, 1, 1.4, 30.8, 30.8,
                                                              tester.autoscale(application1, cluster1.id(), min, max).target());

    tester.deploy(application1, cluster1, scaledResources);
    assertTrue("Cluster in flux -> No further change", tester.autoscale(application1, cluster1.id(), min, max).isEmpty());

    tester.deactivateRetired(application1, cluster1, scaledResources);
    tester.clock().advance(Duration.ofDays(2));
    tester.addCpuMeasurements(0.8f, 1f, 3, application1);
    assertTrue("Load change is large, but insufficient measurements for new config -> No change",
               tester.autoscale(application1, cluster1.id(), min, max).isEmpty());

    tester.addCpuMeasurements(0.19f, 1f, 100, application1);
    assertEquals("Load change is small -> No change", Optional.empty(), tester.autoscale(application1, cluster1.id(), min, max).target());

    tester.addCpuMeasurements(0.1f, 1f, 120, application1);
    tester.assertResources("Scaling down to minimum since usage has gone down significantly",
                           15, 1, 1.0, 28.6, 28.6,
                           tester.autoscale(application1, cluster1.id(), min, max).target());

    // Fix: the scaling-event list was fetched into an unused local ('events') and then discarded.
    // Actually verify that the rescaling performed above was recorded as scaling events.
    assertTrue("Scaling events were recorded",
               ! tester.nodeRepository().applications().get(application1).get()
                       .cluster(cluster1.id()).get().scalingEvents().isEmpty());
}
/** We prefer fewer nodes for container clusters as (we assume) they all use the same disk and memory */
// Verifies scale-up on high cpu load and scale-down once cpu load drops again, for a
// single-group container cluster.
@Test
public void test_autoscaling_single_container_group() {
NodeResources resources = new NodeResources(3, 100, 100, 1);
ClusterResources min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1));
ClusterResources max = new ClusterResources(20, 1, new NodeResources(100, 1000, 1000, 1));
// Hosts get double vcpu so there is room to grow into.
AutoscalingTester tester = new AutoscalingTester(resources.withVcpu(resources.vcpu() * 2));
ApplicationId application1 = tester.applicationId("application1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.container, "cluster1");
tester.deploy(application1, cluster1, 5, 1, resources);
tester.addCpuMeasurements(0.25f, 1f, 120, application1);
ClusterResources scaledResources = tester.assertResources("Scaling up since cpu usage is too high",
7, 1, 2.5, 80.0, 80.0,
tester.autoscale(application1, cluster1.id(), min, max).target());
tester.deploy(application1, cluster1, scaledResources);
tester.deactivateRetired(application1, cluster1, scaledResources);
// Lower cpu load after the rescale should produce scale-down advice.
tester.addCpuMeasurements(0.1f, 1f, 120, application1);
tester.assertResources("Scaling down since cpu usage has gone down",
4, 1, 2.5, 68.6, 68.6,
tester.autoscale(application1, cluster1.id(), min, max).target());
}
/**
 * Verifies that a cluster deployed with a concrete disk speed (slow) can be rescaled to
 * limits declared with DiskSpeed.any, and that the 'any' setting is what ends up requested.
 */
@Test
public void autoscaling_handles_disk_setting_changes() {
    NodeResources hostResources = new NodeResources(3, 100, 100, 1, NodeResources.DiskSpeed.slow);
    AutoscalingTester tester = new AutoscalingTester(hostResources);

    ApplicationId application1 = tester.applicationId("application1");
    ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.content, "cluster1");

    tester.deploy(application1, cluster1, 5, 1, hostResources);
    // Fix: the allMatch(...) result was computed and silently discarded, so this check never
    // asserted anything. Wrap it in assertTrue so a mismatch actually fails the test.
    assertTrue(tester.nodeRepository().nodes().list().owner(application1).stream()
                     .allMatch(n -> n.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.slow));

    tester.clock().advance(Duration.ofDays(2));
    tester.addCpuMeasurements(0.25f, 1f, 120, application1);

    // Scaling limits use 'any' disk speed.
    ClusterResources min = new ClusterResources( 2, 1,
                                                 new NodeResources(1, 1, 1, 1, NodeResources.DiskSpeed.any));
    ClusterResources max = new ClusterResources(20, 1,
                                                new NodeResources(100, 1000, 1000, 1, NodeResources.DiskSpeed.any));
    ClusterResources scaledResources = tester.assertResources("Scaling up since resource usage is too high",
                                                              14, 1, 1.4, 30.8, 30.8,
                                                              tester.autoscale(application1, cluster1.id(), min, max).target());
    assertEquals("Disk speed from min/max is used",
                 NodeResources.DiskSpeed.any, scaledResources.nodeResources().diskSpeed());

    tester.deploy(application1, cluster1, scaledResources);
    // Fix: same silently-discarded allMatch(...) as above.
    assertTrue(tester.nodeRepository().nodes().list().owner(application1).stream()
                     .allMatch(n -> n.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.any));
}
// Verifies that autoscaling advice preserves DiskSpeed.any from the configured limits
// rather than replacing it with a concrete disk speed.
@Test
public void autoscaling_target_preserves_any() {
NodeResources hostResources = new NodeResources(3, 100, 100, 1);
AutoscalingTester tester = new AutoscalingTester(hostResources);
ApplicationId application1 = tester.applicationId("application1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.content, "cluster1");
// Limits declare 'any' disk speed.
NodeResources resources = new NodeResources(1, 10, 10, 1);
var min = new ClusterResources( 2, 1, resources.with(NodeResources.DiskSpeed.any));
var max = new ClusterResources( 10, 1, resources.with(NodeResources.DiskSpeed.any));
tester.deploy(application1, cluster1, Capacity.from(min, max));
assertTrue(tester.nodeRepository().applications().get(application1).get().cluster(cluster1.id()).get().targetResources().isEmpty());
// Redeploying should still request 'any'.
tester.deploy(application1, cluster1, Capacity.from(min, max));
assertEquals(NodeResources.DiskSpeed.any,
tester.nodeRepository().nodes().list().owner(application1).cluster(cluster1.id()).first().get()
.allocation().get().requestedResources().diskSpeed());
tester.clock().advance(Duration.ofDays(2));
tester.addCpuMeasurements(0.8f, 1f, 120, application1);
// High cpu load produces a target; the target must keep 'any' disk speed.
Autoscaler.Advice advice = tester.autoscale(application1, cluster1.id(), min, max);
assertEquals(NodeResources.DiskSpeed.any, advice.target().get().nodeResources().diskSpeed());
}
// Verifies that scale-up advice is capped at the configured max resources even when
// measured load would justify going higher.
@Test
public void autoscaling_respects_upper_limit() {
NodeResources hostResources = new NodeResources(6, 100, 100, 1);
ClusterResources min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1));
ClusterResources max = new ClusterResources( 6, 1, new NodeResources(2.4, 78, 79, 1));
AutoscalingTester tester = new AutoscalingTester(hostResources);
ApplicationId application1 = tester.applicationId("application1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.container, "cluster1");
tester.deploy(application1, cluster1, 5, 1,
new NodeResources(1.9, 70, 70, 1));
// Very high memory/disk load: the advice below equals max exactly.
tester.addMeasurements(0.25f, 0.95f, 0.95f, 0, 120, application1);
tester.assertResources("Scaling up to limit since resource usage is too high",
6, 1, 2.4, 78.0, 79.0,
tester.autoscale(application1, cluster1.id(), min, max).target());
}
// Verifies that scale-down advice never goes below the configured min resources.
@Test
public void autoscaling_respects_lower_limit() {
NodeResources resources = new NodeResources(3, 100, 100, 1);
ClusterResources min = new ClusterResources( 4, 1, new NodeResources(1.8, 7.4, 8.5, 1));
ClusterResources max = new ClusterResources( 6, 1, new NodeResources(2.4, 78, 79, 1));
AutoscalingTester tester = new AutoscalingTester(resources.withVcpu(resources.vcpu() * 2));
ApplicationId application1 = tester.applicationId("application1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.container, "cluster1");
tester.deploy(application1, cluster1, 5, 1, resources);
// Very low load on all dimensions: advice is clamped to min (node count, cpu, memory).
tester.addMeasurements(0.05f, 0.05f, 0.05f, 0, 120, application1);
tester.assertResources("Scaling down to limit since resource usage is low",
4, 1, 1.8, 7.4, 10.0,
tester.autoscale(application1, cluster1.id(), min, max).target());
}
// Verifies that the advised group count stays within the configured min/max group limits.
@Test
public void autoscaling_respects_group_limit() {
NodeResources hostResources = new NodeResources(30.0, 100, 100, 1);
ClusterResources min = new ClusterResources( 2, 2, new NodeResources(1, 1, 1, 1));
ClusterResources max = new ClusterResources(18, 6, new NodeResources(100, 1000, 1000, 1));
AutoscalingTester tester = new AutoscalingTester(hostResources);
ApplicationId application1 = tester.applicationId("application1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.container, "cluster1");
// Deploy 5 nodes in 5 groups, then feed high cpu load.
tester.deploy(application1, cluster1, 5, 5, new NodeResources(3.0, 10, 10, 1));
tester.addCpuMeasurements( 0.3f, 1f, 240, application1);
tester.assertResources("Scaling up since resource usage is too high",
6, 6, 3.6, 8.0, 10.0,
tester.autoscale(application1, cluster1.id(), min, max).target());
}
// When min == max there is no room to scale, so no advice should be produced
// regardless of load.
@Test
public void test_autoscaling_limits_when_min_equals_max() {
NodeResources resources = new NodeResources(3, 100, 100, 1);
ClusterResources min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1));
ClusterResources max = min;
AutoscalingTester tester = new AutoscalingTester(resources);
ApplicationId application1 = tester.applicationId("application1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.container, "cluster1");
tester.deploy(application1, cluster1, 5, 1, resources);
tester.clock().advance(Duration.ofDays(1));
tester.addCpuMeasurements(0.25f, 1f, 120, application1);
assertTrue(tester.autoscale(application1, cluster1.id(), min, max).isEmpty());
}
// Suggestion mode (tester.suggest) should propose resources outside the configured
// min/max limits, unlike autoscaling which is clamped to them.
@Test
public void suggestions_ignores_limits() {
NodeResources resources = new NodeResources(3, 100, 100, 1);
ClusterResources min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1));
// min == max, yet the suggestion below exceeds it.
ClusterResources max = min;
AutoscalingTester tester = new AutoscalingTester(resources.withVcpu(resources.vcpu() * 2));
ApplicationId application1 = tester.applicationId("application1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.container, "cluster1");
tester.deploy(application1, cluster1, 5, 1, resources);
tester.addCpuMeasurements(0.25f, 1f, 120, application1);
tester.assertResources("Scaling up since resource usage is too high",
7, 1, 2.5, 80.0, 80.0,
tester.suggest(application1, cluster1.id(), min, max).target());
}
@Test
public void not_using_out_of_service_measurements() {
NodeResources resources = new NodeResources(3, 100, 100, 1);
ClusterResources min = new ClusterResources(2, 1, new NodeResources(1, 1, 1, 1));
ClusterResources max = new ClusterResources(5, 1, new NodeResources(100, 1000, 1000, 1));
AutoscalingTester tester = new AutoscalingTester(resources.withVcpu(resources.vcpu() * 2));
ApplicationId application1 = tester.applicationId("application1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.container, "cluster1");
tester.deploy(application1, cluster1, 2, 1, resources);
// Boolean flags differ from the sibling test below: here the nodes are out of service
// (presumably inService=false, stable=true -- confirm against addMeasurements' signature)
tester.addMeasurements(0.5f, 0.6f, 0.7f, 1, false, true, 120, application1);
// Message fixed: it was copy-pasted from the "unstable" test below but this
// scenario is about out-of-service measurements
assertTrue("Not scaling up since nodes were measured while cluster was out of service",
tester.autoscale(application1, cluster1.id(), min, max).isEmpty());
}

@Test
public void not_using_unstable_measurements() {
NodeResources resources = new NodeResources(3, 100, 100, 1);
ClusterResources min = new ClusterResources(2, 1, new NodeResources(1, 1, 1, 1));
ClusterResources max = new ClusterResources(5, 1, new NodeResources(100, 1000, 1000, 1));
AutoscalingTester tester = new AutoscalingTester(resources.withVcpu(resources.vcpu() * 2));
ApplicationId application1 = tester.applicationId("application1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.container, "cluster1");
tester.deploy(application1, cluster1, 2, 1, resources);
// Here the flags are inverted relative to the test above: in service but unstable
tester.addMeasurements(0.5f, 0.6f, 0.7f, 1, true, false, 120, application1);
assertTrue("Not scaling up since nodes were measured while cluster was unstable",
tester.autoscale(application1, cluster1.id(), min, max).isEmpty());
}
@Test
public void test_autoscaling_group_size_1() {
NodeResources resources = new NodeResources(3, 100, 100, 1);
// min/max force one node per group (nodes == groups), so scaling adds whole groups
ClusterResources min = new ClusterResources( 2, 2, new NodeResources(1, 1, 1, 1));
ClusterResources max = new ClusterResources(20, 20, new NodeResources(100, 1000, 1000, 1));
AutoscalingTester tester = new AutoscalingTester(resources.withVcpu(resources.vcpu() * 2));
ApplicationId application1 = tester.applicationId("application1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.container, "cluster1");
tester.deploy(application1, cluster1, 5, 5, resources);
tester.addCpuMeasurements(0.25f, 1f, 120, application1);
// Expect growth from 5x1 to 7x1 groups under high cpu load
tester.assertResources("Scaling up since resource usage is too high",
7, 7, 2.5, 80.0, 80.0,
tester.autoscale(application1, cluster1.id(), min, max).target());
}
@Test
public void test_autoscaling_groupsize_by_cpu() {
NodeResources resources = new NodeResources(3, 100, 100, 1);
ClusterResources min = new ClusterResources( 3, 1, new NodeResources(1, 1, 1, 1));
ClusterResources max = new ClusterResources(21, 7, new NodeResources(100, 1000, 1000, 1));
AutoscalingTester tester = new AutoscalingTester(resources.withVcpu(resources.vcpu() * 2))
// Start with 6 nodes in 2 groups; cpu pressure should favor a single larger group
;
ApplicationId application1 = tester.applicationId("application1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.container, "cluster1");
tester.deploy(application1, cluster1, 6, 2, resources);
tester.addCpuMeasurements(0.25f, 1f, 120, application1);
tester.assertResources("Scaling up since resource usage is too high, changing to 1 group is cheaper",
8, 1, 2.7, 83.3, 83.3,
tester.autoscale(application1, cluster1.id(), min, max).target());
}
@Test
public void test_autoscaling_group_size() {
NodeResources hostResources = new NodeResources(100, 1000, 1000, 100);
ClusterResources min = new ClusterResources( 3, 2, new NodeResources(1, 1, 1, 1));
ClusterResources max = new ClusterResources(30, 30, new NodeResources(100, 100, 1000, 1));
AutoscalingTester tester = new AutoscalingTester(hostResources);
ApplicationId application1 = tester.applicationId("application1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.content, "cluster1");
tester.deploy(application1, cluster1, 6, 2, new NodeResources(10, 100, 100, 1));
// Advance past the (presumed) scaling-down delay before reporting full memory load
tester.clock().advance(Duration.ofDays(1));
tester.addMemMeasurements(1.0f, 1f, 1000, application1);
// Memory pressure is expected to be relieved by growing each group (6 -> 8 nodes, 2 groups)
tester.assertResources("Increase group size to reduce memory load",
8, 2, 13.6, 89.3, 62.5,
tester.autoscale(application1, cluster1.id(), min, max).target());
}
@Test
public void autoscaling_avoids_illegal_configurations() {
NodeResources hostResources = new NodeResources(6, 100, 100, 1);
ClusterResources min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1));
ClusterResources max = new ClusterResources(20, 1, new NodeResources(100, 1000, 1000, 1));
AutoscalingTester tester = new AutoscalingTester(hostResources);
ApplicationId application1 = tester.applicationId("application1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.content, "cluster1");
tester.deploy(application1, cluster1, 6, 1, hostResources.withVcpu(hostResources.vcpu() / 2));
tester.clock().advance(Duration.ofDays(2));
// Very low cpu usage but high memory usage: scale cpu down while keeping memory headroom
tester.addMemMeasurements(0.02f, 0.95f, 120, application1);
tester.assertResources("Scaling down",
6, 1, 2.9, 4.0, 95.0,
tester.autoscale(application1, cluster1.id(), min, max).target());
}
@Test
public void scaling_down_only_after_delay() {
NodeResources hostResources = new NodeResources(6, 100, 100, 1);
ClusterResources min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1));
ClusterResources max = new ClusterResources(20, 1, new NodeResources(100, 1000, 1000, 1));
AutoscalingTester tester = new AutoscalingTester(hostResources);
ApplicationId application1 = tester.applicationId("application1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.content, "cluster1");
tester.deploy(application1, cluster1, 6, 1, hostResources.withVcpu(hostResources.vcpu() / 2));
// Low usage immediately after deployment: no scale-down advice yet
tester.addMemMeasurements(0.02f, 0.95f, 120, application1);
assertTrue(tester.autoscale(application1, cluster1.id(), min, max).target().isEmpty());
// After the delay (2 days here) the same low usage does produce scale-down advice
tester.clock().advance(Duration.ofDays(2));
tester.addMemMeasurements(0.02f, 0.95f, 120, application1);
tester.assertResources("Scaling down",
6, 1, 2.9, 4.0, 95.0,
tester.autoscale(application1, cluster1.id(), min, max).target());
}
@Test
public void real_resources_are_taken_into_account() {
NodeResources hostResources = new NodeResources(60, 100, 1000, 10);
ClusterResources min = new ClusterResources(2, 1, new NodeResources( 2, 20, 200, 1));
ClusterResources max = new ClusterResources(4, 1, new NodeResources(60, 100, 1000, 1));
// Case 1: no memory tax -- target memory stays at the requested 20 Gb
{
AutoscalingTester tester = new AutoscalingTester(hostResources, new OnlySubtractingWhenForecastingCalculator(0));
ApplicationId application1 = tester.applicationId("app1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.content, "cluster1");
tester.deploy(application1, cluster1, min);
tester.addMeasurements(1.0f, 1.0f, 0.7f, 0, 1000, application1);
tester.assertResources("Scaling up",
4, 1, 7.4, 20, 200,
tester.autoscale(application1, cluster1.id(), min, max).target());
}
// Case 2: 15 Gb memory tax -- target memory grows accordingly (20 -> 34)
{
AutoscalingTester tester = new AutoscalingTester(hostResources, new OnlySubtractingWhenForecastingCalculator(15));
ApplicationId application1 = tester.applicationId("app1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.content, "cluster1");
tester.deploy(application1, cluster1, min);
tester.addMeasurements(1.0f, 1.0f, 0.7f, 0, 1000, application1);
tester.assertResources("Scaling up",
4, 1, 7.4, 34, 200,
tester.autoscale(application1, cluster1.id(), min, max).target());
}
}
@Test
public void test_autoscaling_with_dynamic_provisioning() {
ClusterResources min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1));
ClusterResources max = new ClusterResources(20, 1, new NodeResources(100, 1000, 1000, 1));
// Fixed flavor menu, as with a public cloud: autoscaling must pick among these
List<Flavor> flavors = new ArrayList<>();
flavors.add(new Flavor("aws-xlarge", new NodeResources(3, 200, 100, 1, NodeResources.DiskSpeed.fast, NodeResources.StorageType.remote)));
flavors.add(new Flavor("aws-large", new NodeResources(3, 150, 100, 1, NodeResources.DiskSpeed.fast, NodeResources.StorageType.remote)));
flavors.add(new Flavor("aws-medium", new NodeResources(3, 100, 100, 1, NodeResources.DiskSpeed.fast, NodeResources.StorageType.remote)));
flavors.add(new Flavor("aws-small", new NodeResources(3, 80, 100, 1, NodeResources.DiskSpeed.fast, NodeResources.StorageType.remote)));
AutoscalingTester tester = new AutoscalingTester(new Zone(Cloud.builder()
.dynamicProvisioning(true)
.build(),
SystemName.main,
Environment.prod, RegionName.from("us-east")),
flavors);
ApplicationId application1 = tester.applicationId("application1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.content, "cluster1");
tester.deploy(application1, cluster1, 5, 1, new NodeResources(3, 103, 100, 1));
tester.clock().advance(Duration.ofDays(2));
tester.addMemMeasurements(0.9f, 0.6f, 120, application1);
ClusterResources scaledResources = tester.assertResources("Scaling up since resource usage is too high.",
8, 1, 3, 83, 34.3,
tester.autoscale(application1, cluster1.id(), min, max).target());
// Apply the advice, then feed lower usage and expect a scale-down
tester.deploy(application1, cluster1, scaledResources);
tester.deactivateRetired(application1, cluster1, scaledResources);
tester.clock().advance(Duration.ofDays(2));
tester.addMemMeasurements(0.3f, 0.6f, 1000, application1);
tester.assertResources("Scaling down since resource usage has gone down",
6, 1, 3, 83, 28.8,
tester.autoscale(application1, cluster1.id(), min, max).target());
}
@Test
public void test_autoscaling_considers_read_share() {
NodeResources resources = new NodeResources(3, 100, 100, 1);
ClusterResources min = new ClusterResources( 1, 1, resources);
ClusterResources max = new ClusterResources(10, 1, resources);
AutoscalingTester tester = new AutoscalingTester(resources.withVcpu(resources.vcpu() * 2));
ApplicationId application1 = tester.applicationId("application1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.container, "cluster1");
tester.deploy(application1, cluster1, 5, 1, resources);
tester.addCpuMeasurements(0.25f, 1f, 120, application1);
// No read share stored: the default bcp headroom applies
tester.assertResources("Advice to scale up since we set aside for bcp by default",
7, 1, 3, 100, 100,
tester.autoscale(application1, cluster1.id(), min, max).target());
tester.storeReadShare(0.25, 0.5, application1);
tester.assertResources("Half of global share is the same as the default assumption used above",
7, 1, 3, 100, 100,
tester.autoscale(application1, cluster1.id(), min, max).target());
// Current share == max share: no bcp headroom is needed, so fewer nodes suffice
tester.storeReadShare(0.5, 0.5, application1);
tester.assertResources("Advice to scale down since we don't need room for bcp",
4, 1, 3, 100, 100,
tester.autoscale(application1, cluster1.id(), min, max).target());
}
@Test
/**
* This calculator subtracts the memory tax when forecasting overhead, but not when actually
* returning information about nodes. This is allowed because the forecast is a *worst case*.
* It is useful here because it ensures that we end up with the same real (and therefore target)
* resources regardless of tax which makes it easier to compare behavior with different tax levels.
*/
private static class OnlySubtractingWhenForecastingCalculator implements HostResourcesCalculator {
// Memory tax in Gb applied only in the request<->real conversions below
private final int memoryTaxGb;
public OnlySubtractingWhenForecastingCalculator(int memoryTaxGb) {
this.memoryTaxGb = memoryTaxGb;
}
// No adjustment: real resources are reported as-is
@Override
public NodeResources realResourcesOf(Nodelike node, NodeRepository nodeRepository, boolean exclusive) {
return node.resources();
}
// No adjustment: advertised resources are the flavor's nominal resources
@Override
public NodeResources advertisedResourcesOf(Flavor flavor) {
return flavor.resources();
}
// Forecasting direction: subtract the tax from the requested memory
@Override
public NodeResources requestToReal(NodeResources resources, boolean exclusive) {
return resources.withMemoryGb(resources.memoryGb() - memoryTaxGb);
}
// Inverse of requestToReal: add the tax back
@Override
public NodeResources realToRequest(NodeResources resources, boolean exclusive) {
return resources.withMemoryGb(resources.memoryGb() + memoryTaxGb);
}
@Override
public long thinPoolSizeInBase2Gb(NodeType nodeType, boolean sharedHost) { return 0; }
}
} |
When is `duration.isZero()`? Shouldn't we check if `duration.toMinutes() == 0` instead? Same on line 67 | public double maxQueryGrowthRate() {
if (snapshots.isEmpty()) return 0.1;
double maxGrowthRate = 0;
for (int start = 0; start < snapshots.size(); start++) {
if (start > 0) {
Duration duration = durationBetween(start - 1, start);
if ( ! duration.isZero()) {
double growthRate = (queryRateAt(start - 1) - queryRateAt(start)) / duration.toMinutes();
if (growthRate >= maxGrowthRate)
continue;
}
}
for (int end = start + 1; end < snapshots.size(); end++) {
if (queryRateAt(end) >= queryRateAt(start) * 1.3) {
Duration duration = durationBetween(start, end);
if (duration.isZero()) continue;
double growthRate = (queryRateAt(end) - queryRateAt(start)) / duration.toMinutes();
if (growthRate > maxGrowthRate)
maxGrowthRate = growthRate;
}
}
}
if (maxGrowthRate == 0) {
if (durationBetween(0, snapshots.size() - 1).toHours() < 24)
return 0.1;
else
return 0.0;
}
if (queryRateNow() == 0) return 0.1;
return maxGrowthRate / queryRateNow();
} | if ( ! duration.isZero()) { | public double maxQueryGrowthRate() {
if (snapshots.isEmpty()) return 0.1;
double maxGrowthRate = 0;
for (int start = 0; start < snapshots.size(); start++) {
if (start > 0) {
Duration duration = durationBetween(start - 1, start);
if ( ! duration.isZero()) {
double growthRate = (queryRateAt(start - 1) - queryRateAt(start)) / duration.toMinutes();
if (growthRate >= maxGrowthRate)
continue;
}
}
for (int end = start + 1; end < snapshots.size(); end++) {
if (queryRateAt(end) >= queryRateAt(start) * 1.3) {
Duration duration = durationBetween(start, end);
if (duration.isZero()) continue;
double growthRate = (queryRateAt(end) - queryRateAt(start)) / duration.toMinutes();
if (growthRate > maxGrowthRate)
maxGrowthRate = growthRate;
}
}
}
if (maxGrowthRate == 0) {
if (durationBetween(0, snapshots.size() - 1).toHours() < 24)
return 0.1;
else
return 0.0;
}
if (queryRateNow() == 0) return 0.1;
return maxGrowthRate / queryRateNow();
} | class ClusterTimeseries {
// Identity of the cluster this timeseries belongs to
private final ClusterSpec.Id cluster;
// Snapshots, kept sorted ascending and immutable (see constructor)
private final List<ClusterMetricSnapshot> snapshots;
ClusterTimeseries(ClusterSpec.Id cluster, List<ClusterMetricSnapshot> snapshots) {
this.cluster = cluster;
// Defensive copy: sort a private copy and expose it as unmodifiable, so this
// object is immutable regardless of what the caller does with its list
List<ClusterMetricSnapshot> sortedSnapshots = new ArrayList<>(snapshots);
Collections.sort(sortedSnapshots);
this.snapshots = Collections.unmodifiableList(sortedSnapshots);
}
public boolean isEmpty() { return snapshots.isEmpty(); }
public int size() { return snapshots.size(); }
public ClusterMetricSnapshot get(int index) { return snapshots.get(index); }
// Safe to hand out directly: the list is unmodifiable
public List<ClusterMetricSnapshot> asList() { return snapshots; }
public ClusterSpec.Id cluster() { return cluster; }
/** Returns a copy of this timeseries with the given snapshot appended (this instance is unchanged). */
public ClusterTimeseries add(ClusterMetricSnapshot snapshot) {
List<ClusterMetricSnapshot> extended = new ArrayList<>(snapshots);
extended.add(snapshot);
return new ClusterTimeseries(cluster, extended);
}
/** The max query growth rate we can predict from this time-series as a fraction of the current traffic per minute */
/** The current query rate as a fraction of the peak rate in this timeseries */
public double currentQueryFractionOfMax() {
if (snapshots.isEmpty()) return 0.5;
var max = snapshots.stream().mapToDouble(ClusterMetricSnapshot::queryRate).max().getAsDouble();
// Guard against 0.0/0.0 -> NaN when every snapshot has a zero query rate;
// fall back to the same "no information" default used for an empty series
if (max == 0) return 0.5;
return snapshots.get(snapshots.size() - 1).queryRate() / max;
}
// Query rate of the snapshot at the given (sorted) index
private double queryRateAt(int index) {
return snapshots.get(index).queryRate();
}
// Query rate of the most recent snapshot; callers must ensure snapshots is non-empty
private double queryRateNow() {
return queryRateAt(snapshots.size() - 1);
}
// Elapsed time between two snapshots, relying on the list being sorted ascending
private Duration durationBetween(int startIndex, int endIndex) {
return Duration.between(snapshots.get(startIndex).at(), snapshots.get(endIndex).at());
}
} | class ClusterTimeseries {
private final ClusterSpec.Id cluster;
private final List<ClusterMetricSnapshot> snapshots;
ClusterTimeseries(ClusterSpec.Id cluster, List<ClusterMetricSnapshot> snapshots) {
this.cluster = cluster;
List<ClusterMetricSnapshot> sortedSnapshots = new ArrayList<>(snapshots);
Collections.sort(sortedSnapshots);
this.snapshots = Collections.unmodifiableList(sortedSnapshots);
}
public boolean isEmpty() { return snapshots.isEmpty(); }
public int size() { return snapshots.size(); }
public ClusterMetricSnapshot get(int index) { return snapshots.get(index); }
public List<ClusterMetricSnapshot> asList() { return snapshots; }
public ClusterSpec.Id cluster() { return cluster; }
public ClusterTimeseries add(ClusterMetricSnapshot snapshot) {
List<ClusterMetricSnapshot> list = new ArrayList<>(snapshots);
list.add(snapshot);
return new ClusterTimeseries(cluster, list);
}
/** The max query growth rate we can predict from this time-series as a fraction of the current traffic per minute */
/** The current query rate as a fraction of the peak rate in this timeseries */
public double currentQueryFractionOfMax() {
if (snapshots.isEmpty()) return 0.5;
var max = snapshots.stream().mapToDouble(ClusterMetricSnapshot::queryRate).max().getAsDouble();
return snapshots.get(snapshots.size() - 1).queryRate() / max;
}
private double queryRateAt(int index) {
return snapshots.get(index).queryRate();
}
private double queryRateNow() {
return queryRateAt(snapshots.size() - 1);
}
private Duration durationBetween(int startIndex, int endIndex) {
return Duration.between(snapshots.get(startIndex).at(), snapshots.get(endIndex).at());
}
} |
Good point; done. (This could happen if we end up with duplicate reads of the same metric window and nothing else.) | public double maxQueryGrowthRate() {
if (snapshots.isEmpty()) return 0.1;
double maxGrowthRate = 0;
for (int start = 0; start < snapshots.size(); start++) {
if (start > 0) {
Duration duration = durationBetween(start - 1, start);
if ( ! duration.isZero()) {
double growthRate = (queryRateAt(start - 1) - queryRateAt(start)) / duration.toMinutes();
if (growthRate >= maxGrowthRate)
continue;
}
}
for (int end = start + 1; end < snapshots.size(); end++) {
if (queryRateAt(end) >= queryRateAt(start) * 1.3) {
Duration duration = durationBetween(start, end);
if (duration.isZero()) continue;
double growthRate = (queryRateAt(end) - queryRateAt(start)) / duration.toMinutes();
if (growthRate > maxGrowthRate)
maxGrowthRate = growthRate;
}
}
}
if (maxGrowthRate == 0) {
if (durationBetween(0, snapshots.size() - 1).toHours() < 24)
return 0.1;
else
return 0.0;
}
if (queryRateNow() == 0) return 0.1;
return maxGrowthRate / queryRateNow();
} | if ( ! duration.isZero()) { | public double maxQueryGrowthRate() {
if (snapshots.isEmpty()) return 0.1;
double maxGrowthRate = 0;
for (int start = 0; start < snapshots.size(); start++) {
if (start > 0) {
Duration duration = durationBetween(start - 1, start);
if ( ! duration.isZero()) {
double growthRate = (queryRateAt(start - 1) - queryRateAt(start)) / duration.toMinutes();
if (growthRate >= maxGrowthRate)
continue;
}
}
for (int end = start + 1; end < snapshots.size(); end++) {
if (queryRateAt(end) >= queryRateAt(start) * 1.3) {
Duration duration = durationBetween(start, end);
if (duration.isZero()) continue;
double growthRate = (queryRateAt(end) - queryRateAt(start)) / duration.toMinutes();
if (growthRate > maxGrowthRate)
maxGrowthRate = growthRate;
}
}
}
if (maxGrowthRate == 0) {
if (durationBetween(0, snapshots.size() - 1).toHours() < 24)
return 0.1;
else
return 0.0;
}
if (queryRateNow() == 0) return 0.1;
return maxGrowthRate / queryRateNow();
} | class ClusterTimeseries {
private final ClusterSpec.Id cluster;
private final List<ClusterMetricSnapshot> snapshots;
ClusterTimeseries(ClusterSpec.Id cluster, List<ClusterMetricSnapshot> snapshots) {
this.cluster = cluster;
List<ClusterMetricSnapshot> sortedSnapshots = new ArrayList<>(snapshots);
Collections.sort(sortedSnapshots);
this.snapshots = Collections.unmodifiableList(sortedSnapshots);
}
public boolean isEmpty() { return snapshots.isEmpty(); }
public int size() { return snapshots.size(); }
public ClusterMetricSnapshot get(int index) { return snapshots.get(index); }
public List<ClusterMetricSnapshot> asList() { return snapshots; }
public ClusterSpec.Id cluster() { return cluster; }
public ClusterTimeseries add(ClusterMetricSnapshot snapshot) {
List<ClusterMetricSnapshot> list = new ArrayList<>(snapshots);
list.add(snapshot);
return new ClusterTimeseries(cluster, list);
}
/** The max query growth rate we can predict from this time-series as a fraction of the current traffic per minute */
/** The current query rate as a fraction of the peak rate in this timeseries */
public double currentQueryFractionOfMax() {
if (snapshots.isEmpty()) return 0.5;
var max = snapshots.stream().mapToDouble(ClusterMetricSnapshot::queryRate).max().getAsDouble();
return snapshots.get(snapshots.size() - 1).queryRate() / max;
}
private double queryRateAt(int index) {
return snapshots.get(index).queryRate();
}
private double queryRateNow() {
return queryRateAt(snapshots.size() - 1);
}
private Duration durationBetween(int startIndex, int endIndex) {
return Duration.between(snapshots.get(startIndex).at(), snapshots.get(endIndex).at());
}
} | class ClusterTimeseries {
private final ClusterSpec.Id cluster;
private final List<ClusterMetricSnapshot> snapshots;
ClusterTimeseries(ClusterSpec.Id cluster, List<ClusterMetricSnapshot> snapshots) {
this.cluster = cluster;
List<ClusterMetricSnapshot> sortedSnapshots = new ArrayList<>(snapshots);
Collections.sort(sortedSnapshots);
this.snapshots = Collections.unmodifiableList(sortedSnapshots);
}
public boolean isEmpty() { return snapshots.isEmpty(); }
public int size() { return snapshots.size(); }
public ClusterMetricSnapshot get(int index) { return snapshots.get(index); }
public List<ClusterMetricSnapshot> asList() { return snapshots; }
public ClusterSpec.Id cluster() { return cluster; }
public ClusterTimeseries add(ClusterMetricSnapshot snapshot) {
List<ClusterMetricSnapshot> list = new ArrayList<>(snapshots);
list.add(snapshot);
return new ClusterTimeseries(cluster, list);
}
/** The max query growth rate we can predict from this time-series as a fraction of the current traffic per minute */
/** The current query rate as a fraction of the peak rate in this timeseries */
public double currentQueryFractionOfMax() {
if (snapshots.isEmpty()) return 0.5;
var max = snapshots.stream().mapToDouble(ClusterMetricSnapshot::queryRate).max().getAsDouble();
return snapshots.get(snapshots.size() - 1).queryRate() / max;
}
private double queryRateAt(int index) {
return snapshots.get(index).queryRate();
}
private double queryRateNow() {
return queryRateAt(snapshots.size() - 1);
}
private Duration durationBetween(int startIndex, int endIndex) {
return Duration.between(snapshots.get(startIndex).at(), snapshots.get(endIndex).at());
}
} |
This appears to use synchronized around a singleton, but we're not that multi-threaded that it would cause a problem. | public String render() {
StringWriter writer = new StringWriter();
Velocity.evaluate(velocityContext, writer, "Template", template);
return writer.toString();
} | Velocity.evaluate(velocityContext, writer, "Template", template); | public String render() {
StringWriter writer = new StringWriter();
Velocity.evaluate(velocityContext, writer, "Template", template);
return writer.toString();
} | class Template {
static {
// Silence Velocity's default logging by routing it to the null log system,
// then initialize the singleton engine once for the whole process
Velocity.addProperty(Velocity.RUNTIME_LOG_LOGSYSTEM_CLASS, "org.apache.velocity.runtime.log.NullLogSystem");
Velocity.init();
}
// Per-template variable bindings, filled via set() and consumed by render()
private final VelocityContext velocityContext = new VelocityContext();
// The raw template text to evaluate
private final String template;
private Template(String template) {
this.template = template;
}
/**
* Creates a template from the content of the file at the given path.
* Reads the file as UTF-8 via Files.readString rather than decoding with the
* platform-default charset, so rendering is independent of host locale settings.
*/
public static Template at(Path templatePath) {
return of(uncheck(() -> Files.readString(templatePath)));
}
/** Creates a template from the given template text. */
public static Template of(String template) {
return new Template(template);
}
/** Binds a template variable to a value; returns this for chaining. */
public Template set(String name, Object value) {
velocityContext.put(name, value);
return this;
}
// Rendering is deferred: the writer evaluates this template when it is invoked
public FileWriter getFileWriterTo(Path destinationPath) {
return new FileWriter(destinationPath, this::render);
}
} | class Template {
static {
Velocity.addProperty(Velocity.RUNTIME_LOG_LOGSYSTEM_CLASS, "org.apache.velocity.runtime.log.NullLogSystem");
Velocity.init();
}
private final VelocityContext velocityContext = new VelocityContext();
private final String template;
private Template(String template) {
this.template = template;
}
public static Template at(Path templatePath) {
return of(uncheck(() -> new String(Files.readAllBytes(templatePath))));
}
public static Template of(String template) {
return new Template(template);
}
public Template set(String name, Object value) {
velocityContext.put(name, value);
return this;
}
public FileWriter getFileWriterTo(Path destinationPath) {
return new FileWriter(destinationPath, this::render);
}
} |
Nope. | public String render() {
StringWriter writer = new StringWriter();
Velocity.evaluate(velocityContext, writer, "Template", template);
return writer.toString();
} | Velocity.evaluate(velocityContext, writer, "Template", template); | public String render() {
StringWriter writer = new StringWriter();
Velocity.evaluate(velocityContext, writer, "Template", template);
return writer.toString();
} | class Template {
static {
Velocity.addProperty(Velocity.RUNTIME_LOG_LOGSYSTEM_CLASS, "org.apache.velocity.runtime.log.NullLogSystem");
Velocity.init();
}
private final VelocityContext velocityContext = new VelocityContext();
private final String template;
private Template(String template) {
this.template = template;
}
public static Template at(Path templatePath) {
return of(uncheck(() -> new String(Files.readAllBytes(templatePath))));
}
public static Template of(String template) {
return new Template(template);
}
public Template set(String name, Object value) {
velocityContext.put(name, value);
return this;
}
public FileWriter getFileWriterTo(Path destinationPath) {
return new FileWriter(destinationPath, this::render);
}
} | class Template {
static {
Velocity.addProperty(Velocity.RUNTIME_LOG_LOGSYSTEM_CLASS, "org.apache.velocity.runtime.log.NullLogSystem");
Velocity.init();
}
private final VelocityContext velocityContext = new VelocityContext();
private final String template;
private Template(String template) {
this.template = template;
}
public static Template at(Path templatePath) {
return of(uncheck(() -> new String(Files.readAllBytes(templatePath))));
}
public static Template of(String template) {
return new Template(template);
}
public Template set(String name, Object value) {
velocityContext.put(name, value);
return this;
}
public FileWriter getFileWriterTo(Path destinationPath) {
return new FileWriter(destinationPath, this::render);
}
} |
Shouldn't this be inverted? If we _not_ require active parents, we can ignore non-active parents. | private List<Node> prepareNodes(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
List<Node> surplusNodes = findNodesInRemovableGroups(application, cluster, wantedGroups);
List<Integer> usedIndices = nodeRepository.nodes().list()
.owner(application)
.cluster(cluster.id())
.mapToList(node -> node.allocation().get().membership().index());
NodeIndices indices = new NodeIndices(usedIndices, ! cluster.type().isContent());
List<Node> acceptedNodes = new ArrayList<>();
for (int groupIndex = 0; groupIndex < wantedGroups; groupIndex++) {
ClusterSpec clusterGroup = cluster.with(Optional.of(ClusterSpec.Group.from(groupIndex)));
List<Node> accepted = groupPreparer.prepare(application, clusterGroup,
requestedNodes.fraction(wantedGroups), surplusNodes,
indices, wantedGroups);
if (requestedNodes.requireActiveParent()) {
Nodes nodes = nodeRepository.nodes();
NodeList activeHosts = nodes.list(Node.State.active).parents().nodeType(requestedNodes.type().hostType());
accepted = accepted.stream()
.filter(node -> node.parentHostname().isEmpty() || activeHosts.parentOf(node).isPresent())
.collect(Collectors.toList());
}
replace(acceptedNodes, accepted);
}
moveToActiveGroup(surplusNodes, wantedGroups, cluster.group());
acceptedNodes.removeAll(surplusNodes);
return acceptedNodes;
} | if (requestedNodes.requireActiveParent()) { | private List<Node> prepareNodes(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
List<Node> surplusNodes = findNodesInRemovableGroups(application, cluster, wantedGroups);
List<Integer> usedIndices = nodeRepository.nodes().list()
.owner(application)
.cluster(cluster.id())
.mapToList(node -> node.allocation().get().membership().index());
NodeIndices indices = new NodeIndices(usedIndices, ! cluster.type().isContent());
List<Node> acceptedNodes = new ArrayList<>();
for (int groupIndex = 0; groupIndex < wantedGroups; groupIndex++) {
ClusterSpec clusterGroup = cluster.with(Optional.of(ClusterSpec.Group.from(groupIndex)));
List<Node> accepted = groupPreparer.prepare(application, clusterGroup,
requestedNodes.fraction(wantedGroups), surplusNodes,
indices, wantedGroups);
if (requestedNodes.rejectNonActiveParent()) {
Nodes nodes = nodeRepository.nodes();
NodeList activeHosts = nodes.list(Node.State.active).parents().nodeType(requestedNodes.type().hostType());
accepted = accepted.stream()
.filter(node -> node.parentHostname().isEmpty() || activeHosts.parentOf(node).isPresent())
.collect(Collectors.toList());
}
replace(acceptedNodes, accepted);
}
moveToActiveGroup(surplusNodes, wantedGroups, cluster.group());
acceptedNodes.removeAll(surplusNodes);
return acceptedNodes;
} | class Preparer {
private final NodeRepository nodeRepository;
private final GroupPreparer groupPreparer;
// Empty when no load balancer provisioning is configured (prepareLoadBalancer becomes a no-op)
private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner;
public Preparer(NodeRepository nodeRepository, FlagSource flagSource, Optional<HostProvisioner> hostProvisioner,
Optional<LoadBalancerProvisioner> loadBalancerProvisioner) {
this.nodeRepository = nodeRepository;
this.loadBalancerProvisioner = loadBalancerProvisioner;
// flagSource and hostProvisioner are only used via the group preparer
this.groupPreparer = new GroupPreparer(nodeRepository, hostProvisioner, flagSource);
}
/** Prepare all required resources for the given application and cluster */
public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
try {
// Nodes first; the load balancer is only prepared once nodes were satisfied
var nodes = prepareNodes(application, cluster, requestedNodes, wantedGroups);
prepareLoadBalancer(application, cluster, requestedNodes);
return nodes;
}
catch (OutOfCapacityException e) {
// Rethrown with application/cluster context prepended to the message.
// NOTE(review): the caught exception is not attached as a cause, so its stack
// trace is lost -- confirm whether OutOfCapacityException accepts a cause.
throw new OutOfCapacityException("Could not satisfy " + requestedNodes +
( wantedGroups > 1 ? " (in " + wantedGroups + " groups)" : "") +
" in " + application + " " + cluster +
": " + e.getMessage());
}
}
/**
* Ensure sufficient nodes are reserved or active for the given application and cluster
*
* @return the list of nodes this cluster will have allocated if activated
*/
/** Prepare a load balancer for given application and cluster */
public void prepareLoadBalancer(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) {
// No-op when no load balancer provisioner is configured
loadBalancerProvisioner.ifPresent(provisioner -> provisioner.prepare(application, cluster, requestedNodes));
}
/**
* Returns a list of the nodes which are
* in groups with index number above or equal the group count
*/
private List<Node> findNodesInRemovableGroups(ApplicationId application, ClusterSpec requestedCluster, int wantedGroups) {
List<Node> surplusNodes = new ArrayList<>(0);
for (Node node : nodeRepository.nodes().list(Node.State.active).owner(application)) {
ClusterSpec nodeCluster = node.allocation().get().membership().cluster();
// Only consider active nodes of the same cluster id and type
if ( ! nodeCluster.id().equals(requestedCluster.id())) continue;
if ( ! nodeCluster.type().equals(requestedCluster.type())) continue;
// Group indices at or beyond the wanted count belong to groups being removed
if (nodeCluster.group().get().index() >= wantedGroups)
surplusNodes.add(node);
}
return surplusNodes;
}
/** Move nodes from unwanted groups to wanted groups to avoid lingering groups consisting of retired nodes */
private void moveToActiveGroup(List<Node> surplusNodes, int wantedGroups, Optional<ClusterSpec.Group> targetGroup) {
for (ListIterator<Node> i = surplusNodes.listIterator(); i.hasNext(); ) {
Node node = i.next();
ClusterMembership membership = node.allocation().get().membership();
ClusterSpec cluster = membership.cluster();
if (cluster.group().get().index() >= wantedGroups) {
// Reassign to the target group (group 0 when none given); nodes are immutable,
// so the updated copy replaces the element in place via the list iterator
ClusterSpec.Group newGroup = targetGroup.orElse(ClusterSpec.Group.from(0));
ClusterMembership newGroupMembership = membership.with(cluster.with(Optional.of(newGroup)));
i.set(node.with(node.allocation().get().with(newGroupMembership)));
}
}
}
/**
* Nodes are immutable so when changing attributes to the node we create a new instance.
*
* This method is used to both add new nodes and replaces old node references with the new references.
*/
private List<Node> replace(List<Node> list, List<Node> changed) {
// Remove any stale copies first so each changed node appears exactly once
list.removeAll(changed);
list.addAll(changed);
return list;
}
} | class Preparer {
private final NodeRepository nodeRepository;
private final GroupPreparer groupPreparer;
private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner;
public Preparer(NodeRepository nodeRepository, FlagSource flagSource, Optional<HostProvisioner> hostProvisioner,
Optional<LoadBalancerProvisioner> loadBalancerProvisioner) {
this.nodeRepository = nodeRepository;
this.loadBalancerProvisioner = loadBalancerProvisioner;
this.groupPreparer = new GroupPreparer(nodeRepository, hostProvisioner, flagSource);
}
/** Prepare all required resources for the given application and cluster */
public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
try {
var nodes = prepareNodes(application, cluster, requestedNodes, wantedGroups);
prepareLoadBalancer(application, cluster, requestedNodes);
return nodes;
}
catch (OutOfCapacityException e) {
throw new OutOfCapacityException("Could not satisfy " + requestedNodes +
( wantedGroups > 1 ? " (in " + wantedGroups + " groups)" : "") +
" in " + application + " " + cluster +
": " + e.getMessage());
}
}
/**
* Ensure sufficient nodes are reserved or active for the given application and cluster
*
* @return the list of nodes this cluster will have allocated if activated
*/
/** Prepare a load balancer for given application and cluster */
public void prepareLoadBalancer(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) {
loadBalancerProvisioner.ifPresent(provisioner -> provisioner.prepare(application, cluster, requestedNodes));
}
/**
* Returns a list of the nodes which are
* in groups with index number above or equal the group count
*/
private List<Node> findNodesInRemovableGroups(ApplicationId application, ClusterSpec requestedCluster, int wantedGroups) {
List<Node> surplusNodes = new ArrayList<>(0);
for (Node node : nodeRepository.nodes().list(Node.State.active).owner(application)) {
ClusterSpec nodeCluster = node.allocation().get().membership().cluster();
if ( ! nodeCluster.id().equals(requestedCluster.id())) continue;
if ( ! nodeCluster.type().equals(requestedCluster.type())) continue;
if (nodeCluster.group().get().index() >= wantedGroups)
surplusNodes.add(node);
}
return surplusNodes;
}
/** Move nodes from unwanted groups to wanted groups to avoid lingering groups consisting of retired nodes */
private void moveToActiveGroup(List<Node> surplusNodes, int wantedGroups, Optional<ClusterSpec.Group> targetGroup) {
for (ListIterator<Node> i = surplusNodes.listIterator(); i.hasNext(); ) {
Node node = i.next();
ClusterMembership membership = node.allocation().get().membership();
ClusterSpec cluster = membership.cluster();
if (cluster.group().get().index() >= wantedGroups) {
ClusterSpec.Group newGroup = targetGroup.orElse(ClusterSpec.Group.from(0));
ClusterMembership newGroupMembership = membership.with(cluster.with(Optional.of(newGroup)));
i.set(node.with(node.allocation().get().with(newGroupMembership)));
}
}
}
/**
* Nodes are immutable so when changing attributes to the node we create a new instance.
*
* This method is used to both add new nodes and replaces old node references with the new references.
*/
private List<Node> replace(List<Node> list, List<Node> changed) {
list.removeAll(changed);
list.addAll(changed);
return list;
}
} |
Yes, hold on | private List<Node> prepareNodes(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
List<Node> surplusNodes = findNodesInRemovableGroups(application, cluster, wantedGroups);
List<Integer> usedIndices = nodeRepository.nodes().list()
.owner(application)
.cluster(cluster.id())
.mapToList(node -> node.allocation().get().membership().index());
NodeIndices indices = new NodeIndices(usedIndices, ! cluster.type().isContent());
List<Node> acceptedNodes = new ArrayList<>();
for (int groupIndex = 0; groupIndex < wantedGroups; groupIndex++) {
ClusterSpec clusterGroup = cluster.with(Optional.of(ClusterSpec.Group.from(groupIndex)));
List<Node> accepted = groupPreparer.prepare(application, clusterGroup,
requestedNodes.fraction(wantedGroups), surplusNodes,
indices, wantedGroups);
if (requestedNodes.requireActiveParent()) {
Nodes nodes = nodeRepository.nodes();
NodeList activeHosts = nodes.list(Node.State.active).parents().nodeType(requestedNodes.type().hostType());
accepted = accepted.stream()
.filter(node -> node.parentHostname().isEmpty() || activeHosts.parentOf(node).isPresent())
.collect(Collectors.toList());
}
replace(acceptedNodes, accepted);
}
moveToActiveGroup(surplusNodes, wantedGroups, cluster.group());
acceptedNodes.removeAll(surplusNodes);
return acceptedNodes;
} | if (requestedNodes.requireActiveParent()) { | private List<Node> prepareNodes(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
List<Node> surplusNodes = findNodesInRemovableGroups(application, cluster, wantedGroups);
List<Integer> usedIndices = nodeRepository.nodes().list()
.owner(application)
.cluster(cluster.id())
.mapToList(node -> node.allocation().get().membership().index());
NodeIndices indices = new NodeIndices(usedIndices, ! cluster.type().isContent());
List<Node> acceptedNodes = new ArrayList<>();
for (int groupIndex = 0; groupIndex < wantedGroups; groupIndex++) {
ClusterSpec clusterGroup = cluster.with(Optional.of(ClusterSpec.Group.from(groupIndex)));
List<Node> accepted = groupPreparer.prepare(application, clusterGroup,
requestedNodes.fraction(wantedGroups), surplusNodes,
indices, wantedGroups);
if (requestedNodes.rejectNonActiveParent()) {
Nodes nodes = nodeRepository.nodes();
NodeList activeHosts = nodes.list(Node.State.active).parents().nodeType(requestedNodes.type().hostType());
accepted = accepted.stream()
.filter(node -> node.parentHostname().isEmpty() || activeHosts.parentOf(node).isPresent())
.collect(Collectors.toList());
}
replace(acceptedNodes, accepted);
}
moveToActiveGroup(surplusNodes, wantedGroups, cluster.group());
acceptedNodes.removeAll(surplusNodes);
return acceptedNodes;
} | class Preparer {
private final NodeRepository nodeRepository;
private final GroupPreparer groupPreparer;
private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner;
public Preparer(NodeRepository nodeRepository, FlagSource flagSource, Optional<HostProvisioner> hostProvisioner,
Optional<LoadBalancerProvisioner> loadBalancerProvisioner) {
this.nodeRepository = nodeRepository;
this.loadBalancerProvisioner = loadBalancerProvisioner;
this.groupPreparer = new GroupPreparer(nodeRepository, hostProvisioner, flagSource);
}
/** Prepare all required resources for the given application and cluster */
public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
try {
var nodes = prepareNodes(application, cluster, requestedNodes, wantedGroups);
prepareLoadBalancer(application, cluster, requestedNodes);
return nodes;
}
catch (OutOfCapacityException e) {
throw new OutOfCapacityException("Could not satisfy " + requestedNodes +
( wantedGroups > 1 ? " (in " + wantedGroups + " groups)" : "") +
" in " + application + " " + cluster +
": " + e.getMessage());
}
}
/**
* Ensure sufficient nodes are reserved or active for the given application and cluster
*
* @return the list of nodes this cluster will have allocated if activated
*/
/** Prepare a load balancer for given application and cluster */
public void prepareLoadBalancer(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) {
loadBalancerProvisioner.ifPresent(provisioner -> provisioner.prepare(application, cluster, requestedNodes));
}
/**
* Returns a list of the nodes which are
* in groups with index number above or equal the group count
*/
private List<Node> findNodesInRemovableGroups(ApplicationId application, ClusterSpec requestedCluster, int wantedGroups) {
List<Node> surplusNodes = new ArrayList<>(0);
for (Node node : nodeRepository.nodes().list(Node.State.active).owner(application)) {
ClusterSpec nodeCluster = node.allocation().get().membership().cluster();
if ( ! nodeCluster.id().equals(requestedCluster.id())) continue;
if ( ! nodeCluster.type().equals(requestedCluster.type())) continue;
if (nodeCluster.group().get().index() >= wantedGroups)
surplusNodes.add(node);
}
return surplusNodes;
}
/** Move nodes from unwanted groups to wanted groups to avoid lingering groups consisting of retired nodes */
private void moveToActiveGroup(List<Node> surplusNodes, int wantedGroups, Optional<ClusterSpec.Group> targetGroup) {
for (ListIterator<Node> i = surplusNodes.listIterator(); i.hasNext(); ) {
Node node = i.next();
ClusterMembership membership = node.allocation().get().membership();
ClusterSpec cluster = membership.cluster();
if (cluster.group().get().index() >= wantedGroups) {
ClusterSpec.Group newGroup = targetGroup.orElse(ClusterSpec.Group.from(0));
ClusterMembership newGroupMembership = membership.with(cluster.with(Optional.of(newGroup)));
i.set(node.with(node.allocation().get().with(newGroupMembership)));
}
}
}
/**
* Nodes are immutable so when changing attributes to the node we create a new instance.
*
* This method is used to both add new nodes and replaces old node references with the new references.
*/
private List<Node> replace(List<Node> list, List<Node> changed) {
list.removeAll(changed);
list.addAll(changed);
return list;
}
} | class Preparer {
private final NodeRepository nodeRepository;
private final GroupPreparer groupPreparer;
private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner;
public Preparer(NodeRepository nodeRepository, FlagSource flagSource, Optional<HostProvisioner> hostProvisioner,
Optional<LoadBalancerProvisioner> loadBalancerProvisioner) {
this.nodeRepository = nodeRepository;
this.loadBalancerProvisioner = loadBalancerProvisioner;
this.groupPreparer = new GroupPreparer(nodeRepository, hostProvisioner, flagSource);
}
/** Prepare all required resources for the given application and cluster */
public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
try {
var nodes = prepareNodes(application, cluster, requestedNodes, wantedGroups);
prepareLoadBalancer(application, cluster, requestedNodes);
return nodes;
}
catch (OutOfCapacityException e) {
throw new OutOfCapacityException("Could not satisfy " + requestedNodes +
( wantedGroups > 1 ? " (in " + wantedGroups + " groups)" : "") +
" in " + application + " " + cluster +
": " + e.getMessage());
}
}
/**
* Ensure sufficient nodes are reserved or active for the given application and cluster
*
* @return the list of nodes this cluster will have allocated if activated
*/
/** Prepare a load balancer for given application and cluster */
public void prepareLoadBalancer(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) {
loadBalancerProvisioner.ifPresent(provisioner -> provisioner.prepare(application, cluster, requestedNodes));
}
/**
* Returns a list of the nodes which are
* in groups with index number above or equal the group count
*/
private List<Node> findNodesInRemovableGroups(ApplicationId application, ClusterSpec requestedCluster, int wantedGroups) {
List<Node> surplusNodes = new ArrayList<>(0);
for (Node node : nodeRepository.nodes().list(Node.State.active).owner(application)) {
ClusterSpec nodeCluster = node.allocation().get().membership().cluster();
if ( ! nodeCluster.id().equals(requestedCluster.id())) continue;
if ( ! nodeCluster.type().equals(requestedCluster.type())) continue;
if (nodeCluster.group().get().index() >= wantedGroups)
surplusNodes.add(node);
}
return surplusNodes;
}
/** Move nodes from unwanted groups to wanted groups to avoid lingering groups consisting of retired nodes */
private void moveToActiveGroup(List<Node> surplusNodes, int wantedGroups, Optional<ClusterSpec.Group> targetGroup) {
for (ListIterator<Node> i = surplusNodes.listIterator(); i.hasNext(); ) {
Node node = i.next();
ClusterMembership membership = node.allocation().get().membership();
ClusterSpec cluster = membership.cluster();
if (cluster.group().get().index() >= wantedGroups) {
ClusterSpec.Group newGroup = targetGroup.orElse(ClusterSpec.Group.from(0));
ClusterMembership newGroupMembership = membership.with(cluster.with(Optional.of(newGroup)));
i.set(node.with(node.allocation().get().with(newGroupMembership)));
}
}
}
/**
* Nodes are immutable so when changing attributes to the node we create a new instance.
*
* This method is used to both add new nodes and replaces old node references with the new references.
*/
private List<Node> replace(List<Node> list, List<Node> changed) {
list.removeAll(changed);
list.addAll(changed);
return list;
}
} |
Wait, I don't think your statement makes sense | private List<Node> prepareNodes(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
List<Node> surplusNodes = findNodesInRemovableGroups(application, cluster, wantedGroups);
List<Integer> usedIndices = nodeRepository.nodes().list()
.owner(application)
.cluster(cluster.id())
.mapToList(node -> node.allocation().get().membership().index());
NodeIndices indices = new NodeIndices(usedIndices, ! cluster.type().isContent());
List<Node> acceptedNodes = new ArrayList<>();
for (int groupIndex = 0; groupIndex < wantedGroups; groupIndex++) {
ClusterSpec clusterGroup = cluster.with(Optional.of(ClusterSpec.Group.from(groupIndex)));
List<Node> accepted = groupPreparer.prepare(application, clusterGroup,
requestedNodes.fraction(wantedGroups), surplusNodes,
indices, wantedGroups);
if (requestedNodes.requireActiveParent()) {
Nodes nodes = nodeRepository.nodes();
NodeList activeHosts = nodes.list(Node.State.active).parents().nodeType(requestedNodes.type().hostType());
accepted = accepted.stream()
.filter(node -> node.parentHostname().isEmpty() || activeHosts.parentOf(node).isPresent())
.collect(Collectors.toList());
}
replace(acceptedNodes, accepted);
}
moveToActiveGroup(surplusNodes, wantedGroups, cluster.group());
acceptedNodes.removeAll(surplusNodes);
return acceptedNodes;
} | if (requestedNodes.requireActiveParent()) { | private List<Node> prepareNodes(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
List<Node> surplusNodes = findNodesInRemovableGroups(application, cluster, wantedGroups);
List<Integer> usedIndices = nodeRepository.nodes().list()
.owner(application)
.cluster(cluster.id())
.mapToList(node -> node.allocation().get().membership().index());
NodeIndices indices = new NodeIndices(usedIndices, ! cluster.type().isContent());
List<Node> acceptedNodes = new ArrayList<>();
for (int groupIndex = 0; groupIndex < wantedGroups; groupIndex++) {
ClusterSpec clusterGroup = cluster.with(Optional.of(ClusterSpec.Group.from(groupIndex)));
List<Node> accepted = groupPreparer.prepare(application, clusterGroup,
requestedNodes.fraction(wantedGroups), surplusNodes,
indices, wantedGroups);
if (requestedNodes.rejectNonActiveParent()) {
Nodes nodes = nodeRepository.nodes();
NodeList activeHosts = nodes.list(Node.State.active).parents().nodeType(requestedNodes.type().hostType());
accepted = accepted.stream()
.filter(node -> node.parentHostname().isEmpty() || activeHosts.parentOf(node).isPresent())
.collect(Collectors.toList());
}
replace(acceptedNodes, accepted);
}
moveToActiveGroup(surplusNodes, wantedGroups, cluster.group());
acceptedNodes.removeAll(surplusNodes);
return acceptedNodes;
} | class Preparer {
private final NodeRepository nodeRepository;
private final GroupPreparer groupPreparer;
private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner;
public Preparer(NodeRepository nodeRepository, FlagSource flagSource, Optional<HostProvisioner> hostProvisioner,
Optional<LoadBalancerProvisioner> loadBalancerProvisioner) {
this.nodeRepository = nodeRepository;
this.loadBalancerProvisioner = loadBalancerProvisioner;
this.groupPreparer = new GroupPreparer(nodeRepository, hostProvisioner, flagSource);
}
/** Prepare all required resources for the given application and cluster */
public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
try {
var nodes = prepareNodes(application, cluster, requestedNodes, wantedGroups);
prepareLoadBalancer(application, cluster, requestedNodes);
return nodes;
}
catch (OutOfCapacityException e) {
throw new OutOfCapacityException("Could not satisfy " + requestedNodes +
( wantedGroups > 1 ? " (in " + wantedGroups + " groups)" : "") +
" in " + application + " " + cluster +
": " + e.getMessage());
}
}
/**
* Ensure sufficient nodes are reserved or active for the given application and cluster
*
* @return the list of nodes this cluster will have allocated if activated
*/
/** Prepare a load balancer for given application and cluster */
public void prepareLoadBalancer(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) {
loadBalancerProvisioner.ifPresent(provisioner -> provisioner.prepare(application, cluster, requestedNodes));
}
/**
* Returns a list of the nodes which are
* in groups with index number above or equal the group count
*/
private List<Node> findNodesInRemovableGroups(ApplicationId application, ClusterSpec requestedCluster, int wantedGroups) {
List<Node> surplusNodes = new ArrayList<>(0);
for (Node node : nodeRepository.nodes().list(Node.State.active).owner(application)) {
ClusterSpec nodeCluster = node.allocation().get().membership().cluster();
if ( ! nodeCluster.id().equals(requestedCluster.id())) continue;
if ( ! nodeCluster.type().equals(requestedCluster.type())) continue;
if (nodeCluster.group().get().index() >= wantedGroups)
surplusNodes.add(node);
}
return surplusNodes;
}
/** Move nodes from unwanted groups to wanted groups to avoid lingering groups consisting of retired nodes */
private void moveToActiveGroup(List<Node> surplusNodes, int wantedGroups, Optional<ClusterSpec.Group> targetGroup) {
for (ListIterator<Node> i = surplusNodes.listIterator(); i.hasNext(); ) {
Node node = i.next();
ClusterMembership membership = node.allocation().get().membership();
ClusterSpec cluster = membership.cluster();
if (cluster.group().get().index() >= wantedGroups) {
ClusterSpec.Group newGroup = targetGroup.orElse(ClusterSpec.Group.from(0));
ClusterMembership newGroupMembership = membership.with(cluster.with(Optional.of(newGroup)));
i.set(node.with(node.allocation().get().with(newGroupMembership)));
}
}
}
/**
* Nodes are immutable so when changing attributes to the node we create a new instance.
*
* This method is used to both add new nodes and replaces old node references with the new references.
*/
private List<Node> replace(List<Node> list, List<Node> changed) {
list.removeAll(changed);
list.addAll(changed);
return list;
}
} | class Preparer {
private final NodeRepository nodeRepository;
private final GroupPreparer groupPreparer;
private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner;
public Preparer(NodeRepository nodeRepository, FlagSource flagSource, Optional<HostProvisioner> hostProvisioner,
Optional<LoadBalancerProvisioner> loadBalancerProvisioner) {
this.nodeRepository = nodeRepository;
this.loadBalancerProvisioner = loadBalancerProvisioner;
this.groupPreparer = new GroupPreparer(nodeRepository, hostProvisioner, flagSource);
}
/** Prepare all required resources for the given application and cluster */
public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
try {
var nodes = prepareNodes(application, cluster, requestedNodes, wantedGroups);
prepareLoadBalancer(application, cluster, requestedNodes);
return nodes;
}
catch (OutOfCapacityException e) {
throw new OutOfCapacityException("Could not satisfy " + requestedNodes +
( wantedGroups > 1 ? " (in " + wantedGroups + " groups)" : "") +
" in " + application + " " + cluster +
": " + e.getMessage());
}
}
/**
* Ensure sufficient nodes are reserved or active for the given application and cluster
*
* @return the list of nodes this cluster will have allocated if activated
*/
/** Prepare a load balancer for given application and cluster */
public void prepareLoadBalancer(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) {
loadBalancerProvisioner.ifPresent(provisioner -> provisioner.prepare(application, cluster, requestedNodes));
}
/**
* Returns a list of the nodes which are
* in groups with index number above or equal the group count
*/
private List<Node> findNodesInRemovableGroups(ApplicationId application, ClusterSpec requestedCluster, int wantedGroups) {
List<Node> surplusNodes = new ArrayList<>(0);
for (Node node : nodeRepository.nodes().list(Node.State.active).owner(application)) {
ClusterSpec nodeCluster = node.allocation().get().membership().cluster();
if ( ! nodeCluster.id().equals(requestedCluster.id())) continue;
if ( ! nodeCluster.type().equals(requestedCluster.type())) continue;
if (nodeCluster.group().get().index() >= wantedGroups)
surplusNodes.add(node);
}
return surplusNodes;
}
/** Move nodes from unwanted groups to wanted groups to avoid lingering groups consisting of retired nodes */
private void moveToActiveGroup(List<Node> surplusNodes, int wantedGroups, Optional<ClusterSpec.Group> targetGroup) {
for (ListIterator<Node> i = surplusNodes.listIterator(); i.hasNext(); ) {
Node node = i.next();
ClusterMembership membership = node.allocation().get().membership();
ClusterSpec cluster = membership.cluster();
if (cluster.group().get().index() >= wantedGroups) {
ClusterSpec.Group newGroup = targetGroup.orElse(ClusterSpec.Group.from(0));
ClusterMembership newGroupMembership = membership.with(cluster.with(Optional.of(newGroup)));
i.set(node.with(node.allocation().get().with(newGroupMembership)));
}
}
}
/**
* Nodes are immutable so when changing attributes to the node we create a new instance.
*
* This method is used to both add new nodes and replaces old node references with the new references.
*/
private List<Node> replace(List<Node> list, List<Node> changed) {
list.removeAll(changed);
list.addAll(changed);
return list;
}
} |
Something needs to be inverted here, either the name or the implementation + statement | private List<Node> prepareNodes(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
List<Node> surplusNodes = findNodesInRemovableGroups(application, cluster, wantedGroups);
List<Integer> usedIndices = nodeRepository.nodes().list()
.owner(application)
.cluster(cluster.id())
.mapToList(node -> node.allocation().get().membership().index());
NodeIndices indices = new NodeIndices(usedIndices, ! cluster.type().isContent());
List<Node> acceptedNodes = new ArrayList<>();
for (int groupIndex = 0; groupIndex < wantedGroups; groupIndex++) {
ClusterSpec clusterGroup = cluster.with(Optional.of(ClusterSpec.Group.from(groupIndex)));
List<Node> accepted = groupPreparer.prepare(application, clusterGroup,
requestedNodes.fraction(wantedGroups), surplusNodes,
indices, wantedGroups);
if (requestedNodes.requireActiveParent()) {
Nodes nodes = nodeRepository.nodes();
NodeList activeHosts = nodes.list(Node.State.active).parents().nodeType(requestedNodes.type().hostType());
accepted = accepted.stream()
.filter(node -> node.parentHostname().isEmpty() || activeHosts.parentOf(node).isPresent())
.collect(Collectors.toList());
}
replace(acceptedNodes, accepted);
}
moveToActiveGroup(surplusNodes, wantedGroups, cluster.group());
acceptedNodes.removeAll(surplusNodes);
return acceptedNodes;
} | if (requestedNodes.requireActiveParent()) { | private List<Node> prepareNodes(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
List<Node> surplusNodes = findNodesInRemovableGroups(application, cluster, wantedGroups);
List<Integer> usedIndices = nodeRepository.nodes().list()
.owner(application)
.cluster(cluster.id())
.mapToList(node -> node.allocation().get().membership().index());
NodeIndices indices = new NodeIndices(usedIndices, ! cluster.type().isContent());
List<Node> acceptedNodes = new ArrayList<>();
for (int groupIndex = 0; groupIndex < wantedGroups; groupIndex++) {
ClusterSpec clusterGroup = cluster.with(Optional.of(ClusterSpec.Group.from(groupIndex)));
List<Node> accepted = groupPreparer.prepare(application, clusterGroup,
requestedNodes.fraction(wantedGroups), surplusNodes,
indices, wantedGroups);
if (requestedNodes.rejectNonActiveParent()) {
Nodes nodes = nodeRepository.nodes();
NodeList activeHosts = nodes.list(Node.State.active).parents().nodeType(requestedNodes.type().hostType());
accepted = accepted.stream()
.filter(node -> node.parentHostname().isEmpty() || activeHosts.parentOf(node).isPresent())
.collect(Collectors.toList());
}
replace(acceptedNodes, accepted);
}
moveToActiveGroup(surplusNodes, wantedGroups, cluster.group());
acceptedNodes.removeAll(surplusNodes);
return acceptedNodes;
} | class Preparer {
private final NodeRepository nodeRepository;
private final GroupPreparer groupPreparer;
private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner;
public Preparer(NodeRepository nodeRepository, FlagSource flagSource, Optional<HostProvisioner> hostProvisioner,
Optional<LoadBalancerProvisioner> loadBalancerProvisioner) {
this.nodeRepository = nodeRepository;
this.loadBalancerProvisioner = loadBalancerProvisioner;
this.groupPreparer = new GroupPreparer(nodeRepository, hostProvisioner, flagSource);
}
/** Prepare all required resources for the given application and cluster */
public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
try {
var nodes = prepareNodes(application, cluster, requestedNodes, wantedGroups);
prepareLoadBalancer(application, cluster, requestedNodes);
return nodes;
}
catch (OutOfCapacityException e) {
throw new OutOfCapacityException("Could not satisfy " + requestedNodes +
( wantedGroups > 1 ? " (in " + wantedGroups + " groups)" : "") +
" in " + application + " " + cluster +
": " + e.getMessage());
}
}
/**
* Ensure sufficient nodes are reserved or active for the given application and cluster
*
* @return the list of nodes this cluster will have allocated if activated
*/
/** Prepare a load balancer for given application and cluster */
public void prepareLoadBalancer(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) {
loadBalancerProvisioner.ifPresent(provisioner -> provisioner.prepare(application, cluster, requestedNodes));
}
/**
* Returns a list of the nodes which are
* in groups with index number above or equal the group count
*/
private List<Node> findNodesInRemovableGroups(ApplicationId application, ClusterSpec requestedCluster, int wantedGroups) {
List<Node> surplusNodes = new ArrayList<>(0);
for (Node node : nodeRepository.nodes().list(Node.State.active).owner(application)) {
ClusterSpec nodeCluster = node.allocation().get().membership().cluster();
if ( ! nodeCluster.id().equals(requestedCluster.id())) continue;
if ( ! nodeCluster.type().equals(requestedCluster.type())) continue;
if (nodeCluster.group().get().index() >= wantedGroups)
surplusNodes.add(node);
}
return surplusNodes;
}
/** Move nodes from unwanted groups to wanted groups to avoid lingering groups consisting of retired nodes */
private void moveToActiveGroup(List<Node> surplusNodes, int wantedGroups, Optional<ClusterSpec.Group> targetGroup) {
for (ListIterator<Node> i = surplusNodes.listIterator(); i.hasNext(); ) {
Node node = i.next();
ClusterMembership membership = node.allocation().get().membership();
ClusterSpec cluster = membership.cluster();
if (cluster.group().get().index() >= wantedGroups) {
ClusterSpec.Group newGroup = targetGroup.orElse(ClusterSpec.Group.from(0));
ClusterMembership newGroupMembership = membership.with(cluster.with(Optional.of(newGroup)));
i.set(node.with(node.allocation().get().with(newGroupMembership)));
}
}
}
/**
* Nodes are immutable so when changing attributes to the node we create a new instance.
*
* This method is used to both add new nodes and replaces old node references with the new references.
*/
private List<Node> replace(List<Node> list, List<Node> changed) {
list.removeAll(changed);
list.addAll(changed);
return list;
}
} | class Preparer {
private final NodeRepository nodeRepository;
private final GroupPreparer groupPreparer;
private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner;
public Preparer(NodeRepository nodeRepository, FlagSource flagSource, Optional<HostProvisioner> hostProvisioner,
Optional<LoadBalancerProvisioner> loadBalancerProvisioner) {
this.nodeRepository = nodeRepository;
this.loadBalancerProvisioner = loadBalancerProvisioner;
this.groupPreparer = new GroupPreparer(nodeRepository, hostProvisioner, flagSource);
}
/** Prepare all required resources for the given application and cluster */
public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
try {
var nodes = prepareNodes(application, cluster, requestedNodes, wantedGroups);
prepareLoadBalancer(application, cluster, requestedNodes);
return nodes;
}
catch (OutOfCapacityException e) {
throw new OutOfCapacityException("Could not satisfy " + requestedNodes +
( wantedGroups > 1 ? " (in " + wantedGroups + " groups)" : "") +
" in " + application + " " + cluster +
": " + e.getMessage());
}
}
/**
* Ensure sufficient nodes are reserved or active for the given application and cluster
*
* @return the list of nodes this cluster will have allocated if activated
*/
/** Prepare a load balancer for given application and cluster */
public void prepareLoadBalancer(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) {
loadBalancerProvisioner.ifPresent(provisioner -> provisioner.prepare(application, cluster, requestedNodes));
}
/**
* Returns a list of the nodes which are
* in groups with index number above or equal the group count
*/
private List<Node> findNodesInRemovableGroups(ApplicationId application, ClusterSpec requestedCluster, int wantedGroups) {
List<Node> surplusNodes = new ArrayList<>(0);
for (Node node : nodeRepository.nodes().list(Node.State.active).owner(application)) {
ClusterSpec nodeCluster = node.allocation().get().membership().cluster();
if ( ! nodeCluster.id().equals(requestedCluster.id())) continue;
if ( ! nodeCluster.type().equals(requestedCluster.type())) continue;
if (nodeCluster.group().get().index() >= wantedGroups)
surplusNodes.add(node);
}
return surplusNodes;
}
/** Move nodes from unwanted groups to wanted groups to avoid lingering groups consisting of retired nodes */
private void moveToActiveGroup(List<Node> surplusNodes, int wantedGroups, Optional<ClusterSpec.Group> targetGroup) {
for (ListIterator<Node> i = surplusNodes.listIterator(); i.hasNext(); ) {
Node node = i.next();
ClusterMembership membership = node.allocation().get().membership();
ClusterSpec cluster = membership.cluster();
if (cluster.group().get().index() >= wantedGroups) {
ClusterSpec.Group newGroup = targetGroup.orElse(ClusterSpec.Group.from(0));
ClusterMembership newGroupMembership = membership.with(cluster.with(Optional.of(newGroup)));
i.set(node.with(node.allocation().get().with(newGroupMembership)));
}
}
}
/**
 * Nodes are immutable, so changed nodes are new instances.
 *
 * Swaps any stale references in {@code list} for their updated counterparts in {@code changed}
 * (nodes not previously present are simply added), and returns the same, mutated list.
 */
private List<Node> replace(List<Node> list, List<Node> changed) {
    list.removeAll(changed);
    for (Node updated : changed) {
        list.add(updated);
    }
    return list;
}
} |
Changed method name to `rejectNonActiveParents()` | private List<Node> prepareNodes(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
List<Node> surplusNodes = findNodesInRemovableGroups(application, cluster, wantedGroups);
List<Integer> usedIndices = nodeRepository.nodes().list()
.owner(application)
.cluster(cluster.id())
.mapToList(node -> node.allocation().get().membership().index());
NodeIndices indices = new NodeIndices(usedIndices, ! cluster.type().isContent());
List<Node> acceptedNodes = new ArrayList<>();
for (int groupIndex = 0; groupIndex < wantedGroups; groupIndex++) {
ClusterSpec clusterGroup = cluster.with(Optional.of(ClusterSpec.Group.from(groupIndex)));
List<Node> accepted = groupPreparer.prepare(application, clusterGroup,
requestedNodes.fraction(wantedGroups), surplusNodes,
indices, wantedGroups);
if (requestedNodes.requireActiveParent()) {
Nodes nodes = nodeRepository.nodes();
NodeList activeHosts = nodes.list(Node.State.active).parents().nodeType(requestedNodes.type().hostType());
accepted = accepted.stream()
.filter(node -> node.parentHostname().isEmpty() || activeHosts.parentOf(node).isPresent())
.collect(Collectors.toList());
}
replace(acceptedNodes, accepted);
}
moveToActiveGroup(surplusNodes, wantedGroups, cluster.group());
acceptedNodes.removeAll(surplusNodes);
return acceptedNodes;
} | if (requestedNodes.requireActiveParent()) { | private List<Node> prepareNodes(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
List<Node> surplusNodes = findNodesInRemovableGroups(application, cluster, wantedGroups);
List<Integer> usedIndices = nodeRepository.nodes().list()
.owner(application)
.cluster(cluster.id())
.mapToList(node -> node.allocation().get().membership().index());
NodeIndices indices = new NodeIndices(usedIndices, ! cluster.type().isContent());
List<Node> acceptedNodes = new ArrayList<>();
for (int groupIndex = 0; groupIndex < wantedGroups; groupIndex++) {
ClusterSpec clusterGroup = cluster.with(Optional.of(ClusterSpec.Group.from(groupIndex)));
List<Node> accepted = groupPreparer.prepare(application, clusterGroup,
requestedNodes.fraction(wantedGroups), surplusNodes,
indices, wantedGroups);
if (requestedNodes.rejectNonActiveParent()) {
Nodes nodes = nodeRepository.nodes();
NodeList activeHosts = nodes.list(Node.State.active).parents().nodeType(requestedNodes.type().hostType());
accepted = accepted.stream()
.filter(node -> node.parentHostname().isEmpty() || activeHosts.parentOf(node).isPresent())
.collect(Collectors.toList());
}
replace(acceptedNodes, accepted);
}
moveToActiveGroup(surplusNodes, wantedGroups, cluster.group());
acceptedNodes.removeAll(surplusNodes);
return acceptedNodes;
} | class Preparer {
private final NodeRepository nodeRepository;
private final GroupPreparer groupPreparer;
private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner;
public Preparer(NodeRepository nodeRepository, FlagSource flagSource, Optional<HostProvisioner> hostProvisioner,
Optional<LoadBalancerProvisioner> loadBalancerProvisioner) {
this.nodeRepository = nodeRepository;
this.loadBalancerProvisioner = loadBalancerProvisioner;
this.groupPreparer = new GroupPreparer(nodeRepository, hostProvisioner, flagSource);
}
/**
 * Prepares all required resources — nodes first, then the load balancer — for the given
 * application and cluster.
 *
 * @return the nodes this cluster will have allocated if activated
 * @throws OutOfCapacityException rethrown with an enriched message when the request cannot be satisfied
 */
public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
    try {
        List<Node> preparedNodes = prepareNodes(application, cluster, requestedNodes, wantedGroups);
        prepareLoadBalancer(application, cluster, requestedNodes);
        return preparedNodes;
    }
    catch (OutOfCapacityException e) {
        String groupDetail = wantedGroups > 1 ? " (in " + wantedGroups + " groups)" : "";
        throw new OutOfCapacityException("Could not satisfy " + requestedNodes + groupDetail +
                                         " in " + application + " " + cluster +
                                         ": " + e.getMessage());
    }
}
/**
* Ensure sufficient nodes are reserved or active for the given application and cluster
*
* @return the list of nodes this cluster will have allocated if activated
*/
/** Prepare a load balancer for given application and cluster */
public void prepareLoadBalancer(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) {
loadBalancerProvisioner.ifPresent(provisioner -> provisioner.prepare(application, cluster, requestedNodes));
}
/**
* Returns a list of the nodes which are
* in groups with index number above or equal the group count
*/
private List<Node> findNodesInRemovableGroups(ApplicationId application, ClusterSpec requestedCluster, int wantedGroups) {
List<Node> surplusNodes = new ArrayList<>(0);
for (Node node : nodeRepository.nodes().list(Node.State.active).owner(application)) {
ClusterSpec nodeCluster = node.allocation().get().membership().cluster();
if ( ! nodeCluster.id().equals(requestedCluster.id())) continue;
if ( ! nodeCluster.type().equals(requestedCluster.type())) continue;
if (nodeCluster.group().get().index() >= wantedGroups)
surplusNodes.add(node);
}
return surplusNodes;
}
/** Move nodes from unwanted groups to wanted groups to avoid lingering groups consisting of retired nodes */
private void moveToActiveGroup(List<Node> surplusNodes, int wantedGroups, Optional<ClusterSpec.Group> targetGroup) {
for (ListIterator<Node> i = surplusNodes.listIterator(); i.hasNext(); ) {
Node node = i.next();
ClusterMembership membership = node.allocation().get().membership();
ClusterSpec cluster = membership.cluster();
if (cluster.group().get().index() >= wantedGroups) {
ClusterSpec.Group newGroup = targetGroup.orElse(ClusterSpec.Group.from(0));
ClusterMembership newGroupMembership = membership.with(cluster.with(Optional.of(newGroup)));
i.set(node.with(node.allocation().get().with(newGroupMembership)));
}
}
}
/**
* Nodes are immutable so when changing attributes to the node we create a new instance.
*
* This method is used to both add new nodes and replaces old node references with the new references.
*/
private List<Node> replace(List<Node> list, List<Node> changed) {
list.removeAll(changed);
list.addAll(changed);
return list;
}
} | class Preparer {
private final NodeRepository nodeRepository;
private final GroupPreparer groupPreparer;
private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner;
public Preparer(NodeRepository nodeRepository, FlagSource flagSource, Optional<HostProvisioner> hostProvisioner,
Optional<LoadBalancerProvisioner> loadBalancerProvisioner) {
this.nodeRepository = nodeRepository;
this.loadBalancerProvisioner = loadBalancerProvisioner;
this.groupPreparer = new GroupPreparer(nodeRepository, hostProvisioner, flagSource);
}
/** Prepare all required resources for the given application and cluster */
public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
try {
var nodes = prepareNodes(application, cluster, requestedNodes, wantedGroups);
prepareLoadBalancer(application, cluster, requestedNodes);
return nodes;
}
catch (OutOfCapacityException e) {
throw new OutOfCapacityException("Could not satisfy " + requestedNodes +
( wantedGroups > 1 ? " (in " + wantedGroups + " groups)" : "") +
" in " + application + " " + cluster +
": " + e.getMessage());
}
}
/**
* Ensure sufficient nodes are reserved or active for the given application and cluster
*
* @return the list of nodes this cluster will have allocated if activated
*/
/** Prepare a load balancer for given application and cluster */
public void prepareLoadBalancer(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) {
loadBalancerProvisioner.ifPresent(provisioner -> provisioner.prepare(application, cluster, requestedNodes));
}
/**
* Returns a list of the nodes which are
* in groups with index number above or equal the group count
*/
private List<Node> findNodesInRemovableGroups(ApplicationId application, ClusterSpec requestedCluster, int wantedGroups) {
List<Node> surplusNodes = new ArrayList<>(0);
for (Node node : nodeRepository.nodes().list(Node.State.active).owner(application)) {
ClusterSpec nodeCluster = node.allocation().get().membership().cluster();
if ( ! nodeCluster.id().equals(requestedCluster.id())) continue;
if ( ! nodeCluster.type().equals(requestedCluster.type())) continue;
if (nodeCluster.group().get().index() >= wantedGroups)
surplusNodes.add(node);
}
return surplusNodes;
}
/** Move nodes from unwanted groups to wanted groups to avoid lingering groups consisting of retired nodes */
private void moveToActiveGroup(List<Node> surplusNodes, int wantedGroups, Optional<ClusterSpec.Group> targetGroup) {
for (ListIterator<Node> i = surplusNodes.listIterator(); i.hasNext(); ) {
Node node = i.next();
ClusterMembership membership = node.allocation().get().membership();
ClusterSpec cluster = membership.cluster();
if (cluster.group().get().index() >= wantedGroups) {
ClusterSpec.Group newGroup = targetGroup.orElse(ClusterSpec.Group.from(0));
ClusterMembership newGroupMembership = membership.with(cluster.with(Optional.of(newGroup)));
i.set(node.with(node.allocation().get().with(newGroupMembership)));
}
}
}
/**
* Nodes are immutable so when changing attributes to the node we create a new instance.
*
* This method is used to both add new nodes and replaces old node references with the new references.
*/
private List<Node> replace(List<Node> list, List<Node> changed) {
list.removeAll(changed);
list.addAll(changed);
return list;
}
} |
woudl -> would | private void clearSessionMetaData(boolean clearPendingStateWrites) {
Integer currentVote = (pendingStore.masterVote != null ? pendingStore.masterVote : currentlyStored.masterVote);
currentlyStored.clear();
if (clearPendingStateWrites) {
pendingStore.clear();
} else {
pendingStore.clearNonClusterStateFields();
}
pendingStore.masterVote = currentVote;
log.log(Level.FINE, "Cleared session metadata. Pending master vote is now " + pendingStore.masterVote);
} | private void clearSessionMetaData(boolean clearPendingStateWrites) {
Integer currentVote = (pendingStore.masterVote != null ? pendingStore.masterVote : currentlyStored.masterVote);
currentlyStored.clear();
if (clearPendingStateWrites) {
pendingStore.clear();
} else {
pendingStore.clearNonClusterStateFields();
}
pendingStore.masterVote = currentVote;
log.log(Level.FINE, "Cleared session metadata. Pending master vote is now " + pendingStore.masterVote);
} | class DatabaseListener implements Database.DatabaseListener {
/** Called when the ZooKeeper session is lost; records the event and wakes all waiters on the monitor. */
public void handleZooKeeperSessionDown() {
    String message = "Fleetcontroller " + nodeIndex + ": Lost contact with zookeeper server";
    log.log(Level.FINE, message);
    synchronized (monitor) {
        lostZooKeeperConnectionEvent = true;
        monitor.notifyAll();
    }
}
/** Called with new master vote data; stores it unless identical to the last event, then wakes all waiters. */
public void handleMasterData(Map<Integer, Integer> data) {
    synchronized (monitor) {
        boolean sameAsLast = masterDataEvent != null && masterDataEvent.equals(data);
        if (sameAsLast) {
            log.log(Level.FINE, "Fleetcontroller " + nodeIndex + ": New master data was the same as the last one. Not responding to it");
        } else {
            masterDataEvent = data;
        }
        monitor.notifyAll();
    }
}
} | class DatabaseListener implements Database.DatabaseListener {
public void handleZooKeeperSessionDown() {
log.log(Level.FINE, "Fleetcontroller " + nodeIndex + ": Lost contact with zookeeper server");
synchronized(monitor) {
lostZooKeeperConnectionEvent = true;
monitor.notifyAll();
}
}
public void handleMasterData(Map<Integer, Integer> data) {
synchronized (monitor) {
if (masterDataEvent != null && masterDataEvent.equals(data)) {
log.log(Level.FINE, "Fleetcontroller " + nodeIndex + ": New master data was the same as the last one. Not responding to it");
} else {
masterDataEvent = data;
}
monitor.notifyAll();
}
}
} | |
Gee, thanks ... ! | public void createApplication(ApplicationId id) {
if ( ! id.tenant().equals(tenant))
throw new IllegalArgumentException("Cannot write application id '" + id + "' for tenant '" + tenant + "'");
try (Lock lock = lock(id)) {
if (curator.exists(applicationPath(id))) return;
curator.create(applicationPath(id));
modifyReindexing(id, ApplicationReindexing.empty(), UnaryOperator.identity());
setDedicatedClusterControllerCluster(id);
}
} | if (curator.exists(applicationPath(id))) return; | public void createApplication(ApplicationId id) {
if ( ! id.tenant().equals(tenant))
throw new IllegalArgumentException("Cannot write application id '" + id + "' for tenant '" + tenant + "'");
try (Lock lock = lock(id)) {
if (curator.exists(applicationPath(id))) return;
curator.create(applicationPath(id));
modifyReindexing(id, ApplicationReindexing.empty(), UnaryOperator.identity());
setDedicatedClusterControllerCluster(id);
}
} | class ApplicationCuratorDatabase {
final TenantName tenant;
final Path applicationsPath;
final Path locksPath;
private final Curator curator;
public ApplicationCuratorDatabase(TenantName tenant, Curator curator) {
this.tenant = tenant;
this.applicationsPath = TenantRepository.getApplicationsPath(tenant);
this.locksPath = TenantRepository.getLocksPath(tenant);
this.curator = curator;
}
/** Returns the lock for changing the session status of the given application. */
public Lock lock(ApplicationId id) {
return curator.lock(lockPath(id), Duration.ofMinutes(1));
}
/**
 * Reads, modifies and writes the application reindexing status for this application,
 * holding the per-application reindexing lock for the whole read-modify-write cycle.
 */
public void modifyReindexing(ApplicationId id, ApplicationReindexing emptyValue, UnaryOperator<ApplicationReindexing> modifications) {
    try (Lock lock = curator.lock(reindexingLockPath(id), Duration.ofMinutes(1))) {
        ApplicationReindexing current = readReindexingStatus(id).orElse(emptyValue);
        writeReindexingStatus(id, modifications.apply(current));
    }
}
public boolean exists(ApplicationId id) {
return curator.exists(applicationPath(id));
}
/**
* Creates a node for the given application, marking its existence.
*/
/**
* Returns a transaction which writes the given session id as the currently active for the given application.
*
* @param applicationId An {@link ApplicationId} that represents an active application.
* @param sessionId Id of the session containing the application package for this id.
*/
public Transaction createPutTransaction(ApplicationId applicationId, long sessionId) {
return new CuratorTransaction(curator).add(CuratorOperations.setData(applicationPath(applicationId).getAbsolute(), Utf8.toAsciiBytes(sessionId)));
}
/**
* Returns a transaction which deletes this application.
*/
public CuratorTransaction createDeleteTransaction(ApplicationId applicationId) {
return CuratorTransaction.from(CuratorOperations.deleteAll(applicationPath(applicationId).getAbsolute(), curator), curator);
}
/**
 * Returns the active session id for the given application, or empty when the application
 * node does not exist or holds no session data (zero-length payload).
 */
public Optional<Long> activeSessionOf(ApplicationId id) {
    return curator.getData(applicationPath(id))
                  .filter(bytes -> bytes.length > 0)
                  .map(bytes -> Long.parseLong(Utf8.toString(bytes)));
}
public boolean getDedicatedClusterControllerCluster(ApplicationId id) {
return curator.exists(dedicatedClusterControllerClusterPath(id));
}
public void setDedicatedClusterControllerCluster(ApplicationId id) {
curator.create(dedicatedClusterControllerClusterPath(id));
}
/**
 * Lists the applications of this tenant, in this config server, that currently
 * have an active session, sorted by their serialized id.
 *
 * @return an unmodifiable, sorted list of active {@link ApplicationId}s
 */
public List<ApplicationId> activeApplications() {
    return curator.getChildren(applicationsPath)
                  .stream()
                  .sorted(String::compareTo)
                  .map(ApplicationId::fromSerializedForm)
                  .filter(application -> activeSessionOf(application).isPresent())
                  .collect(Collectors.toUnmodifiableList());
}
public Optional<ApplicationReindexing> readReindexingStatus(ApplicationId id) {
return curator.getData(reindexingDataPath(id))
.map(ReindexingStatusSerializer::fromBytes);
}
void writeReindexingStatus(ApplicationId id, ApplicationReindexing status) {
curator.set(reindexingDataPath(id), ReindexingStatusSerializer.toBytes(status));
}
/** Sets up a listenable cache with the given listener, over the applications path of this tenant. */
public Curator.DirectoryCache createApplicationsPathCache(ExecutorService zkCacheExecutor) {
return curator.createDirectoryCache(applicationsPath.getAbsolute(), false, false, zkCacheExecutor);
}
private Path reindexingLockPath(ApplicationId id) {
return locksPath.append(id.serializedForm()).append("reindexing");
}
private Path lockPath(ApplicationId id) {
return locksPath.append(id.serializedForm());
}
private Path applicationPath(ApplicationId id) {
return applicationsPath.append(id.serializedForm());
}
private Path dedicatedClusterControllerClusterPath(ApplicationId id) {
return applicationPath(id).append("dedicatedClusterControllerCluster");
}
private Path reindexingDataPath(ApplicationId id) {
return applicationPath(id).append("reindexing");
}
private static class ReindexingStatusSerializer {
private static final String ENABLED = "enabled";
private static final String CLUSTERS = "clusters";
private static final String PENDING = "pending";
private static final String READY = "ready";
private static final String TYPE = "type";
private static final String NAME = "name";
private static final String GENERATION = "generation";
private static final String EPOCH_MILLIS = "epochMillis";
private static byte[] toBytes(ApplicationReindexing reindexing) {
Cursor root = new Slime().setObject();
root.setBool(ENABLED, reindexing.enabled());
Cursor clustersArray = root.setArray(CLUSTERS);
reindexing.clusters().forEach((name, cluster) -> {
Cursor clusterObject = clustersArray.addObject();
clusterObject.setString(NAME, name);
Cursor pendingArray = clusterObject.setArray(PENDING);
cluster.pending().forEach((type, generation) -> {
Cursor pendingObject = pendingArray.addObject();
pendingObject.setString(TYPE, type);
pendingObject.setLong(GENERATION, generation);
});
Cursor readyArray = clusterObject.setArray(READY);
cluster.ready().forEach((type, status) -> {
Cursor statusObject = readyArray.addObject();
statusObject.setString(TYPE, type);
setStatus(statusObject, status);
});
});
return Exceptions.uncheck(() -> SlimeUtils.toJsonBytes(root));
}
private static void setStatus(Cursor statusObject, Status status) {
statusObject.setLong(EPOCH_MILLIS, status.ready().toEpochMilli());
}
/** Deserializes an {@link ApplicationReindexing} from its JSON byte form; a missing 'enabled' field is read as true. */
private static ApplicationReindexing fromBytes(byte[] data) {
    var root = SlimeUtils.jsonToSlimeOrThrow(data).get();
    var enabledField = root.field(ENABLED);
    boolean enabled = ! enabledField.valid() || enabledField.asBool();
    return new ApplicationReindexing(enabled,
                                     SlimeUtils.entriesStream(root.field(CLUSTERS))
                                               .collect(toUnmodifiableMap(cluster -> cluster.field(NAME).asString(),
                                                                          ReindexingStatusSerializer::getCluster)));
}
private static Cluster getCluster(Inspector object) {
return new Cluster(SlimeUtils.entriesStream(object.field(PENDING))
.collect(toUnmodifiableMap(entry -> entry.field(TYPE).asString(),
entry -> entry.field(GENERATION).asLong())),
SlimeUtils.entriesStream(object.field(READY))
.collect(toUnmodifiableMap(entry -> entry.field(TYPE).asString(),
entry -> getStatus(entry))));
}
private static Status getStatus(Inspector statusObject) {
return new Status(Instant.ofEpochMilli(statusObject.field(EPOCH_MILLIS).asLong()));
}
}
} | class ApplicationCuratorDatabase {
final TenantName tenant;
final Path applicationsPath;
final Path locksPath;
private final Curator curator;
public ApplicationCuratorDatabase(TenantName tenant, Curator curator) {
this.tenant = tenant;
this.applicationsPath = TenantRepository.getApplicationsPath(tenant);
this.locksPath = TenantRepository.getLocksPath(tenant);
this.curator = curator;
}
/** Returns the lock for changing the session status of the given application. */
public Lock lock(ApplicationId id) {
return curator.lock(lockPath(id), Duration.ofMinutes(1));
}
/** Reads, modifies and writes the application reindexing for this application, while holding its lock. */
public void modifyReindexing(ApplicationId id, ApplicationReindexing emptyValue, UnaryOperator<ApplicationReindexing> modifications) {
try (Lock lock = curator.lock(reindexingLockPath(id), Duration.ofMinutes(1))) {
writeReindexingStatus(id, modifications.apply(readReindexingStatus(id).orElse(emptyValue)));
}
}
public boolean exists(ApplicationId id) {
return curator.exists(applicationPath(id));
}
/**
* Creates a node for the given application, marking its existence.
*/
/**
* Returns a transaction which writes the given session id as the currently active for the given application.
*
* @param applicationId An {@link ApplicationId} that represents an active application.
* @param sessionId Id of the session containing the application package for this id.
*/
public Transaction createPutTransaction(ApplicationId applicationId, long sessionId) {
return new CuratorTransaction(curator).add(CuratorOperations.setData(applicationPath(applicationId).getAbsolute(), Utf8.toAsciiBytes(sessionId)));
}
/**
* Returns a transaction which deletes this application.
*/
public CuratorTransaction createDeleteTransaction(ApplicationId applicationId) {
return CuratorTransaction.from(CuratorOperations.deleteAll(applicationPath(applicationId).getAbsolute(), curator), curator);
}
/**
* Returns the active session id for the given application.
* Returns Optional.empty if application not found or no active session exists.
*/
public Optional<Long> activeSessionOf(ApplicationId id) {
Optional<byte[]> data = curator.getData(applicationPath(id));
return (data.isEmpty() || data.get().length == 0)
? Optional.empty()
: data.map(bytes -> Long.parseLong(Utf8.toString(bytes)));
}
public boolean getDedicatedClusterControllerCluster(ApplicationId id) {
return curator.exists(dedicatedClusterControllerClusterPath(id));
}
public void setDedicatedClusterControllerCluster(ApplicationId id) {
curator.create(dedicatedClusterControllerClusterPath(id));
}
/**
* List the active applications of a tenant in this config server.
*
* @return a list of {@link ApplicationId}s that are active.
*/
public List<ApplicationId> activeApplications() {
return curator.getChildren(applicationsPath).stream()
.sorted()
.map(ApplicationId::fromSerializedForm)
.filter(id -> activeSessionOf(id).isPresent())
.collect(Collectors.toUnmodifiableList());
}
public Optional<ApplicationReindexing> readReindexingStatus(ApplicationId id) {
return curator.getData(reindexingDataPath(id))
.map(ReindexingStatusSerializer::fromBytes);
}
void writeReindexingStatus(ApplicationId id, ApplicationReindexing status) {
curator.set(reindexingDataPath(id), ReindexingStatusSerializer.toBytes(status));
}
/** Sets up a listenable cache with the given listener, over the applications path of this tenant. */
public Curator.DirectoryCache createApplicationsPathCache(ExecutorService zkCacheExecutor) {
return curator.createDirectoryCache(applicationsPath.getAbsolute(), false, false, zkCacheExecutor);
}
private Path reindexingLockPath(ApplicationId id) {
return locksPath.append(id.serializedForm()).append("reindexing");
}
private Path lockPath(ApplicationId id) {
return locksPath.append(id.serializedForm());
}
private Path applicationPath(ApplicationId id) {
return applicationsPath.append(id.serializedForm());
}
private Path dedicatedClusterControllerClusterPath(ApplicationId id) {
return applicationPath(id).append("dedicatedClusterControllerCluster");
}
private Path reindexingDataPath(ApplicationId id) {
return applicationPath(id).append("reindexing");
}
private static class ReindexingStatusSerializer {
private static final String ENABLED = "enabled";
private static final String CLUSTERS = "clusters";
private static final String PENDING = "pending";
private static final String READY = "ready";
private static final String TYPE = "type";
private static final String NAME = "name";
private static final String GENERATION = "generation";
private static final String EPOCH_MILLIS = "epochMillis";
private static byte[] toBytes(ApplicationReindexing reindexing) {
Cursor root = new Slime().setObject();
root.setBool(ENABLED, reindexing.enabled());
Cursor clustersArray = root.setArray(CLUSTERS);
reindexing.clusters().forEach((name, cluster) -> {
Cursor clusterObject = clustersArray.addObject();
clusterObject.setString(NAME, name);
Cursor pendingArray = clusterObject.setArray(PENDING);
cluster.pending().forEach((type, generation) -> {
Cursor pendingObject = pendingArray.addObject();
pendingObject.setString(TYPE, type);
pendingObject.setLong(GENERATION, generation);
});
Cursor readyArray = clusterObject.setArray(READY);
cluster.ready().forEach((type, status) -> {
Cursor statusObject = readyArray.addObject();
statusObject.setString(TYPE, type);
setStatus(statusObject, status);
});
});
return Exceptions.uncheck(() -> SlimeUtils.toJsonBytes(root));
}
private static void setStatus(Cursor statusObject, Status status) {
statusObject.setLong(EPOCH_MILLIS, status.ready().toEpochMilli());
}
private static ApplicationReindexing fromBytes(byte[] data) {
Cursor root = SlimeUtils.jsonToSlimeOrThrow(data).get();
return new ApplicationReindexing(root.field(ENABLED).valid() ? root.field(ENABLED).asBool() : true,
SlimeUtils.entriesStream(root.field(CLUSTERS))
.collect(toUnmodifiableMap(object -> object.field(NAME).asString(),
object -> getCluster(object))));
}
private static Cluster getCluster(Inspector object) {
return new Cluster(SlimeUtils.entriesStream(object.field(PENDING))
.collect(toUnmodifiableMap(entry -> entry.field(TYPE).asString(),
entry -> entry.field(GENERATION).asLong())),
SlimeUtils.entriesStream(object.field(READY))
.collect(toUnmodifiableMap(entry -> entry.field(TYPE).asString(),
entry -> getStatus(entry))));
}
private static Status getStatus(Inspector statusObject) {
return new Status(Instant.ofEpochMilli(statusObject.field(EPOCH_MILLIS).asLong()));
}
}
} |
We should propagate the status code. These will always return `200` | private HttpResponse validateSecretStore(String tenantName, String name, String region, String parameterName) {
var tenant = TenantName.from(tenantName);
if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
return ErrorResponse.badRequest("Tenant '" + tenant + "' is not a cloud tenant");
var cloudTenant = (CloudTenant)controller.tenants().require(tenant);
var tenantSecretStore = cloudTenant.tenantSecretStores()
.stream()
.filter(secretStore -> secretStore.getName().equals(name))
.findFirst();
var deployment = getActiveDeployment(tenant);
if (deployment.isEmpty())
return ErrorResponse.badRequest("Tenant '" + tenantName + "' has no active deployments");
if (tenantSecretStore.isEmpty())
return ErrorResponse.notFoundError("No secret store '" + name + "' configured for tenant '" + tenantName + "'");
var response = controller.serviceRegistry().configServer().validateSecretStore(deployment.get(), tenantSecretStore.get(), region, parameterName);
try {
var responseRoot = new Slime();
var responseCursor = responseRoot.setObject();
responseCursor.setString("target", deployment.get().toString());
var responseResultCursor = responseCursor.setObject("result");
var responseSlime = SlimeUtils.jsonToSlime(response);
SlimeUtils.copyObject(responseSlime.get(), responseResultCursor);
return new SlimeJsonResponse(responseRoot);
} catch (JsonParseException e) {
return new MessageResponse(response);
}
} | return new MessageResponse(response); | private HttpResponse validateSecretStore(String tenantName, String name, String region, String parameterName) {
var tenant = TenantName.from(tenantName);
if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
return ErrorResponse.badRequest("Tenant '" + tenant + "' is not a cloud tenant");
var cloudTenant = (CloudTenant)controller.tenants().require(tenant);
var tenantSecretStore = cloudTenant.tenantSecretStores()
.stream()
.filter(secretStore -> secretStore.getName().equals(name))
.findFirst();
var deployment = getActiveDeployment(tenant);
if (deployment.isEmpty())
return ErrorResponse.badRequest("Tenant '" + tenantName + "' has no active deployments");
if (tenantSecretStore.isEmpty())
return ErrorResponse.notFoundError("No secret store '" + name + "' configured for tenant '" + tenantName + "'");
var response = controller.serviceRegistry().configServer().validateSecretStore(deployment.get(), tenantSecretStore.get(), region, parameterName);
try {
var responseRoot = new Slime();
var responseCursor = responseRoot.setObject();
responseCursor.setString("target", deployment.get().toString());
var responseResultCursor = responseCursor.setObject("result");
var responseSlime = SlimeUtils.jsonToSlime(response);
SlimeUtils.copyObject(responseSlime.get(), responseResultCursor);
return new SlimeJsonResponse(responseRoot);
} catch (JsonParseException e) {
return ErrorResponse.internalServerError(response);
}
} | class ApplicationApiHandler extends LoggingRequestHandler {
private static final ObjectMapper jsonMapper = new ObjectMapper();
private static final String OPTIONAL_PREFIX = "/api";
private final Controller controller;
private final AccessControlRequests accessControlRequests;
private final TestConfigSerializer testConfigSerializer;
@Inject
public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx,
Controller controller,
AccessControlRequests accessControlRequests) {
super(parentCtx);
this.controller = controller;
this.accessControlRequests = accessControlRequests;
this.testConfigSerializer = new TestConfigSerializer(controller.system());
}
// NOTE(review): generous 20-minute request timeout — presumably for long-running operations
// served by this handler (e.g. deployment); confirm before changing.
@Override
public Duration getTimeout() {
return Duration.ofMinutes(20);
}
/**
 * Dispatches the request to the handler for its HTTP method, and maps known exception
 * types to the corresponding HTTP error responses.
 */
@Override
public HttpResponse handle(HttpRequest request) {
try {
// Strip the optional "/api" prefix before route matching.
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
switch (request.getMethod()) {
case GET: return handleGET(path, request);
case PUT: return handlePUT(path, request);
case POST: return handlePOST(path, request);
case PATCH: return handlePATCH(path, request);
case DELETE: return handleDELETE(path, request);
case OPTIONS: return handleOPTIONS();
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
// Authorization / lookup / validation failures map to 403, 401, 404 and 400 respectively.
catch (ForbiddenException e) {
return ErrorResponse.forbidden(Exceptions.toMessageString(e));
}
catch (NotAuthorizedException e) {
return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
}
catch (NotExistsException e) {
return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
// Config server errors carry their own error code, translated to a status here;
// unrecognized codes default to 400.
catch (ConfigServerException e) {
switch (e.getErrorCode()) {
case NOT_FOUND:
return new ErrorResponse(NOT_FOUND, e.getErrorCode().name(), Exceptions.toMessageString(e));
case ACTIVATION_CONFLICT:
return new ErrorResponse(CONFLICT, e.getErrorCode().name(), Exceptions.toMessageString(e));
case INTERNAL_SERVER_ERROR:
return new ErrorResponse(INTERNAL_SERVER_ERROR, e.getErrorCode().name(), Exceptions.toMessageString(e));
default:
return new ErrorResponse(BAD_REQUEST, e.getErrorCode().name(), Exceptions.toMessageString(e));
}
}
// Anything else is unexpected: log with stack trace and answer 500.
catch (RuntimeException e) {
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
/**
 * Routes GET requests. Matching is first-match-wins, so more specific paths must come
 * before their prefixes. Removed a duplicated, unreachable route for
 * {@code .../environment/{environment}/region/{region}/instance/{instance}} (it appeared twice, verbatim).
 */
private HttpResponse handleGET(Path path, HttpRequest request) {
    if (path.matches("/application/v4/")) return root(request);
    if (path.matches("/application/v4/tenant")) return tenants(request);
    if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/region/{region}/parameter-name/{parameter-name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), path.get("region"), path.get("parameter-name"));
    if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/metering")) return metering(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return metrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    // Legacy path order (environment/region before instance) kept for backwards compatibility:
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes PUT requests: tenant/tenant-info updates, secret stores, and rotation overrides. */
private HttpResponse handlePUT(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
    // Both path orders are supported; PUT sets the override ("set" = false here means create, see DELETE for removal).
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/**
 * Routes POST requests: creation of tenants/applications/instances, key management,
 * deployment triggering and zone-level operations. First-match-wins ordering.
 */
private HttpResponse handlePOST(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
    // Application-level routes use the "default" instance.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
    // Legacy path order (environment/region before instance) kept for backwards compatibility:
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes PATCH requests; both the application and instance paths patch the application itself. */
private HttpResponse handlePATCH(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/**
 * Routes DELETE requests: removal of tenants, keys, applications, instances, deployments,
 * and cancellation of ongoing changes. First-match-wins ordering.
 */
private HttpResponse handleDELETE(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
    // "choice" selects what part of the pending change to cancel; absent means "all".
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
    // Legacy path order (environment/region before instance) kept for backwards compatibility:
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Answers OPTIONS preflight requests with the supported methods and an empty body. */
private HttpResponse handleOPTIONS() {
    EmptyResponse optionsResponse = new EmptyResponse();
    optionsResponse.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
    return optionsResponse;
}
/** Lists all tenants in full detail, as a JSON array. */
private HttpResponse recursiveRoot(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantArray = slime.setArray();
    controller.tenants().asList()
              .forEach(tenant -> toSlime(tenantArray.addObject(), tenant, request));
    return new SlimeJsonResponse(slime);
}
/** Serves the API root: full tenant listing when recursion is requested, otherwise just links. */
private HttpResponse root(HttpRequest request) {
    if (recurseOverTenants(request))
        return recursiveRoot(request);
    return new ResourceResponse(request, "tenant");
}
/** Lists all tenants in summary form, as a JSON array. */
private HttpResponse tenants(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantArray = slime.setArray();
    controller.tenants().asList()
              .forEach(tenant -> tenantInTenantsListToSlime(tenant, request.getUri(), tenantArray.addObject()));
    return new SlimeJsonResponse(slime);
}
/** Serves a single tenant by name, or 404 if it does not exist. */
private HttpResponse tenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    return tenant(tenant.get(), request);
}
/** Renders the given tenant as a JSON object response. */
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    toSlime(root, tenant, request);
    return new SlimeJsonResponse(slime);
}
/** Serves the extended tenant info; only cloud tenants support this, others get 404. */
private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
    return controller.tenants().get(TenantName.from(tenantName))
                     .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                     .map(CloudTenant.class::cast)
                     .map(cloudTenant -> tenantInfo(cloudTenant.info(), request))
                     .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
}
/** Renders the given tenant info as JSON; an empty info yields an empty object. */
private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    if (info.isEmpty())
        return new SlimeJsonResponse(slime);
    root.setString("name", info.name());
    root.setString("email", info.email());
    root.setString("website", info.website());
    root.setString("invoiceEmail", info.invoiceEmail());
    root.setString("contactName", info.contactName());
    root.setString("contactEmail", info.contactEmail());
    toSlime(info.address(), root);
    toSlime(info.billingContact(), root);
    return new SlimeJsonResponse(slime);
}
/** Writes the given address under an "address" object on the parent; writes nothing when empty. */
private void toSlime(TenantInfoAddress address, Cursor parentCursor) {
    if (address.isEmpty()) return;
    Cursor cursor = parentCursor.setObject("address");
    cursor.setString("addressLines", address.addressLines());
    cursor.setString("postalCodeOrZip", address.postalCodeOrZip());
    cursor.setString("city", address.city());
    cursor.setString("stateRegionProvince", address.stateRegionProvince());
    cursor.setString("country", address.country());
}
/** Writes the billing contact under a "billingContact" object on the parent; writes nothing when empty. */
private void toSlime(TenantInfoBillingContact billingContact, Cursor parentCursor) {
    if (billingContact.isEmpty()) return;
    Cursor cursor = parentCursor.setObject("billingContact");
    cursor.setString("name", billingContact.name());
    cursor.setString("email", billingContact.email());
    cursor.setString("phone", billingContact.phone());
    toSlime(billingContact.address(), cursor);
}
/** Updates the extended tenant info; only cloud tenants support this, others get 404. */
private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isEmpty() || tenant.get().type() != Tenant.Type.cloud)
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
    return updateTenantInfo((CloudTenant) tenant.get(), request);
}
/** Returns the string value of the given field, or the given default when the field is absent. */
private String getString(Inspector field, String defaultValue) { // fixed parameter-name typo: "defaultVale"
    return field.valid() ? field.asString() : defaultValue;
}
/**
 * Merges the fields present in the request body into the tenant's existing info and stores
 * the result under lock. Fields absent from the request keep their current value.
 */
private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) {
    TenantInfo oldInfo = tenant.info();
    Inspector insp = toSlime(request.getData()).get();
    TenantInfo mergedInfo = TenantInfo.EMPTY
            .withName(getString(insp.field("name"), oldInfo.name()))
            .withEmail(getString(insp.field("email"), oldInfo.email()))
            // Fix: default was oldInfo.email() (copy-paste); an absent "website" must keep the old website.
            .withWebsite(getString(insp.field("website"), oldInfo.website()))
            .withInvoiceEmail(getString(insp.field("invoiceEmail"), oldInfo.invoiceEmail()))
            .withContactName(getString(insp.field("contactName"), oldInfo.contactName()))
            // Fix: default was oldInfo.contactName() (copy-paste); an absent "contactEmail" must keep the old contact email.
            .withContactEmail(getString(insp.field("contactEmail"), oldInfo.contactEmail()))
            .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address()))
            .withBillingContact(updateTenantInfoBillingContact(insp.field("billingContact"), oldInfo.billingContact()));
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}
/** Returns the old address overlaid with whatever fields are present in the given inspector. */
private TenantInfoAddress updateTenantInfoAddress(Inspector insp, TenantInfoAddress oldAddress) {
    if ( ! insp.valid()) return oldAddress; // no "address" object in the request: keep as-is
    TenantInfoAddress merged = TenantInfoAddress.EMPTY;
    merged = merged.withCountry(getString(insp.field("country"), oldAddress.country()));
    merged = merged.withStateRegionProvince(getString(insp.field("stateRegionProvince"), oldAddress.stateRegionProvince()));
    merged = merged.withCity(getString(insp.field("city"), oldAddress.city()));
    merged = merged.withPostalCodeOrZip(getString(insp.field("postalCodeOrZip"), oldAddress.postalCodeOrZip()));
    merged = merged.withAddressLines(getString(insp.field("addressLines"), oldAddress.addressLines()));
    return merged;
}
/** Returns the old billing contact overlaid with whatever fields are present in the given inspector. */
private TenantInfoBillingContact updateTenantInfoBillingContact(Inspector insp, TenantInfoBillingContact oldContact) {
    if ( ! insp.valid()) return oldContact; // no "billingContact" object in the request: keep as-is
    TenantInfoBillingContact merged = TenantInfoBillingContact.EMPTY;
    merged = merged.withName(getString(insp.field("name"), oldContact.name()));
    merged = merged.withEmail(getString(insp.field("email"), oldContact.email()));
    merged = merged.withPhone(getString(insp.field("phone"), oldContact.phone()));
    merged = merged.withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address()));
    return merged;
}
/**
 * Lists the applications of a tenant as JSON, optionally filtered to a single application name,
 * with per-instance links. With showOnlyProductionInstances, only production instances are listed.
 */
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().get(tenantName).isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    Slime slime = new Slime();
    Cursor applicationArray = slime.setArray();
    for (com.yahoo.vespa.hosted.controller.Application application : controller.applications().asList(tenant)) {
        // Empty filter means "all applications"; otherwise require an exact name match.
        if (applicationName.map(application.id().application().value()::equals).orElse(true)) {
            Cursor applicationObject = applicationArray.addObject();
            applicationObject.setString("tenant", application.id().tenant().value());
            applicationObject.setString("application", application.id().application().value());
            // Links are absolute, derived from the request URI.
            applicationObject.setString("url", withPath("/application/v4" +
                                                        "/tenant/" + application.id().tenant().value() +
                                                        "/application/" + application.id().application().value(),
                                                        request.getUri()).toString());
            Cursor instanceArray = applicationObject.setArray("instances");
            for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
                                                                              : application.instances().keySet()) {
                Cursor instanceObject = instanceArray.addObject();
                instanceObject.setString("instance", instance.value());
                instanceObject.setString("url", withPath("/application/v4" +
                                                         "/tenant/" + application.id().tenant().value() +
                                                         "/application/" + application.id().application().value() +
                                                         "/instance/" + instance.value(),
                                                         request.getUri()).toString());
            }
        }
    }
    return new SlimeJsonResponse(slime);
}
/** Serves the dev application package for the given id and job type, as a zip download. */
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
    // Dev packages exist only for zones developers deploy to directly.
    if ( ! type.environment().isManuallyDeployed())
        throw new IllegalArgumentException("Only manually deployed zones have dev packages");
    ZoneId zone = type.zone(controller.system());
    byte[] packageBytes = controller.applications().applicationStore().getDev(id, zone);
    String filename = id.toFullString() + "." + zone.value() + ".zip";
    return new ZipResponse(filename, packageBytes);
}
/**
 * Serves a submitted application package as a zip download: the build given by the "build"
 * request property, or the latest submitted build when the property is absent.
 * Removed the unused local {@code applicationId}.
 *
 * @throws IllegalArgumentException if the "build" property is not a parseable number
 * @throws NotExistsException if no package exists for the resolved build
 */
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
    var tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
    long buildNumber;
    var requestedBuild = Optional.ofNullable(request.getProperty("build")).map(build -> {
        try {
            return Long.parseLong(build);
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("Invalid build number", e);
        }
    });
    if (requestedBuild.isEmpty()) { // default to the latest submitted build
        var application = controller.applications().requireApplication(tenantAndApplication);
        var latestBuild = application.latestVersion().map(ApplicationVersion::buildNumber).orElse(OptionalLong.empty());
        if (latestBuild.isEmpty()) {
            throw new NotExistsException("No application package has been submitted for '" + tenantAndApplication + "'");
        }
        buildNumber = latestBuild.getAsLong();
    } else {
        buildNumber = requestedBuild.get();
    }
    var applicationPackage = controller.applications().applicationStore().find(tenantAndApplication.tenant(), tenantAndApplication.application(), buildNumber);
    var filename = tenantAndApplication + "-build" + buildNumber + ".zip";
    if (applicationPackage.isEmpty()) {
        throw new NotExistsException("No application package found for '" +
                                     tenantAndApplication +
                                     "' with build number " + buildNumber);
    }
    return new ZipResponse(filename, applicationPackage.get());
}
/** Renders the named application as a JSON object response. */
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    toSlime(root, getApplication(tenantName, applicationName), request);
    return new SlimeJsonResponse(slime);
}
/** Serves the compile version for the given application as {"compileVersion": "..."}. */
private HttpResponse compileVersion(String tenantName, String applicationName) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    root.setString("compileVersion", compileVersion(id).toFullString());
    return new SlimeJsonResponse(slime);
}
/** Renders the named instance, including its deployment status, as a JSON object response. */
private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    var instance = getInstance(tenantName, applicationName, instanceName);
    var status = controller.jobController().deploymentStatus(getApplication(tenantName, applicationName));
    toSlime(root, instance, status, request);
    return new SlimeJsonResponse(slime);
}
/**
 * Registers the PEM-encoded public key in the request body as a developer key for the
 * calling user, on the given cloud tenant. Responds with the tenant's resulting key set.
 */
private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    Principal user = request.getJDiscRequest().getUserPrincipal();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(toSlime(request.getData()).get().field("key").asString());
    Slime root = new Slime();
    controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withDeveloperKey(developerKey, user);
        toSlime(root.setObject().setArray("keys"), lockedTenant.get().developerKeys());
        controller.tenants().store(lockedTenant);
    });
    return new SlimeJsonResponse(root);
}
/**
 * Returns a deployment of some instance of some application under the given tenant,
 * or empty if the tenant has no deployments at all. Which deployment is returned is
 * unspecified beyond iteration order of the tenant's applications and instances.
 */
private Optional<DeploymentId> getActiveDeployment(TenantName tenant) {
    for (var application : controller.applications().asList(tenant)) {
        for (var instance : application.instances().values()) {
            // Any zone with a deployment qualifies; pick the first one found.
            var zone = instance.deployments().keySet().stream().findFirst();
            if (zone.isPresent())
                return Optional.of(new DeploymentId(instance.id(), zone.get()));
        }
    }
    return Optional.empty();
}
/**
 * Removes the PEM-encoded developer key in the request body from the given cloud tenant.
 * Responds with the tenant's resulting key set.
 */
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    // Note: the original looked up the key's owning Principal here, but never used it.
    Slime root = new Slime();
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withoutDeveloperKey(developerKey);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}
/** Serializes each developer key and the user it belongs to into the given array. */
private void toSlime(Cursor keysArray, Map<PublicKey, Principal> keys) {
    for (Map.Entry<PublicKey, Principal> entry : keys.entrySet()) {
        Cursor keyObject = keysArray.addObject();
        keyObject.setString("key", KeyUtils.toPem(entry.getKey()));
        keyObject.setString("user", entry.getValue().getName());
    }
}
/** Adds the PEM-encoded deploy key in the request body to the application; responds with all its deploy keys. */
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(toSlime(request.getData()).get().field("key").asString());
    Slime root = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), locked -> {
        locked = locked.withDeployKey(deployKey);
        Cursor keys = root.setObject().setArray("keys");
        locked.get().deployKeys().stream().map(KeyUtils::toPem).forEach(keys::addString);
        controller.applications().store(locked);
    });
    return new SlimeJsonResponse(root);
}
/** Removes the PEM-encoded deploy key in the request body from the application; responds with the remaining keys. */
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(toSlime(request.getData()).get().field("key").asString());
    Slime root = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), locked -> {
        locked = locked.withoutDeployKey(deployKey);
        Cursor keys = root.setObject().setArray("keys");
        locked.get().deployKeys().stream().map(KeyUtils::toPem).forEach(keys::addString);
        controller.applications().store(locked);
    });
    return new SlimeJsonResponse(root);
}
/**
 * Registers an AWS secret store for a cloud tenant: validates it, sets up the tenant IAM
 * policy, stores the secret store on the tenant, and responds with the resulting store list.
 */
private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    var data = toSlime(request.getData()).get();
    var awsId = mandatory("awsId", data).asString();
    var externalId = mandatory("externalId", data).asString();
    var role = mandatory("role", data).asString();
    var tenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
    var tenantSecretStore = new TenantSecretStore(name, awsId, role);
    // Plain concatenation: the previous String.format had no format specifiers, and would
    // have thrown/garbled if the store's string form ever contained a '%'.
    if (!tenantSecretStore.isValid()) {
        return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid");
    }
    if (tenant.tenantSecretStores().contains(tenantSecretStore)) {
        return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured");
    }
    controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role);
    controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId);
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withSecretStore(tenantSecretStore);
        controller.tenants().store(lockedTenant);
    });
    // Re-read the tenant so the response reflects the stored state.
    tenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
    var slime = new Slime();
    toSlime(slime.setObject(), tenant.tenantSecretStores());
    return new SlimeJsonResponse(slime);
}
/**
 * Deletes the named secret store from the tenant, tears down its IAM policy, and responds
 * with the remaining stores. 404 if no store with that name exists.
 */
private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) {
    var tenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
    var match = tenant.tenantSecretStores().stream()
                      .filter(store -> store.getName().equals(name))
                      .findFirst();
    if (match.isEmpty())
        return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found");
    var secretStore = match.get();
    controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), secretStore);
    controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), secretStore.getName(), secretStore.getRole());
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, locked -> {
        locked = locked.withoutSecretStore(secretStore);
        controller.tenants().store(locked);
    });
    tenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
    var slime = new Slime();
    toSlime(slime.setObject(), tenant.tenantSecretStores());
    return new SlimeJsonResponse(slime);
}
/**
 * Applies the recognized fields of the request body ("majorVersion", "pemDeployKey") to
 * the application, and responds with a message describing what changed.
 */
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector body = toSlime(request.getData()).get();
    StringJoiner messages = new StringJoiner("\n").setEmptyValue("No applicable changes.");
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        Inspector majorVersionField = body.field("majorVersion");
        if (majorVersionField.valid()) {
            // 0 means "clear the pinned major version".
            Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
            application = application.withMajorVersion(majorVersion);
            messages.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
        }
        Inspector pemDeployKeyField = body.field("pemDeployKey");
        if (pemDeployKeyField.valid()) {
            String pemDeployKey = pemDeployKeyField.asString();
            application = application.withDeployKey(KeyUtils.fromPemEncodedPublicKey(pemDeployKey));
            messages.add("Added deploy key " + pemDeployKey);
        }
        controller.applications().store(application);
    });
    return new MessageResponse(messages.toString());
}
/** Looks up the application, throwing NotExistsException if it does not exist. */
private com.yahoo.vespa.hosted.controller.Application getApplication(String tenantName, String applicationName) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    return controller.applications().getApplication(id)
                     .orElseThrow(() -> new NotExistsException(id + " not found"));
}
/** Looks up the instance, throwing NotExistsException if it does not exist. */
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    return controller.applications().getInstance(id)
                     .orElseThrow(() -> new NotExistsException(id + " not found"));
}
/** Lists the nodes of the given deployment, with state, orchestration, version and resource details. */
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    Slime slime = new Slime();
    Cursor nodesArray = slime.setObject().setArray("nodes");
    for (Node node : controller.serviceRegistry().configServer().nodeRepository().list(zone, id)) {
        Cursor nodeObject = nodesArray.addObject();
        nodeObject.setString("hostname", node.hostname().value());
        nodeObject.setString("state", valueOf(node.state()));
        node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
        nodeObject.setString("orchestration", valueOf(node.serviceState()));
        nodeObject.setString("version", node.currentVersion().toString());
        nodeObject.setString("flavor", node.flavor());
        toSlime(node.resources(), nodeObject);
        nodeObject.setBool("fastDisk", node.resources().diskSpeed() == NodeResources.DiskSpeed.fast);
        nodeObject.setString("clusterId", node.clusterId());
        nodeObject.setString("clusterType", valueOf(node.clusterType()));
    }
    return new SlimeJsonResponse(slime);
}
/** Serializes autoscaling state per cluster: min/max/current/target/suggested resources, utilization, scaling events. */
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);
    Slime slime = new Slime();
    Cursor clustersObject = slime.setObject().setObject("clusters");
    for (Cluster cluster : application.clusters().values()) {
        Cursor clusterObject = clustersObject.setObject(cluster.id().value());
        toSlime(cluster.min(), clusterObject.setObject("min"));
        toSlime(cluster.max(), clusterObject.setObject("max"));
        toSlime(cluster.current(), clusterObject.setObject("current"));
        // Report the target only when it differs from the current allocation.
        cluster.target().ifPresent(target -> {
            if ( ! target.justNumbers().equals(cluster.current().justNumbers()))
                toSlime(target, clusterObject.setObject("target"));
        });
        cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested")));
        utilizationToSlime(cluster.utilization(), clusterObject.setObject("utilization"));
        scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
        clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus());
    }
    return new SlimeJsonResponse(slime);
}
/** Maps a node state to its serialized name; throws on any state this API does not expose. */
private static String valueOf(Node.State state) {
    switch (state) {
        case failed: case parked: case dirty: case ready:
        case active: case inactive: case reserved: case provisioned:
            // The serialized form equals the enum constant name for all exposed states.
            return state.name();
        default:
            throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
    }
}
/** Maps an orchestration state to its serialized name; anything unrecognized becomes "unknown". */
static String valueOf(Node.ServiceState state) {
    switch (state) {
        case expectedUp: case allowedDown: case permanentlyDown: case unorchestrated:
            // The serialized form equals the enum constant name for these states.
            return state.name();
        default:
            return "unknown";
    }
}
/** Maps a cluster type to its serialized name; throws on any type this API does not expose. */
private static String valueOf(Node.ClusterType type) {
    switch (type) {
        case admin: case content: case container: case combined:
            // The serialized form equals the enum constant name for all exposed types.
            return type.name();
        default:
            throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
    }
}
/** Maps a disk speed to its serialized name; throws on anything unrecognized. */
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
    switch (diskSpeed) {
        case fast: case slow: case any:
            // The serialized form equals the enum constant name.
            return diskSpeed.name();
        default:
            throw new IllegalArgumentException("Unknown disk speed '" + diskSpeed.name() + "'");
    }
}
/** Maps a storage type to its serialized name; throws on anything unrecognized. */
private static String valueOf(NodeResources.StorageType storageType) {
    switch (storageType) {
        case remote: case local: case any:
            // The serialized form equals the enum constant name.
            return storageType.name();
        default:
            throw new IllegalArgumentException("Unknown storage type '" + storageType.name() + "'");
    }
}
/**
 * Streams Vespa logs for the given deployment to the client.
 * Query parameters are passed through to the config server's log retrieval API.
 */
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    DeploymentId deployment = new DeploymentId(application, zone);
    InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
    return new HttpResponse(200) {
        @Override
        public void render(OutputStream outputStream) throws IOException {
            // Close the upstream stream once rendered: transferTo does not close its source,
            // so the previous code leaked the connection/stream from the config server.
            try (InputStream logs = logStream) {
                logs.transferTo(outputStream);
            }
        }
    };
}
/** Returns proton (content node) metrics for the given deployment as JSON. */
private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    List<ProtonMetrics> protonMetrics = controller.serviceRegistry().configServer().getProtonMetrics(deployment);
    return buildResponseFromProtonMetrics(protonMetrics);
}
/** Wraps the given proton metrics in a {"metrics": [...]} JSON response; 500 on serialization failure. */
private JsonResponse buildResponseFromProtonMetrics(List<ProtonMetrics> protonMetrics) {
    try {
        var root = jsonMapper.createObjectNode();
        var metricsArray = jsonMapper.createArrayNode();
        for (ProtonMetrics clusterMetrics : protonMetrics) {
            metricsArray.add(clusterMetrics.toJson());
        }
        root.set("metrics", metricsArray);
        return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(root));
    } catch (JsonProcessingException e) {
        log.log(Level.SEVERE, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
        return new JsonResponse(500, "");
    }
}
/**
 * Triggers the given job for the given instance: a forced re-run of the last execution when
 * "reTrigger" is set in the body, otherwise a forced trigger (optionally skipping tests).
 */
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
    Inspector body = toSlime(request.getData()).get();
    boolean requireTests = ! body.field("skipTests").asBool();
    String triggered;
    if (body.field("reTrigger").asBool()) {
        triggered = controller.applications().deploymentTrigger().reTrigger(id, type).type().jobName();
    } else {
        triggered = controller.applications().deploymentTrigger()
                              .forceTrigger(id, type, request.getJDiscRequest().getUserPrincipal().getName(), requireTests)
                              .stream().map(job -> job.type().jobName()).collect(joining(", "));
    }
    if (triggered.isEmpty())
        return new MessageResponse("Job " + type.jobName() + " for " + id + " not triggered");
    return new MessageResponse("Triggered " + triggered + " for " + id);
}
/** Pauses the given job for the maximum allowed pause duration. */
private HttpResponse pause(ApplicationId id, JobType type) {
    controller.applications().deploymentTrigger()
              .pauseJob(id, type, controller.clock().instant().plus(DeploymentTrigger.maxPause));
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
/** Resumes the given job, lifting any pause previously set on it. */
private HttpResponse resume(ApplicationId id, JobType type) {
    controller.applications().deploymentTrigger().resumeJob(id, type);
    return new MessageResponse(type.jobName() + " for " + id + " resumed");
}
/**
 * Serializes the tenant-wide application view to the given object: identity, job link,
 * latest submitted version, per-instance summaries, deploy keys, metrics, activity and
 * issue references. Field order defines the JSON output order.
 */
private void toSlime(Cursor object, com.yahoo.vespa.hosted.controller.Application application, HttpRequest request) {
    object.setString("tenant", application.id().tenant().value());
    object.setString("application", application.id().application().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + application.id().tenant().value() +
                                             "/application/" + application.id().application().value() +
                                             "/job/",
                                             request.getUri()).toString());
    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.latestVersion().ifPresent(version -> toSlime(version, object.setObject("latestVersion")));
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    // The in-progress and outstanding change is reported from the first instance only.
    application.instances().values().stream().findFirst().ifPresent(instance -> {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
    });
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    // Optionally restrict the instance list to production instances, per request parameter.
    Cursor instancesArray = object.setArray("instances");
    for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                  : application.instances().values())
        toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/**
 * Serializes an instance summary (for the application-level listing) to the given object:
 * change status, change blockers and deployments. Field order defines the JSON output order.
 */
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
    object.setString("instance", instance.name().value());
    if (deploymentSpec.instance(instance.name()).isPresent()) {
        // Removed a jobStatus list which was computed here but never used.
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
        // Windows during which the deployment spec blocks platform or revision changes.
        Cursor changeBlockers = object.setArray("changeBlockers");
        deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    globalEndpointsToSlime(object, instance);
    // Deployments, ordered by the deployment spec when one exists for this instance.
    List<Deployment> deployments = deploymentSpec.instance(instance.name())
                                                 .map(spec -> new DeploymentSteps(spec, controller::system))
                                                 .map(steps -> steps.sortedDeployments(instance.deployments().values()))
                                                 .orElse(List.copyOf(instance.deployments().values()));
    Cursor deploymentsArray = object.setArray("deployments");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = deploymentsArray.addObject();
        if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
        if (recurseOverDeployments(request))
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            // Shallow form: identity plus a link to the full deployment resource.
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/instance/" + instance.name().value() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
}
/** Adds the instance's non-legacy rotation endpoint URLs (deduplicated, in order) and its first rotation id. */
private void globalEndpointsToSlime(Cursor object, Instance instance) {
    var globalRotationsArray = object.setArray("globalRotations");
    var seen = new LinkedHashSet<String>();
    for (Endpoint endpoint : controller.routing().endpointsOf(instance.id())
                                       .requiresRotation()
                                       .not().legacy()
                                       .asList()) {
        String url = endpoint.url().toString();
        if (seen.add(url))
            globalRotationsArray.addString(url);
    }
    instance.rotations().stream()
            .map(AssignedRotation::rotationId)
            .findFirst()
            .ifPresent(rotation -> object.setString("rotationId", rotation.asString()));
}
/**
 * Serializes the full instance view to the given object: identity, source info,
 * change/blocker status, endpoints, deployments (recursively when requested),
 * deploy keys, metrics, activity and issue references.
 * Field order defines the JSON output order.
 */
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
    com.yahoo.vespa.hosted.controller.Application application = status.application();
    object.setString("tenant", instance.id().tenant().value());
    object.setString("application", instance.id().application().value());
    object.setString("instance", instance.id().instance().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + instance.id().tenant().value() +
                                             "/application/" + instance.id().application().value() +
                                             "/instance/" + instance.id().instance().value() + "/job/",
                                             request.getUri()).toString());
    // Latest submitted package: source revision, source URL and commit.
    application.latestVersion().ifPresent(version -> {
        sourceRevisionToSlime(version.source(), object.setObject("source"));
        version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
        version.commit().ifPresent(commit -> object.setString("commit", commit));
    });
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    if (application.deploymentSpec().instance(instance.name()).isPresent()) {
        // NOTE(review): jobStatus is computed but never read below — looks like dead code;
        // verify sortedJobs has no side effects before removing.
        List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
                                              .steps(application.deploymentSpec().requireInstance(instance.name()))
                                              .sortedJobs(status.instanceJobs(instance.name()).values());
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
        // Windows during which the deployment spec blocks platform or revision changes.
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    globalEndpointsToSlime(object, instance);
    // Deployments, ordered by the deployment spec when one exists for this instance.
    List<Deployment> deployments =
            application.deploymentSpec().instance(instance.name())
                       .map(spec -> new DeploymentSteps(spec, controller::system))
                       .map(steps -> steps.sortedDeployments(instance.deployments().values()))
                       .orElse(List.copyOf(instance.deployments().values()));
    Cursor instancesArray = object.setArray("instances");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = instancesArray.addObject();
        if (deployment.zone().environment() == Environment.prod) {
            if (instance.rotations().size() == 1) {
                toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment),
                        deploymentObject);
            }
            if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
            }
        }
        if (recurseOverDeployments(request))
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            // Shallow form: identity plus a link to the full deployment resource.
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", instance.id().instance().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
    // Add environment/region stubs for production deployment jobs with no deployment yet.
    status.jobSteps().keySet().stream()
          .filter(job -> job.application().instance().equals(instance.name()))
          .filter(job -> job.type().isProduction() && job.type().isDeployment())
          .map(job -> job.type().zone(controller.system()))
          .filter(zone -> ! instance.deployments().containsKey(zone))
          .forEach(zone -> {
              Cursor deploymentObject = instancesArray.addObject();
              deploymentObject.setString("environment", zone.environment().value());
              deploymentObject.setString("region", zone.region().value());
          });
    application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/** Returns the deployment of the given instance in the given zone, serialized as JSON; 404 if absent. */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment,
                                String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().getInstance(id)
                                  .orElseThrow(() -> new NotExistsException(id + " not found"));
    ZoneId zone = requireZone(environment, region);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " is not deployed in " + zone);
    Slime slime = new Slime();
    toSlime(slime.setObject(), new DeploymentId(instance.id(), zone), deployment, request);
    return new SlimeJsonResponse(slime);
}
/** Serializes a change: its platform version, and its application revision when known. */
private void toSlime(Cursor object, Change change) {
    change.platform().ifPresent(platform -> object.setString("version", platform.toString()));
    change.application().ifPresent(revision -> {
        if ( ! revision.isUnknown())
            toSlime(revision, object.setObject("revision"));
    });
}
/** Serializes an endpoint: cluster, TLS flag, URL, scope and routing method. */
private void toSlime(Endpoint endpoint, Cursor object) {
    object.setString("cluster", endpoint.cluster().value());
    object.setBool("tls", endpoint.tls());
    object.setString("url", endpoint.url().toString());
    object.setString("scope", endpointScopeString(endpoint.scope()));
    object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
}
/**
 * Serializes the full deployment view to the given response object: identity, endpoints,
 * links, versions, rotation status, job status and metrics.
 * Field order defines the JSON output order.
 */
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
    response.setString("tenant", deploymentId.applicationId().tenant().value());
    response.setString("application", deploymentId.applicationId().application().value());
    response.setString("instance", deploymentId.applicationId().instance().value());
    response.setString("environment", deploymentId.zoneId().environment().value());
    response.setString("region", deploymentId.zoneId().region().value());
    var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));
    // Zone-scoped endpoints first, then global endpoints targeting this zone; legacy endpoints excluded.
    var endpointArray = response.setArray("endpoints");
    EndpointList zoneEndpoints = controller.routing().endpointsOf(deploymentId)
                                           .scope(Endpoint.Scope.zone)
                                           .not().legacy();
    for (var endpoint : controller.routing().directEndpoints(zoneEndpoints, deploymentId.applicationId())) {
        toSlime(endpoint, endpointArray.addObject());
    }
    EndpointList globalEndpoints = controller.routing().endpointsOf(application, deploymentId.applicationId().instance())
                                             .not().legacy()
                                             .targets(deploymentId.zoneId());
    for (var endpoint : controller.routing().directEndpoints(globalEndpoints, deploymentId.applicationId())) {
        toSlime(endpoint, endpointArray.addObject());
    }
    // Links to related resources and the monitoring dashboard.
    response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
    response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
    response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
    response.setString("version", deployment.version().toFullString());
    response.setString("revision", deployment.applicationVersion().id());
    response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
    // Zones with a deployment TTL report when this deployment will expire.
    controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
              .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));
    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
    sourceRevisionToSlime(deployment.applicationVersion().source(), response);
    var instance = application.instances().get(deploymentId.applicationId().instance());
    if (instance != null) {
        if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);
        // Report the job status for the deployment job of this zone: complete, pending or running.
        JobType.from(controller.system(), deployment.zone())
               .map(type -> new JobId(instance.id(), type))
               .map(status.jobSteps()::get)
               .ifPresent(stepStatus -> {
                   JobControllerApiHandlerHelper.applicationVersionToSlime(
                           response.setObject("applicationVersion"), deployment.applicationVersion());
                   if (!status.jobsToRun().containsKey(stepStatus.job().get()))
                       response.setString("status", "complete");
                   else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true))
                       response.setString("status", "pending");
                   else response.setString("status", "running");
               });
    }
    Cursor activity = response.setObject("activity");
    deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    DeploymentMetrics metrics = deployment.metrics();
    Cursor metricsObject = response.setObject("metrics");
    metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
    metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
    metricsObject.setDouble("documentCount", metrics.documentCount());
    metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
    metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
    metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}
/** Serializes the given application version to the given object; unknown versions carry no metadata and emit nothing. */
private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
    if (applicationVersion.isUnknown()) return;

    object.setLong("buildNumber", applicationVersion.buildNumber().getAsLong());
    object.setString("hash", applicationVersion.id());
    sourceRevisionToSlime(applicationVersion.source(), object.setObject("source"));
    applicationVersion.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
    applicationVersion.commit().ifPresent(commit -> object.setString("commit", commit));
}
/** Serializes the given source revision, if present, to the given object. */
private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
    revision.ifPresent(source -> {
        object.setString("gitRepository", source.repository());
        object.setString("gitBranch", source.branch());
        object.setString("gitCommit", source.commit());
    });
}
/** Serializes the given rotation state under a "bcpStatus" object. */
private void toSlime(RotationState state, Cursor object) {
    object.setObject("bcpStatus").setString("rotationStatus", rotationStateString(state));
}
/** Serializes one "endpointStatus" entry per assigned rotation, with its state in the given deployment. */
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
    Cursor endpointArray = object.setArray("endpointStatus");
    for (AssignedRotation assignedRotation : rotations) {
        Cursor entry = endpointArray.addObject();
        var targets = status.of(assignedRotation.rotationId());
        entry.setString("endpointId", assignedRotation.endpointId().id());
        entry.setString("rotationId", assignedRotation.rotationId().asString());
        entry.setString("clusterId", assignedRotation.clusterId().value());
        entry.setString("status", rotationStateString(status.of(assignedRotation.rotationId(), deployment)));
        entry.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
    }
}
/** Returns the monitoring system URI for the given deployment, as configured in the zone registry. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
    var zoneRegistry = controller.zoneRegistry();
    return zoneRegistry.getMonitoringSystemUri(deploymentId);
}
/**
 * Returns a non-broken, released version at least as old as the oldest platform the given application is on.
 *
 * If no known version is applicable, the newest version at least as old as the oldest platform is selected,
 * among all versions released for this system. If no such versions exists, throws an IllegalStateException.
 */
private Version compileVersion(TenantAndApplicationId id) {
    Version oldestPlatform = controller.applications().oldestInstalledPlatform(id);
    VersionStatus versionStatus = controller.readVersionStatus();
    return versionStatus.versions().stream()
                        .filter(version -> version.confidence().equalOrHigherThan(VespaVersion.Confidence.low))
                        .filter(VespaVersion::isReleased)
                        .map(VespaVersion::versionNumber)
                        .filter(version -> ! version.isAfter(oldestPlatform))
                        .max(Comparator.naturalOrder())
                        .orElseGet(() -> {
                            // Fall back to the newest applicable maven release which is not already
                            // known to this system. The set of known versions is built once here;
                            // the original rebuilt it inside the filter for every candidate version.
                            var knownVersions = versionStatus.versions().stream()
                                                             .map(VespaVersion::versionNumber)
                                                             .collect(Collectors.toSet());
                            return controller.mavenRepository().metadata().versions().stream()
                                             .filter(version -> ! version.isAfter(oldestPlatform))
                                             .filter(version -> ! knownVersions.contains(version))
                                             .max(Comparator.naturalOrder())
                                             .orElseThrow(() -> new IllegalStateException("No available releases of " +
                                                                                          controller.mavenRepository().artifactId()));
                        });
}
/** Takes the given deployment in or out of service, for both rotation- and policy-based global endpoints. */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().requireInstance(applicationId);
    ZoneId zone = requireZone(environment, region);
    if (instance.deployments().get(zone) == null)
        throw new NotExistsException(instance + " has no deployment in " + zone);

    DeploymentId deploymentId = new DeploymentId(instance.id(), zone);
    setGlobalRotationStatus(deploymentId, inService, request);
    setGlobalEndpointStatus(deploymentId, inService, request);
    return new MessageResponse(String.format("Successfully set %s in %s %s service",
                                             instance.id().toShortString(), zone, inService ? "in" : "out of"));
}
/** Set the global endpoint status for given deployment. This only applies to global endpoints backed by a cloud service */
private void setGlobalEndpointStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
    GlobalRouting.Status status = inService ? GlobalRouting.Status.in : GlobalRouting.Status.out;
    // Operators and tenants are recorded as distinct agents for auditing.
    GlobalRouting.Agent agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant;
    controller.routing().policies().setGlobalRoutingStatus(deployment, status, agent);
}
/** Set the global rotation status for given deployment. This only applies to global endpoints backed by a rotation */
private void setGlobalRotationStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
    Inspector requestData = toSlime(request.getData()).get();
    String reason = mandatory("reason", requestData).asString();  // a reason is required for auditability
    GlobalRouting.Agent agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant;
    long timestamp = controller.clock().instant().getEpochSecond();
    EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
    controller.routing().setGlobalRotationStatus(deployment, new EndpointStatus(status, reason, agent.name(), timestamp));
}
/** Returns the global rotation override status for each endpoint of the given deployment. */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    Cursor overrides = slime.setObject().setArray("globalrotationoverride");
    controller.routing().globalRotationStatus(deploymentId)
              .forEach((endpoint, endpointStatus) -> {
                  // Each entry is an upstream name string followed by a status object.
                  overrides.addString(endpoint.upstreamIdOf(deploymentId));
                  Cursor statusObject = overrides.addObject();
                  statusObject.setString("status", endpointStatus.getStatus().name());
                  statusObject.setString("reason", endpointStatus.getReason() == null ? "" : endpointStatus.getReason());
                  statusObject.setString("agent", endpointStatus.getAgent() == null ? "" : endpointStatus.getAgent());
                  statusObject.setLong("timestamp", endpointStatus.getEpoch());
              });
    return new SlimeJsonResponse(slime);
}
/** Returns the rotation state of the given deployment, for the rotation matching the given endpoint id. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = requireZone(environment, region);
    RotationId rotation = findRotationId(instance, endpointId);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " has no deployment in " + zone);

    Slime slime = new Slime();
    toSlime(instance.rotationStatus().of(rotation, deployment), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/**
 * Returns metering data for the given tenant and application: the current resource usage rate,
 * this and last month's aggregates, and a per-instance snapshot history for cpu, memory and disk.
 */
private HttpResponse metering(String tenant, String application, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();

    MeteringData meteringData = controller.serviceRegistry()
                                          .meteringService()
                                          .getMeteringData(TenantName.from(tenant), ApplicationName.from(application));

    // Aggregates: identical cpu/mem/disk triples, factored into a helper.
    setResourceAllocation(root.setObject("currentrate"), meteringData.getCurrentSnapshot());
    setResourceAllocation(root.setObject("thismonth"), meteringData.getThisMonth());
    setResourceAllocation(root.setObject("lastmonth"), meteringData.getLastMonth());

    // Per-instance time series, one "data" array per resource type.
    Map<ApplicationId, List<ResourceSnapshot>> history = meteringData.getSnapshotHistory();
    Cursor details = root.setObject("details");
    Cursor detailsCpu = details.setObject("cpu");
    Cursor detailsMem = details.setObject("mem");
    Cursor detailsDisk = details.setObject("disk");

    history.forEach((applicationId, snapshots) -> {
        String instanceName = applicationId.instance().value();
        Cursor cpuData = detailsCpu.setObject(instanceName).setArray("data");
        Cursor memData = detailsMem.setObject(instanceName).setArray("data");
        Cursor diskData = detailsDisk.setObject(instanceName).setArray("data");
        snapshots.forEach(snapshot -> {
            long unixms = snapshot.getTimestamp().toEpochMilli();
            setDataPoint(cpuData.addObject(), unixms, snapshot.getCpuCores());
            setDataPoint(memData.addObject(), unixms, snapshot.getMemoryGb());
            setDataPoint(diskData.addObject(), unixms, snapshot.getDiskGb());
        });
    });
    return new SlimeJsonResponse(slime);
}

/** Writes the cpu/mem/disk fields of the given resource allocation to the given object. */
private static void setResourceAllocation(Cursor object, ResourceAllocation allocation) {
    object.setDouble("cpu", allocation.getCpuCores());
    object.setDouble("mem", allocation.getMemoryGb());
    object.setDouble("disk", allocation.getDiskGb());
}

/** Writes a single { unixms, value } data point to the given object. */
private static void setDataPoint(Cursor object, long unixms, double value) {
    object.setLong("unixms", unixms);
    object.setDouble("value", value);
}
/** Returns the change currently rolling out for the given instance; an empty object when nothing is deploying. */
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    Change change = instance.change();
    if ( ! change.isEmpty()) {
        change.platform().ifPresent(version -> root.setString("platform", version.toString()));
        change.application().ifPresent(version -> root.setString("application", version.id()));
        root.setBool("pinned", change.isPinned());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns whether the given deployment is currently suspended in the orchestrator. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    slime.setObject().setBool("suspended", controller.applications().isSuspended(deploymentId));
    return new SlimeJsonResponse(slime);
}
/** Returns the service view of the given deployment, as reported by the config servers. */
private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
    ZoneId zone = requireZone(environment, region);
    ApplicationId applicationId = new ApplicationId.Builder().tenant(tenantName)
                                                             .applicationName(applicationName)
                                                             .instanceName(instanceName)
                                                             .build();
    ServiceApiResponse response = new ServiceApiResponse(zone,
                                                         applicationId,
                                                         controller.zoneRegistry().getConfigServerApiUris(zone),
                                                         request.getUri());
    response.setResponse(applicationView);
    return response;
}
/**
 * Proxies a service API request for the given deployment. Requests for the cluster controller's
 * status pages are fetched directly and returned as HTML; everything else is returned as JSON.
 */
private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));

    if ("container-clustercontroller".equals((serviceName)) && restPath.contains("/status/")) {
        String[] parts = restPath.split("/status/");
        // String.split drops trailing empty segments, so a restPath ending in "/status/" yields a
        // single-element array; guard the parts[1] access instead of throwing AIOOBE.
        String statusPath = parts.length > 1 ? parts[1] : "";
        String result = controller.serviceRegistry().configServer().getClusterControllerStatus(deploymentId, parts[0], statusPath);
        return new HtmlResponse(result);
    }

    Map<?,?> result = controller.serviceRegistry().configServer().getServiceApiResponse(deploymentId, serviceName, restPath);
    ServiceApiResponse response = new ServiceApiResponse(deploymentId.zoneId(),
                                                         deploymentId.applicationId(),
                                                         controller.zoneRegistry().getConfigServerApiUris(deploymentId.zoneId()),
                                                         request.getUri());
    response.setResponse(result, serviceName, restPath);
    return response;
}
/** Returns the content of the deployed application package at the given path. */
private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, String restPath, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deploymentId = new DeploymentId(applicationId, requireZone(environment, region));
    return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, "/" + restPath, request.getUri());
}
/** Updates an existing tenant from the request body; 404s rather than creating one that does not exist. */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName);
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    return tenant(controller.tenants().require(tenant), request);
}
/** Creates a new tenant from the request body and returns its serialized form. */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName name = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().create(accessControlRequests.specification(name, requestObject),
                                accessControlRequests.credentials(name, requestObject, request.getJDiscRequest()));
    return tenant(controller.tenants().require(name), request);
}
/** Creates a new application under the given tenant and returns its id as the response. */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    // The created application is not needed here (the original bound it to an unused local);
    // the response is built from the id alone.
    controller.applications().createApplication(id, credentials);
    Slime slime = new Slime();
    toSlime(id, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Creates a new instance, implicitly creating its parent application first if this is its first instance. */
private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    if (controller.applications().getApplication(applicationId).isEmpty())
        createApplication(tenantName, applicationName, request);

    var instanceId = applicationId.instance(instanceName);
    controller.applications().createInstance(instanceId);

    Slime slime = new Slime();
    toSlime(instanceId, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
    request = controller.auditLogger().log(request);
    String versionString = readToString(request.getData());
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        VersionStatus versionStatus = controller.readVersionStatus();
        Version version = Version.fromString(versionString);
        // An empty version means "deploy the current system version".
        if (version.equals(Version.emptyVersion))
            version = controller.systemVersion(versionStatus);
        if ( ! versionStatus.isActive(version)) {
            String activeVersions = versionStatus.versions()
                                                 .stream()
                                                 .map(VespaVersion::versionNumber)
                                                 .map(Version::toString)
                                                 .collect(joining(", "));
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + activeVersions);
        }
        Change change = pin ? Change.of(version).withPin() : Change.of(version);
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    controller.auditLogger().log(request);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        // Fail with a clear message instead of the original's unchecked Optional.get(), which
        // threw a bare NoSuchElementException when no package has been submitted yet.
        Change change = Change.of(application.get().latestVersion()
                                             .orElseThrow(() -> new IllegalArgumentException("No known application package for " + id)));
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Change change = application.get().require(id.instance()).change();
        if (change.isEmpty()) {
            response.append("No deployment in progress for ").append(id).append(" at this time");
            return;
        }
        // Locale.ROOT keeps the enum lookup stable regardless of the JVM's default locale
        // (e.g. a Turkish default locale would upper-case "pin" to "PİN" and break valueOf).
        ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase(java.util.Locale.ROOT));
        controller.applications().deploymentTrigger().cancelChange(id, cancel);
        response.append("Changed deployment from '").append(change).append("' to '").append(controller.applications().requireInstance(id).change()).append("' for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */
private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    // Both filters are comma-separated, optional request properties; blanks are ignored.
    List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream()
                                        .flatMap(clusters -> Stream.of(clusters.split(",")))
                                        .filter(cluster -> ! cluster.isBlank())
                                        .collect(toUnmodifiableList());
    List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream()
                                         .flatMap(types -> Stream.of(types.split(",")))
                                         .filter(type -> ! type.isBlank())
                                         .collect(toUnmodifiableList());
    controller.applications().reindex(id, zone, clusterNames, documentTypes, request.getBooleanProperty("indexedOnly"));
    // Append the clusters and types clauses independently: the original nested the types clause
    // inside the clusters clause, silently omitting it when only document types were given.
    return new MessageResponse("Requested reindexing of " + id + " in " + zone +
                               (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames)) +
                               (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)));
}
/** Gets reindexing status of an application in a zone. */
private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);

    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setBool("enabled", reindexing.enabled());

    // Clusters, and the pending and ready document types within each, sorted by name for stable output.
    Cursor clustersArray = root.setArray("clusters");
    reindexing.clusters().entrySet().stream().sorted(comparingByKey())
              .forEach(clusterEntry -> {
                  Cursor clusterObject = clustersArray.addObject();
                  clusterObject.setString("name", clusterEntry.getKey());

                  Cursor pendingArray = clusterObject.setArray("pending");
                  clusterEntry.getValue().pending().entrySet().stream().sorted(comparingByKey())
                              .forEach(pendingEntry -> {
                                  Cursor pendingObject = pendingArray.addObject();
                                  pendingObject.setString("type", pendingEntry.getKey());
                                  pendingObject.setLong("requiredGeneration", pendingEntry.getValue());
                              });

                  Cursor readyArray = clusterObject.setArray("ready");
                  clusterEntry.getValue().ready().entrySet().stream().sorted(comparingByKey())
                              .forEach(readyEntry -> {
                                  Cursor readyObject = readyArray.addObject();
                                  readyObject.setString("type", readyEntry.getKey());
                                  setStatus(readyObject, readyEntry.getValue());
                              });
              });
    return new SlimeJsonResponse(slime);
}
/**
 * Serializes the given reindexing status to the given object.
 * Every field is optional and is omitted from the output when absent.
 */
void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
    status.readyAt().ifPresent(readyAt -> statusObject.setLong("readyAtMillis", readyAt.toEpochMilli()));
    status.startedAt().ifPresent(startedAt -> statusObject.setLong("startedAtMillis", startedAt.toEpochMilli()));
    status.endedAt().ifPresent(endedAt -> statusObject.setLong("endedAtMillis", endedAt.toEpochMilli()));
    // The state is mapped to its wire name; states that map to null are dropped by Optional.map.
    status.state().map(ApplicationApiHandler::toString).ifPresent(state -> statusObject.setString("state", state));
    status.message().ifPresent(message -> statusObject.setString("message", message));
    status.progress().ifPresent(progress -> statusObject.setDouble("progress", progress));
}
/** Returns the wire name of the given reindexing state, or null for states not exposed by this API. */
private static String toString(ApplicationReindexing.State state) {
    switch (state) {
        case PENDING: return "pending";
        case RUNNING: return "running";
        case FAILED: return "failed";
        case SUCCESSFUL: return "successful";
    }
    return null;
}
/** Enables reindexing of an application in a zone. */
private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    controller.applications().enableReindexing(id, zone);
    return new MessageResponse("Enabled reindexing of " + id + " in " + zone);
}
/** Disables reindexing of an application in a zone. */
private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    controller.applications().disableReindexing(id, zone);
    return new MessageResponse("Disabled reindexing of " + id + " in " + zone);
}
/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    // All filters are optional request properties; an empty filter targets the whole deployment.
    Optional<HostName> hostName = Optional.ofNullable(request.getProperty("hostname")).map(HostName::from);
    Optional<ClusterSpec.Type> clusterType = Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from);
    Optional<ClusterSpec.Id> clusterId = Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from);
    controller.applications().restart(deploymentId, new RestartFilter().withHostName(hostName)
                                                                       .withClusterType(clusterType)
                                                                       .withClusterId(clusterId));
    return new MessageResponse("Requested restart of " + deploymentId);
}
/** Set suspension status of the given deployment. */
private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    controller.applications().setSuspension(deploymentId, suspend);
    String action = suspend ? "Suspended" : "Resumed";
    return new MessageResponse(action + " orchestration of " + deploymentId);
}
/**
 * Deploys the application package in the request directly through the given job's zone.
 * Only manually deployed environments are allowed, unless the caller is an operator.
 */
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
    if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");

    Map<String, byte[]> dataParts = parseDataParts(request);
    // Use the same key constant for both the presence check and the lookup — the original checked
    // the "applicationZip" literal but read EnvironmentResource.APPLICATION_ZIP.
    if ( ! dataParts.containsKey(EnvironmentResource.APPLICATION_ZIP))
        throw new IllegalArgumentException("Missing required form part 'applicationZip'");

    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                     Optional.of(id.instance()),
                                                                     Optional.of(type.zone(controller.system())),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));

    // An explicit platform version may be given in the optional 'deployOptions' JSON part.
    Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions"))
                                        .map(json -> SlimeUtils.jsonToSlime(json).get())
                                        .flatMap(options -> optional("vespaVersion", options))
                                        .map(Version::fromString);

    controller.jobController().deploy(id, type, version, applicationPackage);
    RunId runId = controller.jobController().last(id, type).get().id();

    Slime slime = new Slime();
    Cursor rootObject = slime.setObject();
    rootObject.setString("message", "Deployment started in " + runId +
                                    ". This may take about 15 minutes the first time.");
    rootObject.setLong("run", runId.number());
    return new SlimeJsonResponse(slime);
}
/**
 * Deploys to the given zone from a multipart request with a mandatory 'deployOptions' JSON part.
 *
 * Handles three modes, in order:
 * 1. the system proxy application: no package or version allowed; deploys the current system version;
 * 2. a registered application version, identified by 'sourceRevision' + 'buildNumber' (mutually
 *    exclusive with an inline 'applicationZip' part); the package is fetched from storage;
 * 3. direct redeployment of the version already deployed in the zone, when 'deployDirectly' is set
 *    and no package, version or revision is given.
 */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( ! dataParts.containsKey("deployOptions"))
        return ErrorResponse.badRequest("Missing required form part 'deployOptions'");

    Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();

    /*
     * Special handling of the proxy application (the only system application with an application package)
     * Setting any other deployOptions here is not supported for now (e.g. specifying version), but
     * this might be handy later to handle emergency downgrades.
     */
    boolean isZoneApplication = SystemApplication.proxy.id().equals(applicationId);
    if (isZoneApplication) {
        // An explicit version is rejected: system applications follow the system version.
        String versionStr = deployOptions.field("vespaVersion").asString();
        boolean versionPresent = !versionStr.isEmpty() && !versionStr.equals("null");
        if (versionPresent) {
            throw new RuntimeException("Version not supported for system applications");
        }
        // Refuse to deploy while the system is upgrading, or before its version is known.
        VersionStatus versionStatus = controller.readVersionStatus();
        if (versionStatus.isUpgrading()) {
            throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
        }
        Optional<VespaVersion> systemVersion = versionStatus.systemVersion();
        if (systemVersion.isEmpty()) {
            throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
        }
        ActivateResult result = controller.applications()
                                          .deploySystemApplicationPackage(SystemApplication.proxy, zone, systemVersion.get().versionNumber());
        return new SlimeJsonResponse(toSlime(result));
    }

    /*
     * Normal applications from here
     */
    Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip"))
                                                              .map(ApplicationPackage::new);
    // NOTE(review): this lookup appears unused in the visible code — confirm before removing.
    Optional<com.yahoo.vespa.hosted.controller.Application> application = controller.applications().getApplication(TenantAndApplicationId.from(applicationId));

    // 'sourceRevision' and 'buildNumber' must be given together, and identify a registered
    // application version whose package is then fetched from storage.
    Inspector sourceRevision = deployOptions.field("sourceRevision");
    Inspector buildNumber = deployOptions.field("buildNumber");
    if (sourceRevision.valid() != buildNumber.valid())
        throw new IllegalArgumentException("Source revision and build number must both be provided, or not");

    Optional<ApplicationVersion> applicationVersion = Optional.empty();
    if (sourceRevision.valid()) {
        if (applicationPackage.isPresent())
            throw new IllegalArgumentException("Application version and application package can't both be provided.");

        applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision),
                                                                 buildNumber.asLong()));
        applicationPackage = Optional.of(controller.applications().getApplicationPackage(applicationId,
                                                                                         applicationVersion.get()));
    }

    boolean deployDirectly = deployOptions.field("deployDirectly").asBool();
    Optional<Version> vespaVersion = optional("vespaVersion", deployOptions).map(Version::new);

    // Redeployment mode: reuse the application and platform versions of the existing deployment.
    if (deployDirectly && applicationPackage.isEmpty() && applicationVersion.isEmpty() && vespaVersion.isEmpty()) {
        Optional<Deployment> deployment = controller.applications().getInstance(applicationId)
                                                    .map(Instance::deployments)
                                                    .flatMap(deployments -> Optional.ofNullable(deployments.get(zone)));

        if(deployment.isEmpty())
            throw new IllegalArgumentException("Can't redeploy application, no deployment currently exist");

        ApplicationVersion version = deployment.get().applicationVersion();
        if(version.isUnknown())
            throw new IllegalArgumentException("Can't redeploy application, application version is unknown");

        applicationVersion = Optional.of(version);
        vespaVersion = Optional.of(deployment.get().version());
        applicationPackage = Optional.of(controller.applications().getApplicationPackage(applicationId,
                                                                                         applicationVersion.get()));
    }

    DeployOptions deployOptionsJsonClass = new DeployOptions(deployDirectly,
                                                             vespaVersion,
                                                             deployOptions.field("ignoreValidationErrors").asBool(),
                                                             deployOptions.field("deployCurrentVersion").asBool());

    // Identity verification runs against whichever package was provided or fetched above.
    applicationPackage.ifPresent(aPackage -> controller.applications().verifyApplicationIdentityConfiguration(applicationId.tenant(),
                                                                                                              Optional.of(applicationId.instance()),
                                                                                                              Optional.of(zone),
                                                                                                              aPackage,
                                                                                                              Optional.of(requireUserPrincipal(request))));

    ActivateResult result = controller.applications().deploy(applicationId,
                                                             zone,
                                                             applicationPackage,
                                                             applicationVersion,
                                                             deployOptionsJsonClass);
    return new SlimeJsonResponse(toSlime(result));
}
/** Deletes the given tenant, authorizing with the credentials in the request body. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");

    TenantName name = tenant.get().name();
    Credentials credentials = accessControlRequests.credentials(name,
                                                                toSlime(request.getData()).get(),
                                                                request.getJDiscRequest());
    controller.tenants().delete(name, credentials);
    // Respond with the tenant that was just deleted.
    return tenant(tenant.get(), request);
}
/** Deletes the given application, authorizing with the credentials in the request body. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(),
                                                                toSlime(request.getData()).get(),
                                                                request.getJDiscRequest());
    controller.applications().deleteApplication(id, credentials);
    return new MessageResponse("Deleted application " + id);
}
/** Deletes the given instance; also deletes the parent application when this was its last instance. */
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    controller.applications().deleteInstance(applicationId.instance(instanceName));
    if (controller.applications().requireApplication(applicationId).instances().isEmpty()) {
        Credentials credentials = accessControlRequests.credentials(applicationId.tenant(),
                                                                    toSlime(request.getData()).get(),
                                                                    request.getJDiscRequest());
        controller.applications().deleteApplication(applicationId, credentials);
    }
    return new MessageResponse("Deleted instance " + applicationId.instance(instanceName).toFullString());
}
/** Deactivates the given deployment, removing it from its zone. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId id = new DeploymentId(applicationId, requireZone(environment, region));
    controller.applications().deactivate(id.applicationId(), id.zoneId());
    return new MessageResponse("Deactivated " + id);
}
/** Returns test config for indicated job, with production deployments of the default instance. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
    ApplicationId defaultInstanceId = TenantAndApplicationId.from(id).defaultInstance();
    // Start from the default instance's production deployments ...
    var deployments = controller.applications()
                                .getInstance(defaultInstanceId).stream()
                                .flatMap(instance -> instance.productionDeployments().keySet().stream())
                                .map(zone -> new DeploymentId(defaultInstanceId, zone))
                                .collect(Collectors.toCollection(HashSet::new));
    // ... and add the zone under test, unless this is a production job.
    var testedZone = type.zone(controller.system());
    if ( ! type.isProduction())
        deployments.add(new DeploymentId(id, testedZone));

    return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
                                                                  type,
                                                                  false,
                                                                  controller.routing().zoneEndpointsOf(deployments),
                                                                  controller.applications().reachableContentClustersByZone(deployments)));
}
/** Parses a source revision from the given object; all of "repository", "branch" and "commit" are required. */
private static SourceRevision toSourceRevision(Inspector object) {
    Inspector repository = object.field("repository");
    Inspector branch = object.field("branch");
    Inspector commit = object.field("commit");
    if (repository.valid() && branch.valid() && commit.valid())
        return new SourceRevision(repository.asString(), branch.asString(), commit.asString());
    throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
}
/** Returns the tenant with the given name, or throws NotExistsException if it does not exist. */
private Tenant getTenantOrThrow(String tenantName) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    return tenant.orElseThrow(() -> new NotExistsException(new TenantId(tenantName)));
}
/**
 * Serializes the given tenant, including type-specific metadata and all its applications
 * (or only production instances, when requested), to the given object.
 */
private void toSlime(Cursor object, Tenant tenant, HttpRequest request) {
    object.setString("tenant", tenant.name().value());
    object.setString("type", tenantType(tenant));
    List<com.yahoo.vespa.hosted.controller.Application> applications = controller.applications().asList(tenant.name());
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            object.setString("athensDomain", athenzTenant.domain().getName());
            object.setString("property", athenzTenant.property().id());
            athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
            // Contact info is optional, and includes a list of person lists.
            athenzTenant.contact().ifPresent(c -> {
                object.setString("propertyUrl", c.propertyUrl().toString());
                object.setString("contactsUrl", c.url().toString());
                object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
                Cursor contactsArray = object.setArray("contacts");
                c.persons().forEach(persons -> {
                    Cursor personArray = contactsArray.addArray();
                    persons.forEach(personArray::addString);
                });
            });
            break;
        case cloud: {
            CloudTenant cloudTenant = (CloudTenant) tenant;
            cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName()));
            Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
            cloudTenant.developerKeys().forEach((key, user) -> {
                Cursor keyObject = pemDeveloperKeysArray.addObject();
                keyObject.setString("key", KeyUtils.toPem(key));
                keyObject.setString("user", user.getName());
            });
            toSlime(object, cloudTenant.tenantSecretStores());
            // Quota usage is summed over all of the tenant's applications.
            var tenantQuota = controller.serviceRegistry().billingController().getQuota(tenant.name());
            var usedQuota = applications.stream()
                                        .map(com.yahoo.vespa.hosted.controller.Application::quotaUsage)
                                        .reduce(QuotaUsage.none, QuotaUsage::add);
            toSlime(tenantQuota, usedQuota, object.setObject("quota"));
            break;
        }
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    Cursor applicationArray = object.setArray("applications");
    for (com.yahoo.vespa.hosted.controller.Application application : applications) {
        DeploymentStatus status = controller.jobController().deploymentStatus(application);
        // With recursion, the full instance is serialized; otherwise only a reference (id + url).
        for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                      : application.instances().values())
            if (recurseOverApplications(request))
                toSlime(applicationArray.addObject(), instance, status, request);
            else
                toSlime(instance.id(), applicationArray.addObject(), request);
    }
    tenantMetaDataToSlime(tenant, object.setObject("metaData"));
}
/** Writes the given quota, and its current usage, to the given object. A missing budget is written as nix. */
private void toSlime(Quota quota, QuotaUsage usage, Cursor object) {
    quota.budget().ifPresentOrElse(amount -> object.setDouble("budget", amount.doubleValue()),
                                   () -> object.setNix("budget"));
    object.setDouble("budgetUsed", usage.rate());
    quota.maxClusterSize().ifPresent(size -> object.setLong("clusterSize", size));
}
/** Writes the given cluster resources, and their cost, to the given object. */
private void toSlime(ClusterResources resources, Cursor object) {
    object.setLong("nodes", resources.nodes());
    object.setLong("groups", resources.groups());
    toSlime(resources.nodeResources(), object.setObject("nodeResources"));
    // Listed cost is divided by 3 in non-public systems; rounded to two decimals either way.
    double costDivisor = controller.zoneRegistry().system().isPublic() ? 1.0 : 3.0;
    double totalCost = resources.nodes() * resources.nodeResources().cost();
    object.setDouble("cost", Math.round(totalCost * 100.0 / costDivisor) / 100.0);
}
/** Writes cpu, memory and disk utilization to the given object. */
private void utilizationToSlime(Cluster.Utilization utilization, Cursor object) {
    object.setDouble("cpu", utilization.cpu());
    object.setDouble("memory", utilization.memory());
    object.setDouble("disk", utilization.disk());
}
/** Writes each scaling event — from/to resources and its timestamp — as an object in the given array. */
private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor array) {
    for (Cluster.ScalingEvent event : scalingEvents) {
        Cursor eventObject = array.addObject();
        toSlime(event.from(), eventObject.setObject("from"));
        toSlime(event.to(), eventObject.setObject("to"));
        eventObject.setLong("at", event.at().toEpochMilli());
    }
}
/** Writes the given node resources to the given object. */
private void toSlime(NodeResources resources, Cursor target) {
    target.setDouble("vcpu", resources.vcpu());
    target.setDouble("memoryGb", resources.memoryGb());
    target.setDouble("diskGb", resources.diskGb());
    target.setDouble("bandwidthGbps", resources.bandwidthGbps());
    target.setString("diskSpeed", valueOf(resources.diskSpeed()));
    target.setString("storageType", valueOf(resources.storageType()));
}
/** Serializes a short summary of the given tenant — name, type metadata and a link — for the tenants list. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
    object.setString("tenant", tenant.name().value());
    Cursor metaData = object.setObject("metaData");
    metaData.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            metaData.setString("athensDomain", athenzTenant.domain().getName());
            metaData.setString("property", athenzTenant.property().id());
            break;
        case cloud: break; // no extra metadata for cloud tenants in the list view
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/**
 * Serializes activity metadata for the given tenant: creation time, latest dev deployment,
 * latest submission, and last login times per user level. Absent values are simply omitted.
 */
private void tenantMetaDataToSlime(Tenant tenant, Cursor object) {
    List<com.yahoo.vespa.hosted.controller.Application> applications = controller.applications().asList(tenant.name());
    // Most recent start of any dev-environment job run across all the tenant's instances.
    Optional<Instant> lastDev = applications.stream()
                                            .flatMap(application -> application.instances().values().stream())
                                            .flatMap(instance -> controller.jobController().jobs(instance.id()).stream()
                                                                           .filter(jobType -> jobType.environment() == Environment.dev)
                                                                           .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream()))
                                            .map(Run::start)
                                            .max(Comparator.naturalOrder());
    // Most recent build time of any submitted application version.
    Optional<Instant> lastSubmission = applications.stream()
                                                   .flatMap(app -> app.latestVersion().flatMap(ApplicationVersion::buildTime).stream())
                                                   .max(Comparator.naturalOrder());
    object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli());
    lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli()));
    lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user)
          .ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer)
          .ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator)
          .ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli()));
}
/** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to given query*/
private URI withPathAndQuery(String newPath, String newQuery, URI uri) {
    try {
        return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null);
    }
    catch (URISyntaxException e) {
        // All components come from an already-parsed URI, so this should be unreachable.
        throw new RuntimeException("Will not happen", e);
    }
}
/** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
private URI withPath(String newPath, URI uri) {
    return withPathAndQuery(newPath, null, uri); // null query drops any existing query string
}
/** Returns the /application/v4 API path identifying the given deployment. */
private String toPath(DeploymentId id) {
    return path("/application", "v4",
                "tenant", id.applicationId().tenant(),
                "application", id.applicationId().application(),
                "instance", id.applicationId().instance(),
                "environment", id.zoneId().environment(),
                "region", id.zoneId().region());
}
/**
 * Parses the given string as a long, returning the given default when the string is null.
 *
 * @throws IllegalArgumentException if the string is non-null but not a valid long
 */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null)
        return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    } catch (NumberFormatException e) {
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'");
    }
}
/** Serializes a summary of the given run: number, target versions, reason and end (or start) time. */
private void toSlime(Run run, Cursor object) {
    object.setLong("id", run.id().number());
    object.setString("version", run.versions().targetPlatform().toFullString());
    if ( ! run.versions().targetApplication().isUnknown())
        toSlime(run.versions().targetApplication(), object.setObject("revision"));
    // NOTE(review): reason is hard-coded; presumably a real trigger reason should be threaded through here.
    object.setString("reason", "unknown reason");
    object.setLong("at", run.end().orElse(run.start()).toEpochMilli());
}
/**
 * Reads the given stream (at most 1 MB) and parses its content as JSON.
 *
 * @throws RuntimeException wrapping the underlying IOException if reading the stream fails
 */
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        // Chain the cause — the original threw a bare RuntimeException, hiding what went wrong.
        throw new RuntimeException("Failed to read request body", e);
    }
}
/** Returns the user principal of the given request, or throws if none is set. */
private static Principal requireUserPrincipal(HttpRequest request) {
    return Optional.ofNullable(request.getJDiscRequest().getUserPrincipal())
                   .orElseThrow(() -> new InternalServerErrorException("Expected a user principal"));
}
/** Returns the named field of the given object, or throws if it is missing. */
private Inspector mandatory(String key, Inspector object) {
    Inspector field = object.field(key);
    if (field.valid())
        return field;
    throw new IllegalArgumentException("'" + key + "' is missing");
}
/** Returns the string value of the named field of the given object, or empty if the field is missing. */
private Optional<String> optional(String key, Inspector object) {
    return SlimeUtils.optionalString(object.field(key));
}
/** Joins the string forms of the given elements with '/' into a path. */
private static String path(Object... elements) {
    return Joiner.on("/").join(elements);
}
/** Serializes a reference — tenant, application and API url — to the given application, to the given object. */
private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    String apiPath = "/application/v4" +
                     "/tenant/" + id.tenant().value() +
                     "/application/" + id.application().value();
    object.setString("url", withPath(apiPath, request.getUri()).toString());
}
/** Serializes a reference — tenant, application, instance and API url — to the given instance, to the given object. */
private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    object.setString("instance", id.instance().value());
    String apiPath = "/application/v4" +
                     "/tenant/" + id.tenant().value() +
                     "/application/" + id.application().value() +
                     "/instance/" + id.instance().value();
    object.setString("url", withPath(apiPath, request.getUri()).toString());
}
/**
 * Serializes the result of a deployment activation: revision, package size, config server
 * prepare log messages, and any required restart/refeed actions.
 */
private Slime toSlime(ActivateResult result) {
    Slime slime = new Slime();
    Cursor object = slime.setObject();
    object.setString("revisionId", result.revisionId().id());
    object.setLong("applicationZipSize", result.applicationZipSizeBytes());
    Cursor logArray = object.setArray("prepareMessages");
    if (result.prepareResponse().log != null) { // log may be absent from the config server response
        for (Log logMessage : result.prepareResponse().log) {
            Cursor logObject = logArray.addObject();
            logObject.setLong("time", logMessage.time);
            logObject.setString("level", logMessage.level);
            logObject.setString("message", logMessage.message);
        }
    }
    Cursor changeObject = object.setObject("configChangeActions");
    // Services which must be restarted for the new config to take effect.
    Cursor restartActionsArray = changeObject.setArray("restart");
    for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
        Cursor restartActionObject = restartActionsArray.addObject();
        restartActionObject.setString("clusterName", restartAction.clusterName);
        restartActionObject.setString("clusterType", restartAction.clusterType);
        restartActionObject.setString("serviceType", restartAction.serviceType);
        serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
        stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
    }
    // Document types which must be re-fed due to incompatible schema changes.
    Cursor refeedActionsArray = changeObject.setArray("refeed");
    for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
        Cursor refeedActionObject = refeedActionsArray.addObject();
        refeedActionObject.setString("name", refeedAction.name);
        refeedActionObject.setString("documentType", refeedAction.documentType);
        refeedActionObject.setString("clusterName", refeedAction.clusterName);
        serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
        stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
    }
    return slime;
}
/** Writes each of the given service infos as an object in the given array. */
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
    for (ServiceInfo info : serviceInfoList) {
        Cursor infoObject = array.addObject();
        infoObject.setString("serviceName", info.serviceName);
        infoObject.setString("serviceType", info.serviceType);
        infoObject.setString("configId", info.configId);
        infoObject.setString("hostName", info.hostName);
    }
}
/** Adds each of the given strings to the given array, in order. */
private void stringsToSlime(List<String> strings, Cursor array) {
    strings.forEach(array::addString);
}
/** Writes the given secret stores as a "secretStores" array in the given object. */
private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) {
    Cursor array = object.setArray("secretStores");
    for (TenantSecretStore store : tenantSecretStores) {
        Cursor storeObject = array.addObject();
        storeObject.setString("name", store.getName());
        storeObject.setString("awsId", store.getAwsId());
        storeObject.setString("role", store.getRole());
    }
}
/**
 * Reads the entire stream as a UTF-8 string, or returns null if the stream is empty.
 * The stream is intentionally left open for the caller.
 */
private String readToString(InputStream stream) {
    // Explicit UTF-8: the no-charset Scanner constructor used the platform default,
    // which is wrong for HTTP request bodies.
    Scanner scanner = new Scanner(stream, "UTF-8").useDelimiter("\\A");
    if ( ! scanner.hasNext()) return null;
    return scanner.next();
}
/** Returns whether the request asks for recursion at tenant level (or deeper). */
private static boolean recurseOverTenants(HttpRequest request) {
    return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive"));
}
/** Returns whether the request asks for recursion at application level (or deeper). */
private static boolean recurseOverApplications(HttpRequest request) {
    return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive"));
}
/** Returns whether the request asks for recursion down to deployment level. */
private static boolean recurseOverDeployments(HttpRequest request) {
    // ImmutableSet.contains tolerates the null returned when the property is absent.
    return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive"));
}
/** Returns whether the request asks to list only production instances. */
private static boolean showOnlyProductionInstances(HttpRequest request) {
    return "true".equals(request.getProperty("production"));
}
/** Returns the API name of the given tenant's type. */
private static String tenantType(Tenant tenant) {
    switch (tenant.type()) {
        case athenz: return "ATHENS";
        case cloud: return "CLOUD";
        // Report the actual type, as the sibling serializers do — the class name said nothing
        // about which enum constant was unexpected.
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
}
/** Extracts the application id from the {tenant}/{application}/{instance} path segments. */
private static ApplicationId appIdFromPath(Path path) {
    return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
}
/** Extracts the job type from the {jobtype} path segment. */
private static JobType jobTypeFromPath(Path path) {
    return JobType.fromJobName(path.get("jobtype"));
}
/** Extracts the run id — application, job type and run number — from the request path. */
private static RunId runIdFromPath(Path path) {
    long runNumber = Long.parseLong(path.get("number"));
    return new RunId(appIdFromPath(path), jobTypeFromPath(path), runNumber);
}
/**
 * Handles submission of a new application revision: parses the multipart request,
 * validates the optional source metadata, verifies identity configuration, and
 * forwards the package (and test package) to the job controller.
 */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
    Map<String, byte[]> dataParts = parseDataParts(request);
    Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
    long projectId = Math.max(1, submitOptions.field("projectId").asLong()); // project ids start at 1
    Optional<String> repository = optional("repository", submitOptions);
    Optional<String> branch = optional("branch", submitOptions);
    Optional<String> commit = optional("commit", submitOptions);
    // A source revision is only recorded when all three of its parts are present.
    Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
                                              ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
                                              : Optional.empty();
    Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
    Optional<String> authorEmail = optional("authorEmail", submitOptions);
    sourceUrl.map(URI::create).ifPresent(url -> {
        if (url.getHost() == null || url.getScheme() == null)
            throw new IllegalArgumentException("Source URL must include scheme and host");
    });
    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
    controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
                                                                     Optional.empty(),
                                                                     Optional.empty(),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));
    return JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
                                                        tenant,
                                                        application,
                                                        sourceRevision,
                                                        authorEmail,
                                                        sourceUrl,
                                                        projectId,
                                                        applicationPackage,
                                                        dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP));
}
/** Removes all production deployments by submitting a deployment-removal package as build 1. */
private HttpResponse removeAllProdDeployments(String tenant, String application) {
    JobControllerApiHandlerHelper.submitResponse(controller.jobController(), tenant, application,
                                                 Optional.empty(), Optional.empty(), Optional.empty(), 1,
                                                 ApplicationPackage.deploymentRemoval(), new byte[0]);
    return new MessageResponse("All deployments removed");
}
/**
 * Returns the zone with the given environment and region, throwing if it is not known
 * to this system. The synthetic prod "controller" zone is always accepted.
 */
private ZoneId requireZone(String environment, String region) {
    ZoneId zone = ZoneId.from(environment, region);
    boolean isControllerZone = zone.environment() == Environment.prod && zone.region().value().equals("controller");
    if ( ! isControllerZone && ! controller.zoneRegistry().hasZone(zone))
        throw new IllegalArgumentException("Zone " + zone + " does not exist in this system");
    return zone;
}
/**
 * Parses the multipart body of the given request.
 * If an X-Content-Hash header is present, the SHA-256 digest of the body must match
 * its base64-decoded value, or the request is rejected.
 */
private static Map<String, byte[]> parseDataParts(HttpRequest request) {
    String contentHash = request.getHeader("X-Content-Hash");
    if (contentHash == null)
        return new MultipartParser().parse(request);
    DigestInputStream digester = Signatures.sha256Digester(request.getData());
    var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
    if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
        throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
    return dataParts;
}
/**
 * Returns the rotation id of the given instance, selected by endpoint id when given.
 * Throws when the instance has no rotations, when the named endpoint does not exist,
 * or when the instance has several rotations and no endpoint id was given.
 */
private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
    if (instance.rotations().isEmpty()) {
        throw new NotExistsException("global rotation does not exist for " + instance);
    }
    if (endpointId.isPresent()) {
        return instance.rotations().stream()
                       .filter(r -> r.endpointId().id().equals(endpointId.get()))
                       .map(AssignedRotation::rotationId)
                       .findFirst()
                       .orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() +
                                                                 " does not exist for " + instance));
    } else if (instance.rotations().size() > 1) {
        throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
    }
    return instance.rotations().get(0).rotationId();
}
/** Returns the API name of the given rotation state; "UNKNOWN" for states without a mapping. */
private static String rotationStateString(RotationState state) {
    switch (state) {
        case in: return "IN";
        case out: return "OUT";
        default: return "UNKNOWN";
    }
}
/** Returns the API name of the given endpoint scope. */
private static String endpointScopeString(Endpoint.Scope scope) {
    switch (scope) {
        case region: return "region";
        case global: return "global";
        case zone: return "zone";
        default: throw new IllegalArgumentException("Unknown endpoint scope " + scope);
    }
}
/** Returns the API name of the given routing method. */
private static String routingMethodString(RoutingMethod method) {
    switch (method) {
        case exclusive: return "exclusive";
        case shared: return "shared";
        case sharedLayer4: return "sharedLayer4";
        default: throw new IllegalArgumentException("Unknown routing method " + method);
    }
}
/** Returns the request context attribute with the given name and type, or throws if absent or of the wrong type. */
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
    Object value = request.getJDiscRequest().context().get(attributeName);
    if (cls.isInstance(value)) // also false when the attribute is absent (null)
        return cls.cast(value);
    throw new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request");
}
/** Returns whether the given request is made by a hosted operator. */
private static boolean isOperator(HttpRequest request) {
    SecurityContext securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
    for (Role role : securityContext.roles())
        if (role.definition() == RoleDefinition.hostedOperator)
            return true;
    return false;
}
} | class ApplicationApiHandler extends LoggingRequestHandler {
// Handler state: the controller exposes all backend operations; accessControlRequests
// parses credentials from requests; testConfigSerializer renders job test configuration.
private static final ObjectMapper jsonMapper = new ObjectMapper();
private static final String OPTIONAL_PREFIX = "/api"; // paths may be prefixed with /api
private final Controller controller;
private final AccessControlRequests accessControlRequests;
private final TestConfigSerializer testConfigSerializer;
@Inject
public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx,
                             Controller controller,
                             AccessControlRequests accessControlRequests) {
    super(parentCtx);
    this.controller = controller;
    this.accessControlRequests = accessControlRequests;
    this.testConfigSerializer = new TestConfigSerializer(controller.system());
}
/** Deployments may take a long time, so allow a generous request timeout. */
@Override
public Duration getTimeout() {
    return Duration.ofMinutes(20);
}
/**
 * Dispatches the request on HTTP method, translating domain exceptions
 * to the corresponding HTTP error responses.
 */
@Override
public HttpResponse handle(HttpRequest request) {
    try {
        Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
        switch (request.getMethod()) {
            case GET: return handleGET(path, request);
            case PUT: return handlePUT(path, request);
            case POST: return handlePOST(path, request);
            case PATCH: return handlePATCH(path, request);
            case DELETE: return handleDELETE(path, request);
            case OPTIONS: return handleOPTIONS();
            default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
        }
    }
    catch (ForbiddenException e) {
        return ErrorResponse.forbidden(Exceptions.toMessageString(e));
    }
    catch (NotAuthorizedException e) {
        return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
    }
    catch (NotExistsException e) {
        return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
    }
    catch (IllegalArgumentException e) {
        return ErrorResponse.badRequest(Exceptions.toMessageString(e));
    }
    catch (ConfigServerException e) {
        // Map config server error codes onto the closest HTTP status.
        switch (e.getErrorCode()) {
            case NOT_FOUND:
                return new ErrorResponse(NOT_FOUND, e.getErrorCode().name(), Exceptions.toMessageString(e));
            case ACTIVATION_CONFLICT:
                return new ErrorResponse(CONFLICT, e.getErrorCode().name(), Exceptions.toMessageString(e));
            case INTERNAL_SERVER_ERROR:
                return new ErrorResponse(INTERNAL_SERVER_ERROR, e.getErrorCode().name(), Exceptions.toMessageString(e));
            default:
                return new ErrorResponse(BAD_REQUEST, e.getErrorCode().name(), Exceptions.toMessageString(e));
        }
    }
    catch (RuntimeException e) {
        // Unexpected: log with stack trace before returning 500.
        log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
        return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
    }
}
/**
 * Routes GET requests on path. Both instance-in-middle and instance-at-end path forms are
 * supported for deployment-level resources. A duplicate, unreachable deployment match
 * (two identical ".../region/{region}/instance/{instance}" lines) has been removed.
 */
private HttpResponse handleGET(Path path, HttpRequest request) {
    if (path.matches("/application/v4/")) return root(request);
    if (path.matches("/application/v4/tenant")) return tenants(request);
    if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/region/{region}/parameter-name/{parameter-name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), path.get("region"), path.get("parameter-name"));
    if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/metering")) return metering(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return metrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    return ErrorResponse.notFoundError("Nothing at " + path);
}
// Dispatches PUT requests: tenant updates, tenant info, secret store registration,
// and global rotation overrides (both the current and the legacy path ordering).
// Unmatched paths yield a 404.
private HttpResponse handlePUT(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
    // The two rotation-override routes differ only in path segment order (legacy vs. current form)
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
// Dispatches POST requests: creation of tenants/applications/instances, key management,
// deployment triggering and the various zone-level actions (deploy, restart, suspend, reindex).
// Routes without an {instance} segment operate on the "default" instance.
// Unmatched paths yield a 404.
private HttpResponse handlePOST(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
    // Legacy path ordering with {instance} after {region}
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
// Dispatches PATCH requests. Both routes patch application-level settings;
// the {instance}-suffixed form ignores the instance and patches the application.
// Unmatched paths yield a 404.
private HttpResponse handlePATCH(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
// Dispatches DELETE requests: removal of tenants/applications/instances, keys and
// secret stores, cancelling deployments, aborting/resuming jobs, and deactivation
// or un-suspension of zone deployments. Unmatched paths yield a 404.
private HttpResponse handleDELETE(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
    // Legacy path ordering with {instance} after {region}
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
// Answers OPTIONS requests by advertising the HTTP verbs this handler supports.
private HttpResponse handleOPTIONS() {
    // An OPTIONS response carries no body; only the Allow header matters.
    EmptyResponse allowHeaderResponse = new EmptyResponse();
    allowHeaderResponse.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
    return allowHeaderResponse;
}
// Serializes every known tenant in full, as a JSON array.
private HttpResponse recursiveRoot(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenants = slime.setArray();
    controller.tenants().asList()
              .forEach(tenant -> toSlime(tenants.addObject(), tenant, request));
    return new SlimeJsonResponse(slime);
}
// Serves the API root: a full recursive listing when recursion is requested,
// otherwise just links to the tenant resources.
private HttpResponse root(HttpRequest request) {
    if (recurseOverTenants(request))
        return recursiveRoot(request);
    return new ResourceResponse(request, "tenant");
}
// Lists all tenants in the compact "tenants list" form, as a JSON array.
private HttpResponse tenants(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantArray = slime.setArray();
    controller.tenants().asList()
              .forEach(tenant -> tenantInTenantsListToSlime(tenant, request.getUri(), tenantArray.addObject()));
    return new SlimeJsonResponse(slime);
}
// Serves a single tenant by name, or 404 when it does not exist.
private HttpResponse tenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    return tenant(tenant.get(), request);
}
// Serializes the given tenant as a JSON object response.
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
    Slime response = new Slime();
    toSlime(response.setObject(), tenant, request);
    return new SlimeJsonResponse(response);
}
// Serves the extended tenant info for a cloud tenant; 404 for missing or non-cloud tenants.
private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName))
                                        .filter(t -> t.type() == Tenant.Type.cloud);
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
    return tenantInfo(((CloudTenant) tenant.get()).info(), request);
}
// Serializes tenant info as JSON. An empty info object yields an empty JSON object
// (no fields at all) rather than fields with empty values.
private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) {
    Slime slime = new Slime();
    Cursor infoCursor = slime.setObject();
    if (!info.isEmpty()) {
        infoCursor.setString("name", info.name());
        infoCursor.setString("email", info.email());
        infoCursor.setString("website", info.website());
        infoCursor.setString("invoiceEmail", info.invoiceEmail());
        infoCursor.setString("contactName", info.contactName());
        infoCursor.setString("contactEmail", info.contactEmail());
        // Nested structures are added only when non-empty (see the toSlime overloads)
        toSlime(info.address(), infoCursor);
        toSlime(info.billingContact(), infoCursor);
    }
    return new SlimeJsonResponse(slime);
}
// Adds an "address" sub-object to the given cursor; empty addresses are omitted entirely.
private void toSlime(TenantInfoAddress address, Cursor parentCursor) {
    if (address.isEmpty()) return;

    Cursor cursor = parentCursor.setObject("address");
    cursor.setString("addressLines", address.addressLines());
    cursor.setString("postalCodeOrZip", address.postalCodeOrZip());
    cursor.setString("city", address.city());
    cursor.setString("stateRegionProvince", address.stateRegionProvince());
    cursor.setString("country", address.country());
}
// Adds a "billingContact" sub-object to the given cursor; empty contacts are omitted entirely.
private void toSlime(TenantInfoBillingContact billingContact, Cursor parentCursor) {
    if (billingContact.isEmpty()) return;

    Cursor contactCursor = parentCursor.setObject("billingContact");
    contactCursor.setString("name", billingContact.name());
    contactCursor.setString("email", billingContact.email());
    contactCursor.setString("phone", billingContact.phone());
    toSlime(billingContact.address(), contactCursor);
}
// Updates extended tenant info for a cloud tenant; 404 for missing or non-cloud tenants.
private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName))
                                        .filter(t -> t.type() == Tenant.Type.cloud);
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
    return updateTenantInfo((CloudTenant) tenant.get(), request);
}
// Returns the string value of the given field, or the default when the field is
// missing or not a valid value. (Fixes misspelled parameter name "defaultVale".)
private String getString(Inspector field, String defaultValue) {
    return field.valid() ? field.asString() : defaultValue;
}
// Merges the request body into the tenant's existing info and stores the result.
// Fields absent from the request keep their previous values.
// Bug fixes: "website" previously defaulted to the old *email*, and "contactEmail"
// previously defaulted to the old *contactName* — both copy-paste errors that
// silently corrupted tenant info on partial updates.
private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) {
    TenantInfo oldInfo = tenant.info();
    Inspector insp = toSlime(request.getData()).get();
    TenantInfo mergedInfo = TenantInfo.EMPTY
            .withName(getString(insp.field("name"), oldInfo.name()))
            .withEmail(getString(insp.field("email"), oldInfo.email()))
            .withWebsite(getString(insp.field("website"), oldInfo.website()))
            .withInvoiceEmail(getString(insp.field("invoiceEmail"), oldInfo.invoiceEmail()))
            .withContactName(getString(insp.field("contactName"), oldInfo.contactName()))
            .withContactEmail(getString(insp.field("contactEmail"), oldInfo.contactEmail()))
            .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address()))
            .withBillingContact(updateTenantInfoBillingContact(insp.field("billingContact"), oldInfo.billingContact()));

    // Store under the tenant lock so concurrent updates cannot interleave
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });

    return new MessageResponse("Tenant info updated");
}
// Merges an "address" JSON object into an existing address. Returns the old
// address untouched when the request contains no address object; otherwise each
// field present in the request replaces the corresponding old value.
private TenantInfoAddress updateTenantInfoAddress(Inspector insp, TenantInfoAddress oldAddress) {
    if ( ! insp.valid()) return oldAddress;

    TenantInfoAddress merged = TenantInfoAddress.EMPTY;
    merged = merged.withCountry(getString(insp.field("country"), oldAddress.country()));
    merged = merged.withStateRegionProvince(getString(insp.field("stateRegionProvince"), oldAddress.stateRegionProvince()));
    merged = merged.withCity(getString(insp.field("city"), oldAddress.city()));
    merged = merged.withPostalCodeOrZip(getString(insp.field("postalCodeOrZip"), oldAddress.postalCodeOrZip()));
    merged = merged.withAddressLines(getString(insp.field("addressLines"), oldAddress.addressLines()));
    return merged;
}
// Merges a "billingContact" JSON object into an existing contact. Returns the old
// contact untouched when the request contains no billingContact object.
private TenantInfoBillingContact updateTenantInfoBillingContact(Inspector insp, TenantInfoBillingContact oldContact) {
    if ( ! insp.valid()) return oldContact;

    TenantInfoBillingContact merged = TenantInfoBillingContact.EMPTY;
    merged = merged.withName(getString(insp.field("name"), oldContact.name()));
    merged = merged.withEmail(getString(insp.field("email"), oldContact.email()));
    merged = merged.withPhone(getString(insp.field("phone"), oldContact.phone()));
    merged = merged.withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address()));
    return merged;
}
// Lists the applications of a tenant (optionally filtered to one application name)
// together with links to each instance. With showOnlyProductionInstances set on the
// request, only production instances are listed.
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().get(tenantName).isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");

    Slime slime = new Slime();
    Cursor applicationArray = slime.setArray();
    for (com.yahoo.vespa.hosted.controller.Application application : controller.applications().asList(tenant)) {
        // Empty filter means "all applications"
        if (applicationName.map(application.id().application().value()::equals).orElse(true)) {
            Cursor applicationObject = applicationArray.addObject();
            applicationObject.setString("tenant", application.id().tenant().value());
            applicationObject.setString("application", application.id().application().value());
            applicationObject.setString("url", withPath("/application/v4" +
                                                        "/tenant/" + application.id().tenant().value() +
                                                        "/application/" + application.id().application().value(),
                                                        request.getUri()).toString());
            Cursor instanceArray = applicationObject.setArray("instances");
            for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
                                                                              : application.instances().keySet()) {
                Cursor instanceObject = instanceArray.addObject();
                instanceObject.setString("instance", instance.value());
                instanceObject.setString("url", withPath("/application/v4" +
                                                         "/tenant/" + application.id().tenant().value() +
                                                         "/application/" + application.id().application().value() +
                                                         "/instance/" + instance.value(),
                                                         request.getUri()).toString());
            }
        }
    }
    return new SlimeJsonResponse(slime);
}
// Serves the application package most recently deployed to a manually deployed
// (dev/perf) zone, as a zip download. Rejects job types for other environments.
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
    if ( ! type.environment().isManuallyDeployed())
        throw new IllegalArgumentException("Only manually deployed zones have dev packages");

    ZoneId zone = type.zone(controller.system());
    byte[] packageBytes = controller.applications().applicationStore().getDev(id, zone);
    String filename = id.toFullString() + "." + zone.value() + ".zip";
    return new ZipResponse(filename, packageBytes);
}
// Serves a submitted application package as a zip download. The "build" query
// parameter selects a specific build; without it, the latest submitted build is used.
// Throws NotExistsException (404) when no package exists for the resolved build.
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
    var tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
    var applicationId = ApplicationId.from(tenantName, applicationName, InstanceName.defaultName().value());

    long buildNumber;
    // Parse the optional "build" parameter, translating parse failures to 400
    var requestedBuild = Optional.ofNullable(request.getProperty("build")).map(build -> {
        try {
            return Long.parseLong(build);
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("Invalid build number", e);
        }
    });

    if (requestedBuild.isEmpty()) { // Fall back to the latest build number of the application
        var application = controller.applications().requireApplication(tenantAndApplication);
        var latestBuild = application.latestVersion().map(ApplicationVersion::buildNumber).orElse(OptionalLong.empty());
        if (latestBuild.isEmpty()) {
            throw new NotExistsException("No application package has been submitted for '" + tenantAndApplication + "'");
        }
        buildNumber = latestBuild.getAsLong();
    } else {
        buildNumber = requestedBuild.get();
    }

    var applicationPackage = controller.applications().applicationStore().find(tenantAndApplication.tenant(), tenantAndApplication.application(), buildNumber);
    var filename = tenantAndApplication + "-build" + buildNumber + ".zip";
    if (applicationPackage.isEmpty()) {
        throw new NotExistsException("No application package found for '" +
                                     tenantAndApplication +
                                     "' with build number " + buildNumber);
    }
    return new ZipResponse(filename, applicationPackage.get());
}
// Serializes a single application as a JSON object; 404 (via getApplication) when missing.
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
    var application = getApplication(tenantName, applicationName);
    Slime response = new Slime();
    toSlime(response.setObject(), application, request);
    return new SlimeJsonResponse(response);
}
// Serves the Vespa version this application should currently compile against.
private HttpResponse compileVersion(String tenantName, String applicationName) {
    var id = TenantAndApplicationId.from(tenantName, applicationName);
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setString("compileVersion", compileVersion(id).toFullString());
    return new SlimeJsonResponse(slime);
}
// Serializes a single instance, including its deployment status, as a JSON object.
private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    var instance = getInstance(tenantName, applicationName, instanceName);
    var deploymentStatus = controller.jobController().deploymentStatus(getApplication(tenantName, applicationName));
    Slime slime = new Slime();
    toSlime(slime.setObject(), instance, deploymentStatus, request);
    return new SlimeJsonResponse(slime);
}
// Registers the calling user's public developer key (PEM, in the request body's
// "key" field) on a cloud tenant, and returns the tenant's full key list.
// Only cloud tenants support developer keys.
private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

    Principal user = request.getJDiscRequest().getUserPrincipal();
    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    Slime root = new Slime();
    // Serialize the key list inside the lock so the response reflects the stored state
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withDeveloperKey(developerKey, user);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}
// Returns an arbitrary active deployment (the first found) under any instance of
// any application belonging to the given tenant, or empty if the tenant has none.
// Cleaned up: the original filtered on keySet().size() > 0 and then scanned the
// same key set again; this scans each instance's deployments once.
private Optional<DeploymentId> getActiveDeployment(TenantName tenant) {
    for (var application : controller.applications().asList(tenant)) {
        for (var instance : application.instances().values()) {
            Optional<ZoneId> zone = instance.deployments().keySet().stream().findFirst();
            if (zone.isPresent())
                return Optional.of(new DeploymentId(instance.id(), zone.get()));
        }
    }
    return Optional.empty();
}
// Removes the given public developer key (PEM, in the request body's "key" field)
// from a cloud tenant, and returns the tenant's remaining key list.
// Cleaned up: the original looked up the key's owning Principal into an unused
// local variable, costing an extra tenant fetch for nothing.
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    Slime root = new Slime();
    // Serialize the key list inside the lock so the response reflects the stored state
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withoutDeveloperKey(developerKey);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}
// Serializes a key-to-owner map as an array of { key: <PEM>, user: <name> } objects.
private void toSlime(Cursor keysArray, Map<PublicKey, Principal> keys) {
    for (Map.Entry<PublicKey, Principal> entry : keys.entrySet()) {
        Cursor keyObject = keysArray.addObject();
        keyObject.setString("key", KeyUtils.toPem(entry.getKey()));
        keyObject.setString("user", entry.getValue().getName());
    }
}
// Adds a public deploy key (PEM, in the request body's "key" field) to an
// application, and returns the application's full deploy key list.
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
    String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
    Slime root = new Slime();
    // Serialize the key list inside the lock so the response reflects the stored state
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withDeployKey(deployKey);
        application.get().deployKeys().stream()
                   .map(KeyUtils::toPem)
                   .forEach(root.setObject().setArray("keys")::addString);
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}
// Removes a public deploy key (PEM, in the request body's "key" field) from an
// application, and returns the application's remaining deploy key list.
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
    String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
    Slime root = new Slime();
    // Serialize the key list inside the lock so the response reflects the stored state
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withoutDeployKey(deployKey);
        application.get().deployKeys().stream()
                   .map(KeyUtils::toPem)
                   .forEach(root.setObject().setArray("keys")::addString);
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}
// Registers a secret store (awsId/externalId/role from the request body) on a cloud
// tenant: creates the tenant IAM policy, registers the store with the secret service,
// persists it on the tenant, and returns the tenant's resulting store list.
// Bug fix: error messages were built with String.format on a pre-concatenated string
// containing no format arguments — a stray '%' in the store's toString would have
// thrown IllegalFormatException. Plain concatenation is used instead.
private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

    var data = toSlime(request.getData()).get();
    var awsId = mandatory("awsId", data).asString();
    var externalId = mandatory("externalId", data).asString();
    var role = mandatory("role", data).asString();

    var tenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
    var tenantSecretStore = new TenantSecretStore(name, awsId, role);

    if (!tenantSecretStore.isValid()) {
        return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid");
    }
    if (tenant.tenantSecretStores().contains(tenantSecretStore)) {
        return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured");
    }

    controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role);
    controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId);
    // Store the secret store on the tenant under lock, then re-read for the response
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withSecretStore(tenantSecretStore);
        controller.tenants().store(lockedTenant);
    });

    tenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
    var slime = new Slime();
    toSlime(slime.setObject(), tenant.tenantSecretStores());
    return new SlimeJsonResponse(slime);
}
// Deletes a named secret store from a cloud tenant: removes it from the secret
// service and the tenant IAM policy, drops it from the tenant under lock, and
// returns the tenant's remaining store list. 404 when the store name is unknown.
private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) {
    var tenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));

    var optionalSecretStore = tenant.tenantSecretStores().stream()
            .filter(secretStore -> secretStore.getName().equals(name))
            .findFirst();

    if (optionalSecretStore.isEmpty())
        return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found");

    var tenantSecretStore = optionalSecretStore.get();
    controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore);
    controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole());
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withoutSecretStore(tenantSecretStore);
        controller.tenants().store(lockedTenant);
    });

    // Re-read the tenant so the response reflects the stored state
    tenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
    var slime = new Slime();
    toSlime(slime.setObject(), tenant.tenantSecretStores());
    return new SlimeJsonResponse(slime);
}
// Patches application-level settings from the request body. Supported fields:
// "majorVersion" (0 clears the pinned major) and "pemDeployKey" (adds a deploy key).
// Returns a message describing the changes applied, or "No applicable changes."
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        Inspector majorVersionField = requestObject.field("majorVersion");
        if (majorVersionField.valid()) {
            // 0 is the sentinel for "unpin the major version"
            Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
            application = application.withMajorVersion(majorVersion);
            messageBuilder.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
        }

        Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
        if (pemDeployKeyField.valid()) {
            String pemDeployKey = pemDeployKeyField.asString();
            PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
            application = application.withDeployKey(deployKey);
            messageBuilder.add("Added deploy key " + pemDeployKey);
        }

        controller.applications().store(application);
    });
    return new MessageResponse(messageBuilder.toString());
}
// Looks up an application, throwing NotExistsException (mapped to 404) when absent.
private com.yahoo.vespa.hosted.controller.Application getApplication(String tenantName, String applicationName) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    return controller.applications()
                     .getApplication(id)
                     .orElseThrow(() -> new NotExistsException(id + " not found"));
}
// Looks up an instance, throwing NotExistsException (mapped to 404) when absent.
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    return controller.applications()
                     .getInstance(id)
                     .orElseThrow(() -> new NotExistsException(id + " not found"));
}
// Lists the nodes allocated to a deployment, with per-node state, orchestration
// status, version, flavor, resources and cluster membership, as JSON.
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, id);

    Slime slime = new Slime();
    Cursor nodesArray = slime.setObject().setArray("nodes");
    for (Node node : nodes) {
        Cursor nodeObject = nodesArray.addObject();
        nodeObject.setString("hostname", node.hostname().value());
        nodeObject.setString("state", valueOf(node.state()));
        node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
        nodeObject.setString("orchestration", valueOf(node.serviceState()));
        nodeObject.setString("version", node.currentVersion().toString());
        nodeObject.setString("flavor", node.flavor());
        toSlime(node.resources(), nodeObject);
        // Legacy boolean kept alongside the detailed resource serialization
        nodeObject.setBool("fastDisk", node.resources().diskSpeed() == NodeResources.DiskSpeed.fast);
        nodeObject.setString("clusterId", node.clusterId());
        nodeObject.setString("clusterType", valueOf(node.clusterType()));
    }
    return new SlimeJsonResponse(slime);
}
// Returns autoscaling data for each cluster of a deployment: min/max/current
// resources, target and suggested resources when present, utilization, scaling
// events and the autoscaling status, as JSON.
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);

    Slime slime = new Slime();
    Cursor clustersObject = slime.setObject().setObject("clusters");
    for (Cluster cluster : application.clusters().values()) {
        Cursor clusterObject = clustersObject.setObject(cluster.id().value());
        toSlime(cluster.min(), clusterObject.setObject("min"));
        toSlime(cluster.max(), clusterObject.setObject("max"));
        toSlime(cluster.current(), clusterObject.setObject("current"));
        // Only report a target that exists and actually differs from current resources
        if (cluster.target().isPresent()
            && ! cluster.target().get().justNumbers().equals(cluster.current().justNumbers()))
            toSlime(cluster.target().get(), clusterObject.setObject("target"));
        cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested")));
        utilizationToSlime(cluster.utilization(), clusterObject.setObject("utilization"));
        scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
        clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus());
    }
    return new SlimeJsonResponse(slime);
}
/** Serializes a node state to its wire name (identical to the enum constant's name). */
private static String valueOf(Node.State state) {
    switch (state) {
        case failed:
        case parked:
        case dirty:
        case ready:
        case active:
        case inactive:
        case reserved:
        case provisioned:
            // Wire names deliberately match the enum constant names
            return state.name();
        default:
            throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
    }
}
/** Serializes an orchestration state to its wire name; anything unrecognized maps to "unknown". */
static String valueOf(Node.ServiceState state) {
    switch (state) {
        case expectedUp:
        case allowedDown:
        case permanentlyDown:
        case unorchestrated:
            // Wire names deliberately match the enum constant names
            return state.name();
        default:
            return "unknown";
    }
}
/** Serializes a cluster type to its wire name (identical to the enum constant's name). */
private static String valueOf(Node.ClusterType type) {
    switch (type) {
        case admin:
        case content:
        case container:
        case combined:
            // Wire names deliberately match the enum constant names
            return type.name();
        default:
            throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
    }
}
/** Serializes a disk speed to its wire name (identical to the enum constant's name). */
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
    switch (diskSpeed) {
        case fast:
        case slow:
        case any:
            // Wire names deliberately match the enum constant names
            return diskSpeed.name();
        default:
            throw new IllegalArgumentException("Unknown disk speed '" + diskSpeed.name() + "'");
    }
}
/** Serializes a storage type to its wire name (identical to the enum constant's name). */
private static String valueOf(NodeResources.StorageType storageType) {
    switch (storageType) {
        case remote:
        case local:
        case any:
            // Wire names deliberately match the enum constant names
            return storageType.name();
        default:
            throw new IllegalArgumentException("Unknown storage type '" + storageType.name() + "'");
    }
}
/**
 * Streams Vespa logs for the given deployment straight from the config server to the client.
 * Query parameters are forwarded unmodified to the config server log API.
 */
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    DeploymentId deployment = new DeploymentId(application, zone);
    InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
    return new HttpResponse(200) {
        @Override
        public void render(OutputStream outputStream) throws IOException {
            // Close the upstream stream when rendering completes (or fails) so the
            // config server connection is not leaked.
            try (logStream) {
                logStream.transferTo(outputStream);
            }
        }
    };
}
/** Returns proton (content node) metrics for the given deployment as JSON. */
private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    return buildResponseFromProtonMetrics(controller.serviceRegistry().configServer().getProtonMetrics(deployment));
}
/** Wraps the given proton metrics in a pretty-printed {"metrics": [...]} JSON response; 500 on serialization failure. */
private JsonResponse buildResponseFromProtonMetrics(List<ProtonMetrics> protonMetrics) {
    try {
        var metricsArray = jsonMapper.createArrayNode();
        protonMetrics.forEach(metrics -> metricsArray.add(metrics.toJson()));
        var root = jsonMapper.createObjectNode();
        root.set("metrics", metricsArray);
        return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(root));
    } catch (JsonProcessingException e) {
        log.log(Level.SEVERE, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
        return new JsonResponse(500, "");
    }
}
/**
 * Triggers (or re-triggers) the given job for the given instance.
 * Request body flags: "skipTests" skips system/staging tests, "reTrigger" repeats the last run.
 */
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    boolean requireTests = ! requestObject.field("skipTests").asBool();
    String triggered;
    if (requestObject.field("reTrigger").asBool()) {
        triggered = controller.applications().deploymentTrigger().reTrigger(id, type).type().jobName();
    } else {
        triggered = controller.applications().deploymentTrigger()
                              .forceTrigger(id, type, request.getJDiscRequest().getUserPrincipal().getName(), requireTests)
                              .stream().map(job -> job.type().jobName()).collect(joining(", "));
    }
    if (triggered.isEmpty())
        return new MessageResponse("Job " + type.jobName() + " for " + id + " not triggered");
    return new MessageResponse("Triggered " + triggered + " for " + id);
}
/** Pauses the given job for the maximum allowed pause period, starting now. */
private HttpResponse pause(ApplicationId id, JobType type) {
    Instant pauseUntil = controller.clock().instant().plus(DeploymentTrigger.maxPause);
    controller.applications().deploymentTrigger().pauseJob(id, type, pauseUntil);
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
/** Resumes a previously paused job for the given instance. */
private HttpResponse resume(ApplicationId id, JobType type) {
    controller.applications().deploymentTrigger().resumeJob(id, type);
    return new MessageResponse(type.jobName() + " for " + id + " resumed");
}
/**
 * Serializes an application-level overview: identifiers, latest version, current and
 * outstanding changes, its instances (production-only if requested), deploy keys,
 * metrics, activity, and open issue ids.
 */
private void toSlime(Cursor object, com.yahoo.vespa.hosted.controller.Application application, HttpRequest request) {
    object.setString("tenant", application.id().tenant().value());
    object.setString("application", application.id().application().value());
    // Link to the job overview for this application
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + application.id().tenant().value() +
                                             "/application/" + application.id().application().value() +
                                             "/job/",
                                             request.getUri()).toString());
    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.latestVersion().ifPresent(version -> toSlime(version, object.setObject("latestVersion")));
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    // Change status is reported for the first instance only at this (application) level
    application.instances().values().stream().findFirst().ifPresent(instance -> {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
    });
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    Cursor instancesArray = object.setArray("instances");
    for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                  : application.instances().values())
        toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    activity.setLong accumulators are epoch millis; absent values are simply omitted
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/**
 * Serializes one instance for the application overview: change status, change blockers,
 * global endpoints, and its deployments (recursively when requested).
 */
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
    object.setString("instance", instance.name().value());
    if (deploymentSpec.instance(instance.name()).isPresent()) {
        // Changes currently rolling out, and changes waiting to start
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
        Cursor changeBlockers = object.setArray("changeBlockers");
        deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    globalEndpointsToSlime(object, instance);
    // Deployments in deployment-spec order when this instance is declared, otherwise in natural order
    List<Deployment> deployments = deploymentSpec.instance(instance.name())
                                                 .map(spec -> new DeploymentSteps(spec, controller::system))
                                                 .map(steps -> steps.sortedDeployments(instance.deployments().values()))
                                                 .orElse(List.copyOf(instance.deployments().values()));
    Cursor deploymentsArray = object.setArray("deployments");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = deploymentsArray.addObject();
        if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
        if (recurseOverDeployments(request))
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            // Shallow view: just identify the deployment and link to its full resource
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/instance/" + instance.name().value() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
}
/** Serializes the instance's non-legacy rotation-backed global endpoint URLs, plus its first rotation id if any. */
private void globalEndpointsToSlime(Cursor object, Instance instance) {
    Cursor globalRotationsArray = object.setArray("globalRotations");
    controller.routing().endpointsOf(instance.id())
              .requiresRotation()
              .not().legacy()
              .asList().stream()
              .map(endpoint -> endpoint.url().toString())
              .distinct()                       // de-duplicate while keeping encounter order
              .forEach(globalRotationsArray::addString);
    instance.rotations().stream()
            .findFirst()
            .ifPresent(assigned -> object.setString("rotationId", assigned.rotationId().asString()));
}
/**
 * Serializes the full single-instance view: identifiers, source/build info, change status,
 * change blockers, global endpoints, deployments (recursively when requested), deploy keys,
 * metrics, activity and issue ids.
 */
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
    com.yahoo.vespa.hosted.controller.Application application = status.application();
    object.setString("tenant", instance.id().tenant().value());
    object.setString("application", instance.id().application().value());
    object.setString("instance", instance.id().instance().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + instance.id().tenant().value() +
                                             "/application/" + instance.id().application().value() +
                                             "/instance/" + instance.id().instance().value() + "/job/",
                                             request.getUri()).toString());
    application.latestVersion().ifPresent(version -> {
        sourceRevisionToSlime(version.source(), object.setObject("source"));
        version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
        version.commit().ifPresent(commit -> object.setString("commit", commit));
    });
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    if (application.deploymentSpec().instance(instance.name()).isPresent()) {
        // Changes currently rolling out, and changes waiting to start
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    globalEndpointsToSlime(object, instance);
    // Deployments in deployment-spec order when this instance is declared, otherwise in natural order
    List<Deployment> deployments =
            application.deploymentSpec().instance(instance.name())
                       .map(spec -> new DeploymentSteps(spec, controller::system))
                       .map(steps -> steps.sortedDeployments(instance.deployments().values()))
                       .orElse(List.copyOf(instance.deployments().values()));
    Cursor instancesArray = object.setArray("instances");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = instancesArray.addObject();
        if (deployment.zone().environment() == Environment.prod) {
            if (instance.rotations().size() == 1) {
                toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment),
                        deploymentObject);
            }
            if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
            }
        }
        if (recurseOverDeployments(request))
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            // Shallow view: just identify the deployment and link to its full resource
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", instance.id().instance().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
    // Add empty entries for production zones required by the spec but not yet deployed to
    status.jobSteps().keySet().stream()
          .filter(job -> job.application().instance().equals(instance.name()))
          .filter(job -> job.type().isProduction() && job.type().isDeployment())
          .map(job -> job.type().zone(controller.system()))
          .filter(zone -> ! instance.deployments().containsKey(zone))
          .forEach(zone -> {
              Cursor deploymentObject = instancesArray.addObject();
              deploymentObject.setString("environment", zone.environment().value());
              deploymentObject.setString("region", zone.region().value());
          });
    application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/** Returns the full serialized view of a single deployment; 404s if the instance or deployment does not exist. */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment,
                                String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().getInstance(id)
                                  .orElseThrow(() -> new NotExistsException(id + " not found"));
    ZoneId zone = requireZone(environment, region);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " is not deployed in " + zone);
    Slime slime = new Slime();
    toSlime(slime.setObject(), new DeploymentId(instance.id(), zone), deployment, request);
    return new SlimeJsonResponse(slime);
}
/** Serializes a change: the platform version and/or the (known) application revision it carries. */
private void toSlime(Cursor object, Change change) {
    change.platform().ifPresent(platform -> object.setString("version", platform.toString()));
    change.application()
          .filter(revision -> ! revision.isUnknown()) // unknown revisions are not serialized
          .ifPresent(revision -> toSlime(revision, object.setObject("revision")));
}
/** Serializes a single endpoint: owning cluster, TLS flag, URL, scope and routing method. */
private void toSlime(Endpoint endpoint, Cursor object) {
    object.setString("cluster", endpoint.cluster().value());
    object.setBool("tls", endpoint.tls());
    object.setString("url", endpoint.url().toString());
    object.setString("scope", endpointScopeString(endpoint.scope()));
    object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
}
/**
 * Serializes a single deployment: identifiers, endpoints, links to clusters/nodes/monitoring,
 * versions, rotation status, derived job status, activity and deployment metrics.
 */
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
    response.setString("tenant", deploymentId.applicationId().tenant().value());
    response.setString("application", deploymentId.applicationId().application().value());
    response.setString("instance", deploymentId.applicationId().instance().value());
    response.setString("environment", deploymentId.zoneId().environment().value());
    response.setString("region", deploymentId.zoneId().region().value());
    var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));
    // Non-legacy zone-scoped endpoints for this deployment, followed by global endpoints targeting it
    var endpointArray = response.setArray("endpoints");
    EndpointList zoneEndpoints = controller.routing().endpointsOf(deploymentId)
                                           .scope(Endpoint.Scope.zone)
                                           .not().legacy();
    for (var endpoint : controller.routing().directEndpoints(zoneEndpoints, deploymentId.applicationId())) {
        toSlime(endpoint, endpointArray.addObject());
    }
    EndpointList globalEndpoints = controller.routing().endpointsOf(application, deploymentId.applicationId().instance())
                                             .not().legacy()
                                             .targets(deploymentId.zoneId());
    for (var endpoint : controller.routing().directEndpoints(globalEndpoints, deploymentId.applicationId())) {
        toSlime(endpoint, endpointArray.addObject());
    }
    // Links to related resources
    response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
    response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
    response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
    response.setString("version", deployment.version().toFullString());
    response.setString("revision", deployment.applicationVersion().id());
    response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
    // Expiry is only present in zones with a deployment time-to-live (e.g. dev/perf)
    controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
              .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));
    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
    sourceRevisionToSlime(deployment.applicationVersion().source(), response);
    var instance = application.instances().get(deploymentId.applicationId().instance());
    if (instance != null) {
        if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);
        // Derive a coarse status from the job deploying to this zone, if one exists
        JobType.from(controller.system(), deployment.zone())
               .map(type -> new JobId(instance.id(), type))
               .map(status.jobSteps()::get)
               .ifPresent(stepStatus -> {
                   JobControllerApiHandlerHelper.applicationVersionToSlime(
                           response.setObject("applicationVersion"), deployment.applicationVersion());
                   if (!status.jobsToRun().containsKey(stepStatus.job().get()))
                       response.setString("status", "complete");
                   else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true))
                       response.setString("status", "pending");
                   else response.setString("status", "running");
               });
    }
    Cursor activity = response.setObject("activity");
    deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    DeploymentMetrics metrics = deployment.metrics();
    Cursor metricsObject = response.setObject("metrics");
    metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
    metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
    metricsObject.setDouble("documentCount", metrics.documentCount());
    metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
    metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
    metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}
/** Serializes an application version: build number, hash, source revision and optional links. No-op if unknown. */
private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
    if (applicationVersion.isUnknown()) return; // nothing meaningful to serialize
    object.setLong("buildNumber", applicationVersion.buildNumber().getAsLong());
    object.setString("hash", applicationVersion.id());
    sourceRevisionToSlime(applicationVersion.source(), object.setObject("source"));
    applicationVersion.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
    applicationVersion.commit().ifPresent(commit -> object.setString("commit", commit));
}
/** Serializes a source revision (repository, branch, commit) if present; otherwise writes nothing. */
private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
    revision.ifPresent(source -> {
        object.setString("gitRepository", source.repository());
        object.setString("gitBranch", source.branch());
        object.setString("gitCommit", source.commit());
    });
}
/** Serializes a rotation state as {"bcpStatus": {"rotationStatus": ...}}. */
private void toSlime(RotationState state, Cursor object) {
    object.setObject("bcpStatus").setString("rotationStatus", rotationStateString(state));
}
/** Serializes per-rotation endpoint status for the given deployment into an "endpointStatus" array. */
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
    Cursor endpointStatusArray = object.setArray("endpointStatus");
    for (AssignedRotation assignedRotation : rotations) {
        Cursor entry = endpointStatusArray.addObject();
        entry.setString("endpointId", assignedRotation.endpointId().id());
        entry.setString("rotationId", assignedRotation.rotationId().asString());
        entry.setString("clusterId", assignedRotation.clusterId().value());
        // Status in the given deployment's zone, and the time the rotation targets were last refreshed
        entry.setString("status", rotationStateString(status.of(assignedRotation.rotationId(), deployment)));
        entry.setLong("lastUpdated", status.of(assignedRotation.rotationId()).lastUpdated().toEpochMilli());
    }
}
/** Returns the monitoring-system (dashboard) URI for the given deployment, as configured in the zone registry. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
    return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}
/**
 * Returns a non-broken, released version at least as old as the oldest platform the given application is on.
 *
 * If no known version is applicable, the newest version at least as old as the oldest platform is selected,
 * among all versions released for this system. If no such versions exists, throws an IllegalStateException.
 */
private Version compileVersion(TenantAndApplicationId id) {
    Version oldestPlatform = controller.applications().oldestInstalledPlatform(id);
    VersionStatus versionStatus = controller.readVersionStatus();
    // Version numbers known to this system, precomputed once — the previous code rebuilt
    // this set for every candidate maven version inside the fallback filter.
    var knownVersions = versionStatus.versions().stream()
                                     .map(VespaVersion::versionNumber)
                                     .collect(Collectors.toSet());
    return versionStatus.versions().stream()
                        .filter(version -> version.confidence().equalOrHigherThan(VespaVersion.Confidence.low))
                        .filter(VespaVersion::isReleased)
                        .map(VespaVersion::versionNumber)
                        .filter(version -> ! version.isAfter(oldestPlatform))
                        .max(Comparator.naturalOrder())
                        // Fall back to the newest applicable release from the maven repository
                        // which this system does not already know about.
                        .orElseGet(() -> controller.mavenRepository().metadata().versions().stream()
                                                   .filter(version -> ! version.isAfter(oldestPlatform))
                                                   .filter(version -> ! knownVersions.contains(version))
                                                   .max(Comparator.naturalOrder())
                                                   .orElseThrow(() -> new IllegalStateException("No available releases of " +
                                                                                                controller.mavenRepository().artifactId())));
}
/** Sets the deployment in or out of service for global routing, for both rotation- and policy-based endpoints. */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = requireZone(environment, region);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " has no deployment in " + zone);
    DeploymentId deploymentId = new DeploymentId(instance.id(), zone);
    // Update both mechanisms: legacy rotations and cloud routing policies
    setGlobalRotationStatus(deploymentId, inService, request);
    setGlobalEndpointStatus(deploymentId, inService, request);
    return new MessageResponse(String.format("Successfully set %s in %s %s service",
                                             instance.id().toShortString(), zone, inService ? "in" : "out of"));
}
/** Set the global endpoint status for given deployment. This only applies to global endpoints backed by a cloud service */
private void setGlobalEndpointStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
    GlobalRouting.Agent agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant;
    GlobalRouting.Status status = inService ? GlobalRouting.Status.in : GlobalRouting.Status.out;
    controller.routing().policies().setGlobalRoutingStatus(deployment, status, agent);
}
/** Set the global rotation status for given deployment. This only applies to global endpoints backed by a rotation */
private void setGlobalRotationStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
    // A reason is mandatory for rotation overrides, so it is recorded with the status
    String reason = mandatory("reason", toSlime(request.getData()).get()).asString();
    GlobalRouting.Agent agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant;
    long timestamp = controller.clock().instant().getEpochSecond();
    EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
    controller.routing().setGlobalRotationStatus(deployment, new EndpointStatus(status, reason, agent.name(), timestamp));
}
/** Returns the current global rotation override status for the given deployment. */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    Cursor array = slime.setObject().setArray("globalrotationoverride");
    controller.routing().globalRotationStatus(deploymentId)
              .forEach((endpoint, status) -> {
                  // Legacy response shape: each entry is the upstream id string followed by a status object
                  array.addString(endpoint.upstreamIdOf(deploymentId));
                  Cursor entry = array.addObject();
                  entry.setString("status", status.getStatus().name());
                  entry.setString("reason", status.getReason() == null ? "" : status.getReason());
                  entry.setString("agent", status.getAgent() == null ? "" : status.getAgent());
                  entry.setLong("timestamp", status.getEpoch());
              });
    return new SlimeJsonResponse(slime);
}
/** Returns the BCP rotation status of the given (optionally named) endpoint in the given deployment. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = requireZone(environment, region);
    RotationId rotation = findRotationId(instance, endpointId);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " has no deployment in " + zone);
    Slime slime = new Slime();
    toSlime(instance.rotationStatus().of(rotation, deployment), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/**
 * Returns resource metering data for the given tenant and application:
 * current usage rate, this month's and last month's aggregates, plus a per-instance
 * time series of cpu/mem/disk snapshots.
 */
private HttpResponse metering(String tenant, String application, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    MeteringData meteringData = controller.serviceRegistry()
                                          .meteringService()
                                          .getMeteringData(TenantName.from(tenant), ApplicationName.from(application));
    // Instantaneous allocation
    ResourceAllocation currentSnapshot = meteringData.getCurrentSnapshot();
    Cursor currentRate = root.setObject("currentrate");
    currentRate.setDouble("cpu", currentSnapshot.getCpuCores());
    currentRate.setDouble("mem", currentSnapshot.getMemoryGb());
    currentRate.setDouble("disk", currentSnapshot.getDiskGb());
    // This month's aggregate
    ResourceAllocation thisMonth = meteringData.getThisMonth();
    Cursor thismonth = root.setObject("thismonth");
    thismonth.setDouble("cpu", thisMonth.getCpuCores());
    thismonth.setDouble("mem", thisMonth.getMemoryGb());
    thismonth.setDouble("disk", thisMonth.getDiskGb());
    // Last month's aggregate
    ResourceAllocation lastMonth = meteringData.getLastMonth();
    Cursor lastmonth = root.setObject("lastmonth");
    lastmonth.setDouble("cpu", lastMonth.getCpuCores());
    lastmonth.setDouble("mem", lastMonth.getMemoryGb());
    lastmonth.setDouble("disk", lastMonth.getDiskGb());
    // Per-instance history, split into parallel cpu/mem/disk time series keyed by instance name
    Map<ApplicationId, List<ResourceSnapshot>> history = meteringData.getSnapshotHistory();
    Cursor details = root.setObject("details");
    Cursor detailsCpu = details.setObject("cpu");
    Cursor detailsMem = details.setObject("mem");
    Cursor detailsDisk = details.setObject("disk");
    history.forEach((applicationId, resources) -> {
        String instanceName = applicationId.instance().value();
        Cursor detailsCpuApp = detailsCpu.setObject(instanceName);
        Cursor detailsMemApp = detailsMem.setObject(instanceName);
        Cursor detailsDiskApp = detailsDisk.setObject(instanceName);
        Cursor detailsCpuData = detailsCpuApp.setArray("data");
        Cursor detailsMemData = detailsMemApp.setArray("data");
        Cursor detailsDiskData = detailsDiskApp.setArray("data");
        resources.forEach(resourceSnapshot -> {
            // Each data point is { unixms, value }
            Cursor cpu = detailsCpuData.addObject();
            cpu.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
            cpu.setDouble("value", resourceSnapshot.getCpuCores());
            Cursor mem = detailsMemData.addObject();
            mem.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
            mem.setDouble("value", resourceSnapshot.getMemoryGb());
            Cursor disk = detailsDiskData.addObject();
            disk.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
            disk.setDouble("value", resourceSnapshot.getDiskGb());
        });
    });
    return new SlimeJsonResponse(slime);
}
/** Returns the change (platform and/or application revision) currently rolling out, if any. */
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    Change change = instance.change();
    if ( ! change.isEmpty()) {
        change.platform().ifPresent(platform -> root.setString("platform", platform.toString()));
        change.application().ifPresent(revision -> root.setString("application", revision.id()));
        root.setBool("pinned", change.isPinned());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns whether the given deployment is currently suspended by the orchestrator. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    slime.setObject().setBool("suspended", controller.applications().isSuspended(deploymentId));
    return new SlimeJsonResponse(slime);
}
private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    // Lists the services of the given deployment, as reported by the config servers.
    ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
    ZoneId zone = requireZone(environment, region);
    ApplicationId id = new ApplicationId.Builder().tenant(tenantName)
                                                  .applicationName(applicationName)
                                                  .instanceName(instanceName)
                                                  .build();
    ServiceApiResponse response = new ServiceApiResponse(zone,
                                                         id,
                                                         controller.zoneRegistry().getConfigServerApiUris(zone),
                                                         request.getUri());
    response.setResponse(applicationView);
    return response;
}
private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));

    // Cluster-controller status pages are returned as HTML rather than proxied as a generic service response.
    if ("container-clustercontroller".equals(serviceName) && restPath.contains("/status/")) {
        String[] parts = restPath.split("/status/");
        // A path such as "foo/status/" splits into a single element (trailing empty strings are
        // discarded by String.split), which previously caused an ArrayIndexOutOfBoundsException
        // and thus a 500; report a client error instead.
        if (parts.length < 2)
            throw new IllegalArgumentException("Expected a path on the form <cluster>/status/<api>, but got '" + restPath + "'");
        String result = controller.serviceRegistry().configServer().getClusterControllerStatus(deploymentId, parts[0], parts[1]);
        return new HtmlResponse(result);
    }

    // All other services: proxy the raw response from the config server.
    Map<?,?> result = controller.serviceRegistry().configServer().getServiceApiResponse(deploymentId, serviceName, restPath);
    ServiceApiResponse response = new ServiceApiResponse(deploymentId.zoneId(),
                                                         deploymentId.applicationId(),
                                                         controller.zoneRegistry().getConfigServerApiUris(deploymentId.zoneId()),
                                                         request.getUri());
    response.setResponse(result, serviceName, restPath);
    return response;
}
private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, String restPath, HttpRequest request) {
    // Proxies a request for application package content of the given deployment to the config server.
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deployment = new DeploymentId(application, requireZone(environment, region));
    return controller.serviceRegistry().configServer().getApplicationPackageContent(deployment, "/" + restPath, request.getUri());
}
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName); // 404 when the tenant does not exist, rather than implicitly creating it
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    // Respond with the updated tenant.
    return tenant(controller.tenants().require(tenant), request);
}
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    // Creates the tenant from the access-control specification and credentials in the request body,
    // then responds with the newly stored tenant.
    TenantName name = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().create(accessControlRequests.specification(name, requestObject),
                                accessControlRequests.credentials(name, requestObject, request.getJDiscRequest()));
    return tenant(controller.tenants().require(name), request);
}
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    // Creates the application; the creation itself is the side effect, so the returned
    // Application object is not needed here.
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Inspector requestObject = toSlime(request.getData()).get();
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    controller.applications().createApplication(id, credentials);
    Slime slime = new Slime();
    toSlime(id, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    // Creating an instance implicitly creates the owning application when it does not yet exist.
    if (controller.applications().getApplication(applicationId).isEmpty())
        createApplication(tenantName, applicationName, request);
    ApplicationId instanceId = applicationId.instance(instanceName);
    controller.applications().createInstance(instanceId);
    Slime slime = new Slime();
    toSlime(instanceId, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
request = controller.auditLogger().log(request);
// The request body is the bare version string; an empty version means "current system version".
String versionString = readToString(request.getData());
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
StringBuilder response = new StringBuilder();
// Version validation and triggering happen under the application lock, so the change cannot
// race with concurrent modifications of the same application.
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
Version version = Version.fromString(versionString);
VersionStatus versionStatus = controller.readVersionStatus();
if (version.equals(Version.emptyVersion))
version = controller.systemVersion(versionStatus);
// Refuse versions not active in this system, listing the valid alternatives in the error.
if (!versionStatus.isActive(version))
throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
"Version is not active in this system. " +
"Active versions: " + versionStatus.versions()
.stream()
.map(VespaVersion::versionNumber)
.map(Version::toString)
.collect(joining(", ")));
Change change = Change.of(version);
if (pin)
change = change.withPin(); // pinning keeps the instance on this version until unpinned
controller.applications().deploymentTrigger().forceChange(id, change);
response.append("Triggered ").append(change).append(" for ").append(id);
});
return new MessageResponse(response.toString());
}
/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    controller.auditLogger().log(request);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        // Fail with a clear client error instead of an unchecked Optional.get(), which threw an
        // opaque NoSuchElementException (HTTP 500) when no package had ever been submitted.
        Change change = Change.of(application.get().latestVersion()
                                             .orElseThrow(() -> new IllegalArgumentException(
                                                     "No application package has been submitted for " + id)));
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Change current = application.get().require(id.instance()).change();
        if (current.isEmpty()) {
            response.append("No deployment in progress for ").append(id).append(" at this time");
            return;
        }
        // The choice string names a ChangesToCancel constant, case-insensitively.
        controller.applications().deploymentTrigger().cancelChange(id, ChangesToCancel.valueOf(choice.toUpperCase()));
        response.append("Changed deployment from '").append(current)
                .append("' to '").append(controller.applications().requireInstance(id).change())
                .append("' for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */
private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    // Comma-separated filters; blank entries are ignored, and an absent parameter means "all".
    List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream()
                                        .flatMap(clusters -> Stream.of(clusters.split(",")))
                                        .filter(cluster -> ! cluster.isBlank())
                                        .collect(toUnmodifiableList());
    List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream()
                                         .flatMap(types -> Stream.of(types.split(",")))
                                         .filter(type -> ! type.isBlank())
                                         .collect(toUnmodifiableList());
    controller.applications().reindex(id, zone, clusterNames, documentTypes, request.getBooleanProperty("indexedOnly"));
    // The cluster and document-type clauses are independent. Previously the "for types" clause was
    // nested inside the cluster clause, so a request giving only documentType silently omitted the
    // types from the confirmation message.
    return new MessageResponse("Requested reindexing of " + id + " in " + zone +
                               (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames)) +
                               (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)));
}
/** Gets reindexing status of an application in a zone. */
private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);
Slime slime = new Slime();
Cursor root = slime.setObject();
root.setBool("enabled", reindexing.enabled());
// Clusters, and the entries within each cluster, are emitted sorted by key for stable output.
Cursor clustersArray = root.setArray("clusters");
reindexing.clusters().entrySet().stream().sorted(comparingByKey())
.forEach(cluster -> {
Cursor clusterObject = clustersArray.addObject();
clusterObject.setString("name", cluster.getKey());
// "pending": document types whose reindexing has been requested but not yet started,
// keyed by the config generation that must be reached first.
Cursor pendingArray = clusterObject.setArray("pending");
cluster.getValue().pending().entrySet().stream().sorted(comparingByKey())
.forEach(pending -> {
Cursor pendingObject = pendingArray.addObject();
pendingObject.setString("type", pending.getKey());
pendingObject.setLong("requiredGeneration", pending.getValue());
});
// "ready": document types with a reindexing status; details are serialized by setStatus.
Cursor readyArray = clusterObject.setArray("ready");
cluster.getValue().ready().entrySet().stream().sorted(comparingByKey())
.forEach(ready -> {
Cursor readyObject = readyArray.addObject();
readyObject.setString("type", ready.getKey());
setStatus(readyObject, ready.getValue());
});
});
return new SlimeJsonResponse(slime);
}
// Serializes a reindexing status into the given object; fields whose value is absent are omitted.
// Note: package-private so it is reachable from tests or collaborators — TODO confirm intent.
void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
status.readyAt().ifPresent(readyAt -> statusObject.setLong("readyAtMillis", readyAt.toEpochMilli()));
status.startedAt().ifPresent(startedAt -> statusObject.setLong("startedAtMillis", startedAt.toEpochMilli()));
status.endedAt().ifPresent(endedAt -> statusObject.setLong("endedAtMillis", endedAt.toEpochMilli()));
// State is mapped to its lowercase wire name by the private toString(State) helper.
status.state().map(ApplicationApiHandler::toString).ifPresent(state -> statusObject.setString("state", state));
status.message().ifPresent(message -> statusObject.setString("message", message));
status.progress().ifPresent(progress -> statusObject.setDouble("progress", progress));
}
// Maps a reindexing state to its lowercase wire name; unknown states map to null,
// which callers treat as "omit the field".
private static String toString(ApplicationReindexing.State state) {
    if (state == ApplicationReindexing.State.PENDING) return "pending";
    if (state == ApplicationReindexing.State.RUNNING) return "running";
    if (state == ApplicationReindexing.State.FAILED) return "failed";
    if (state == ApplicationReindexing.State.SUCCESSFUL) return "successful";
    return null;
}
/** Enables reindexing of an application in a zone. */
private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    controller.applications().enableReindexing(id, zone);
    StringBuilder message = new StringBuilder("Enabled reindexing of ").append(id).append(" in ").append(zone);
    return new MessageResponse(message.toString());
}
/** Disables reindexing of an application in a zone. */
private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    controller.applications().disableReindexing(id, zone);
    StringBuilder message = new StringBuilder("Disabled reindexing of ").append(id).append(" in ").append(zone);
    return new MessageResponse(message.toString());
}
/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    // Each filter dimension is optional; an absent query parameter leaves that dimension unrestricted.
    Optional<HostName> hostName = Optional.ofNullable(request.getProperty("hostname")).map(HostName::from);
    Optional<ClusterSpec.Type> clusterType = Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from);
    Optional<ClusterSpec.Id> clusterId = Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from);
    controller.applications().restart(deploymentId, new RestartFilter().withHostName(hostName)
                                                                       .withClusterType(clusterType)
                                                                       .withClusterId(clusterId));
    return new MessageResponse("Requested restart of " + deploymentId);
}
/** Set suspension status of the given deployment. */
private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    controller.applications().setSuspension(deploymentId, suspend);
    String verb = suspend ? "Suspended" : "Resumed";
    return new MessageResponse(verb + " orchestration of " + deploymentId);
}
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
    // Only operators may bypass the pipeline for non-manual (i.e. production/test) environments.
    if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");

    Map<String, byte[]> dataParts = parseDataParts(request);
    // Use the same key constant for the presence check and the read; the check previously used the
    // literal "applicationZip" while the read used EnvironmentResource.APPLICATION_ZIP, which could
    // diverge and make the read return null even though the check passed.
    if ( ! dataParts.containsKey(EnvironmentResource.APPLICATION_ZIP))
        throw new IllegalArgumentException("Missing required form part '" + EnvironmentResource.APPLICATION_ZIP + "'");
    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));

    controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                     Optional.of(id.instance()),
                                                                     Optional.of(type.zone(controller.system())),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));

    // An explicit platform version may be given in the optional "deployOptions" JSON form part.
    Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions"))
                                        .map(json -> SlimeUtils.jsonToSlime(json).get())
                                        .flatMap(options -> optional("vespaVersion", options))
                                        .map(Version::fromString);

    controller.jobController().deploy(id, type, version, applicationPackage);
    RunId runId = controller.jobController().last(id, type).get().id();
    Slime slime = new Slime();
    Cursor rootObject = slime.setObject();
    rootObject.setString("message", "Deployment started in " + runId +
                                    ". This may take about 15 minutes the first time.");
    rootObject.setLong("run", runId.number());
    return new SlimeJsonResponse(slime);
}
/**
 * Deploys an application package directly to the given zone. Handles both the system proxy
 * application (which takes no options) and normal applications, which may supply the package
 * inline, reference a previously built version, or redeploy what is already running.
 */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
Map<String, byte[]> dataParts = parseDataParts(request);
if ( ! dataParts.containsKey("deployOptions"))
return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
/*
* Special handling of the proxy application (the only system application with an application package)
* Setting any other deployOptions here is not supported for now (e.g. specifying version), but
* this might be handy later to handle emergency downgrades.
*/
boolean isZoneApplication = SystemApplication.proxy.id().equals(applicationId);
if (isZoneApplication) {
// The client may send "null" as a literal string here — treat that the same as absent.
String versionStr = deployOptions.field("vespaVersion").asString();
boolean versionPresent = !versionStr.isEmpty() && !versionStr.equals("null");
if (versionPresent) {
throw new RuntimeException("Version not supported for system applications");
}
// Refuse while the system itself is upgrading, or before a system version is known.
VersionStatus versionStatus = controller.readVersionStatus();
if (versionStatus.isUpgrading()) {
throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
}
Optional<VespaVersion> systemVersion = versionStatus.systemVersion();
if (systemVersion.isEmpty()) {
throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
}
ActivateResult result = controller.applications()
.deploySystemApplicationPackage(SystemApplication.proxy, zone, systemVersion.get().versionNumber());
return new SlimeJsonResponse(toSlime(result));
}
/*
* Normal applications from here
*/
Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip"))
.map(ApplicationPackage::new);
// NOTE(review): this local appears unused below — confirm whether the lookup is needed for its side effects.
Optional<com.yahoo.vespa.hosted.controller.Application> application = controller.applications().getApplication(TenantAndApplicationId.from(applicationId));
// Either both of sourceRevision and buildNumber are given (referencing a stored package), or neither.
Inspector sourceRevision = deployOptions.field("sourceRevision");
Inspector buildNumber = deployOptions.field("buildNumber");
if (sourceRevision.valid() != buildNumber.valid())
throw new IllegalArgumentException("Source revision and build number must both be provided, or not");
Optional<ApplicationVersion> applicationVersion = Optional.empty();
if (sourceRevision.valid()) {
if (applicationPackage.isPresent())
throw new IllegalArgumentException("Application version and application package can't both be provided.");
applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision),
buildNumber.asLong()));
applicationPackage = Optional.of(controller.applications().getApplicationPackage(applicationId,
applicationVersion.get()));
}
boolean deployDirectly = deployOptions.field("deployDirectly").asBool();
Optional<Version> vespaVersion = optional("vespaVersion", deployOptions).map(Version::new);
// Direct deployment with nothing specified means: redeploy exactly what is already running in the zone.
if (deployDirectly && applicationPackage.isEmpty() && applicationVersion.isEmpty() && vespaVersion.isEmpty()) {
Optional<Deployment> deployment = controller.applications().getInstance(applicationId)
.map(Instance::deployments)
.flatMap(deployments -> Optional.ofNullable(deployments.get(zone)));
if(deployment.isEmpty())
throw new IllegalArgumentException("Can't redeploy application, no deployment currently exist");
ApplicationVersion version = deployment.get().applicationVersion();
if(version.isUnknown())
throw new IllegalArgumentException("Can't redeploy application, application version is unknown");
applicationVersion = Optional.of(version);
vespaVersion = Optional.of(deployment.get().version());
applicationPackage = Optional.of(controller.applications().getApplicationPackage(applicationId,
applicationVersion.get()));
}
DeployOptions deployOptionsJsonClass = new DeployOptions(deployDirectly,
vespaVersion,
deployOptions.field("ignoreValidationErrors").asBool(),
deployOptions.field("deployCurrentVersion").asBool());
// Identity configuration is verified against the authenticated user before activation.
applicationPackage.ifPresent(aPackage -> controller.applications().verifyApplicationIdentityConfiguration(applicationId.tenant(),
Optional.of(applicationId.instance()),
Optional.of(zone),
aPackage,
Optional.of(requireUserPrincipal(request))));
ActivateResult result = controller.applications().deploy(applicationId,
zone,
applicationPackage,
applicationVersion,
deployOptionsJsonClass);
return new SlimeJsonResponse(toSlime(result));
}
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    // Deleting an unknown tenant is reported as 404 rather than as a failed delete.
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");

    TenantName name = tenant.get().name();
    Credentials credentials = accessControlRequests.credentials(name,
                                                                toSlime(request.getData()).get(),
                                                                request.getJDiscRequest());
    controller.tenants().delete(name, credentials);
    // Respond with the (now deleted) tenant, as it looked before deletion.
    return tenant(tenant.get(), request);
}
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    // Deletion requires access-control credentials taken from the request body.
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Inspector requestObject = toSlime(request.getData()).get();
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    controller.applications().deleteApplication(id, credentials);
    return new MessageResponse("Deleted application " + id);
}
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    ApplicationId instanceId = id.instance(instanceName);
    controller.applications().deleteInstance(instanceId);
    // Removing the last instance also removes the application itself, which requires credentials.
    if (controller.applications().requireApplication(id).instances().isEmpty()) {
        Credentials credentials = accessControlRequests.credentials(id.tenant(),
                                                                    toSlime(request.getData()).get(),
                                                                    request.getJDiscRequest());
        controller.applications().deleteApplication(id, credentials);
    }
    return new MessageResponse("Deleted instance " + instanceId.toFullString());
}
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    // Removes the deployment from the given zone.
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId id = new DeploymentId(application, requireZone(environment, region));
    controller.applications().deactivate(id.applicationId(), id.zoneId());
    return new MessageResponse("Deactivated " + id);
}
/** Returns test config for indicated job, with production deployments of the default instance. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
// Production deployments are always taken from the default instance, regardless of which
// instance the job runs for.
ApplicationId defaultInstanceId = TenantAndApplicationId.from(id).defaultInstance();
HashSet<DeploymentId> deployments = controller.applications()
.getInstance(defaultInstanceId).stream()
.flatMap(instance -> instance.productionDeployments().keySet().stream())
.map(zone -> new DeploymentId(defaultInstanceId, zone))
.collect(Collectors.toCollection(HashSet::new));
var testedZone = type.zone(controller.system());
// For non-production jobs, also include the zone the test itself runs against.
if ( ! type.isProduction())
deployments.add(new DeploymentId(id, testedZone));
return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
type,
false,
controller.routing().zoneEndpointsOf(deployments),
controller.applications().reachableContentClustersByZone(deployments)));
}
// Parses a source revision from JSON; all three fields are required to identify a revision.
private static SourceRevision toSourceRevision(Inspector object) {
    Inspector repository = object.field("repository");
    Inspector branch = object.field("branch");
    Inspector commit = object.field("commit");
    if ( ! (repository.valid() && branch.valid() && commit.valid()))
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    return new SourceRevision(repository.asString(), branch.asString(), commit.asString());
}
// Looks up the tenant, throwing NotExistsException (mapped to 404) when it is unknown.
private Tenant getTenantOrThrow(String tenantName) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty())
        throw new NotExistsException(new TenantId(tenantName));
    return tenant.get();
}
// Serializes a full tenant view: type-specific metadata, applications (optionally recursed into),
// and general metadata. Athenz and cloud tenants carry different fields.
private void toSlime(Cursor object, Tenant tenant, HttpRequest request) {
object.setString("tenant", tenant.name().value());
object.setString("type", tenantType(tenant));
List<com.yahoo.vespa.hosted.controller.Application> applications = controller.applications().asList(tenant.name());
switch (tenant.type()) {
case athenz:
// Athenz tenants: domain, property and optional contact information.
AthenzTenant athenzTenant = (AthenzTenant) tenant;
object.setString("athensDomain", athenzTenant.domain().getName());
object.setString("property", athenzTenant.property().id());
athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
athenzTenant.contact().ifPresent(c -> {
object.setString("propertyUrl", c.propertyUrl().toString());
object.setString("contactsUrl", c.url().toString());
object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
// "contacts" is an array of arrays: one inner array of names per contact group.
Cursor contactsArray = object.setArray("contacts");
c.persons().forEach(persons -> {
Cursor personArray = contactsArray.addArray();
persons.forEach(personArray::addString);
});
});
break;
case cloud: {
// Cloud tenants: creator, developer keys, secret stores and quota usage.
CloudTenant cloudTenant = (CloudTenant) tenant;
cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName()));
Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
cloudTenant.developerKeys().forEach((key, user) -> {
Cursor keyObject = pemDeveloperKeysArray.addObject();
keyObject.setString("key", KeyUtils.toPem(key));
keyObject.setString("user", user.getName());
});
toSlime(object, cloudTenant.tenantSecretStores());
// Quota usage is the sum over all of the tenant's applications.
var tenantQuota = controller.serviceRegistry().billingController().getQuota(tenant.name());
var usedQuota = applications.stream()
.map(com.yahoo.vespa.hosted.controller.Application::quotaUsage)
.reduce(QuotaUsage.none, QuotaUsage::add);
toSlime(tenantQuota, usedQuota, object.setObject("quota"));
break;
}
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
// Instances are listed either fully (recursive) or as name-and-url references, and may be
// limited to production instances only, depending on request parameters.
Cursor applicationArray = object.setArray("applications");
for (com.yahoo.vespa.hosted.controller.Application application : applications) {
DeploymentStatus status = controller.jobController().deploymentStatus(application);
for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
: application.instances().values())
if (recurseOverApplications(request))
toSlime(applicationArray.addObject(), instance, status, request);
else
toSlime(instance.id(), applicationArray.addObject(), request);
}
tenantMetaDataToSlime(tenant, object.setObject("metaData"));
}
// Serializes quota limits and usage; a missing budget is rendered as an explicit JSON null.
private void toSlime(Quota quota, QuotaUsage usage, Cursor object) {
    if (quota.budget().isPresent())
        object.setDouble("budget", quota.budget().get().doubleValue());
    else
        object.setNix("budget");
    object.setDouble("budgetUsed", usage.rate());
    quota.maxClusterSize().ifPresent(size -> object.setLong("clusterSize", size));
}
// Serializes cluster resources, including an estimated cost rounded to two decimals.
private void toSlime(ClusterResources resources, Cursor object) {
    object.setLong("nodes", resources.nodes());
    object.setLong("groups", resources.groups());
    toSlime(resources.nodeResources(), object.setObject("nodeResources"));
    // Non-public systems divide the raw node cost by 3 — presumably an internal pricing
    // convention; confirm before changing.
    double costDivisor = controller.zoneRegistry().system().isPublic() ? 1.0 : 3.0;
    double rawCost = resources.nodes() * resources.nodeResources().cost();
    object.setDouble("cost", Math.round(rawCost * 100.0 / costDivisor) / 100.0);
}
// Serializes per-dimension utilization figures for a cluster.
private void utilizationToSlime(Cluster.Utilization utilization, Cursor object) {
    object.setDouble("cpu", utilization.cpu());
    object.setDouble("memory", utilization.memory());
    object.setDouble("disk", utilization.disk());
}
// Serializes each scaling event as {from, to, at} into the given array.
private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) {
    scalingEvents.forEach(event -> {
        Cursor eventObject = scalingEventsArray.addObject();
        toSlime(event.from(), eventObject.setObject("from"));
        toSlime(event.to(), eventObject.setObject("to"));
        eventObject.setLong("at", event.at().toEpochMilli());
    });
}
// Serializes the per-node resource specification. Field order is kept as-is since it is
// reflected in the rendered JSON.
private void toSlime(NodeResources resources, Cursor target) {
    target.setDouble("vcpu", resources.vcpu());
    target.setDouble("memoryGb", resources.memoryGb());
    target.setDouble("diskGb", resources.diskGb());
    target.setDouble("bandwidthGbps", resources.bandwidthGbps());
    target.setString("diskSpeed", valueOf(resources.diskSpeed()));
    target.setString("storageType", valueOf(resources.storageType()));
}
// Serializes the compact tenant entry used in the tenants list: name, minimal per-type
// metadata, and a self url.
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
object.setString("tenant", tenant.name().value());
Cursor metaData = object.setObject("metaData");
metaData.setString("type", tenantType(tenant));
switch (tenant.type()) {
case athenz:
AthenzTenant athenzTenant = (AthenzTenant) tenant;
metaData.setString("athensDomain", athenzTenant.domain().getName());
metaData.setString("property", athenzTenant.property().id());
break;
// Cloud tenants carry no extra list-level metadata.
case cloud: break;
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
// Serializes tenant activity metadata: creation time, most recent dev deployment, most recent
// production submission, and last logins per user level. Absent timestamps are omitted.
private void tenantMetaDataToSlime(Tenant tenant, Cursor object) {
List<com.yahoo.vespa.hosted.controller.Application> applications = controller.applications().asList(tenant.name());
// Latest start of any dev-environment job run, across all instances of all applications.
Optional<Instant> lastDev = applications.stream()
.flatMap(application -> application.instances().values().stream())
.flatMap(instance -> controller.jobController().jobs(instance.id()).stream()
.filter(jobType -> jobType.environment() == Environment.dev)
.flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream()))
.map(Run::start)
.max(Comparator.naturalOrder());
// Latest build time of any submitted application package.
Optional<Instant> lastSubmission = applications.stream()
.flatMap(app -> app.latestVersion().flatMap(ApplicationVersion::buildTime).stream())
.max(Comparator.naturalOrder());
object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli());
lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli()));
lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli()));
tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user)
.ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli()));
tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer)
.ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli()));
tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator)
.ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli()));
}
/** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to given query*/
private URI withPathAndQuery(String newPath, String newQuery, URI uri) {
    try {
        return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null);
    } catch (URISyntaxException e) {
        // All components come from an already-parsed URI, so this cannot trigger in practice.
        throw new RuntimeException("Will not happen", e);
    }
}
/** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path, and the query removed. */
private URI withPath(String newPath, URI uri) {
return withPathAndQuery(newPath, null, uri);
}
// Builds the canonical application/v4 REST path for the given deployment.
private String toPath(DeploymentId id) {
    ApplicationId application = id.applicationId();
    ZoneId zone = id.zoneId();
    return path("/application", "v4",
                "tenant", application.tenant(),
                "application", application.application(),
                "instance", application.instance(),
                "environment", zone.environment(),
                "region", zone.region());
}
// Parses the given string as a long; null means "not given" and yields the default,
// while an unparseable value is reported as a client error.
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null)
        return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    } catch (NumberFormatException e) {
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'");
    }
}
// Serializes a summary of a job run: id, target versions, reason and timestamp.
private void toSlime(Run run, Cursor object) {
    object.setLong("id", run.id().number());
    object.setString("version", run.versions().targetPlatform().toFullString());
    var targetApplication = run.versions().targetApplication();
    if ( ! targetApplication.isUnknown())
        toSlime(targetApplication, object.setObject("revision"));
    object.setString("reason", "unknown reason");
    // "at" is the end time if the run has finished, otherwise its start time.
    object.setLong("at", run.end().orElse(run.start()).toEpochMilli());
}
// Reads a JSON request body (capped at ~1 MB) into a Slime structure.
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        // Previously thrown without the cause, which hid the actual I/O failure from logs.
        throw new RuntimeException(e);
    }
}
// Returns the authenticated principal of the request; a missing principal indicates a broken
// filter chain and is reported as an internal server error.
private static Principal requireUserPrincipal(HttpRequest request) {
    return Optional.ofNullable(request.getJDiscRequest().getUserPrincipal())
                   .orElseThrow(() -> new InternalServerErrorException("Expected a user principal"));
}
// Returns the named field, failing with a client error when it is absent.
private Inspector mandatory(String key, Inspector object) {
    Inspector field = object.field(key);
    if ( ! field.valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return field;
}
// Returns the string value of the named field, if present.
private Optional<String> optional(String key, Inspector object) {
return SlimeUtils.optionalString(object.field(key));
}
// Joins the given elements (via their toString) with '/'; no leading or trailing separator is added.
private static String path(Object... elements) {
return Joiner.on("/").join(elements);
}
// Serializes a tenant/application reference together with its self url.
private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    String selfPath = "/application/v4" +
                      "/tenant/" + id.tenant().value() +
                      "/application/" + id.application().value();
    object.setString("url", withPath(selfPath, request.getUri()).toString());
}
private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
object.setString("tenant", id.tenant().value());
object.setString("application", id.application().value());
object.setString("instance", id.instance().value());
object.setString("url", withPath("/application/v4" +
"/tenant/" + id.tenant().value() +
"/application/" + id.application().value() +
"/instance/" + id.instance().value(),
request.getUri()).toString());
}
private Slime toSlime(ActivateResult result) {
Slime slime = new Slime();
Cursor object = slime.setObject();
object.setString("revisionId", result.revisionId().id());
object.setLong("applicationZipSize", result.applicationZipSizeBytes());
Cursor logArray = object.setArray("prepareMessages");
if (result.prepareResponse().log != null) {
for (Log logMessage : result.prepareResponse().log) {
Cursor logObject = logArray.addObject();
logObject.setLong("time", logMessage.time);
logObject.setString("level", logMessage.level);
logObject.setString("message", logMessage.message);
}
}
Cursor changeObject = object.setObject("configChangeActions");
Cursor restartActionsArray = changeObject.setArray("restart");
for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
Cursor restartActionObject = restartActionsArray.addObject();
restartActionObject.setString("clusterName", restartAction.clusterName);
restartActionObject.setString("clusterType", restartAction.clusterType);
restartActionObject.setString("serviceType", restartAction.serviceType);
serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
}
Cursor refeedActionsArray = changeObject.setArray("refeed");
for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
Cursor refeedActionObject = refeedActionsArray.addObject();
refeedActionObject.setString("name", refeedAction.name);
refeedActionObject.setString("documentType", refeedAction.documentType);
refeedActionObject.setString("clusterName", refeedAction.clusterName);
serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
}
return slime;
}
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
for (ServiceInfo serviceInfo : serviceInfoList) {
Cursor serviceInfoObject = array.addObject();
serviceInfoObject.setString("serviceName", serviceInfo.serviceName);
serviceInfoObject.setString("serviceType", serviceInfo.serviceType);
serviceInfoObject.setString("configId", serviceInfo.configId);
serviceInfoObject.setString("hostName", serviceInfo.hostName);
}
}
private void stringsToSlime(List<String> strings, Cursor array) {
for (String string : strings)
array.addString(string);
}
private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) {
Cursor secretStore = object.setArray("secretStores");
tenantSecretStores.forEach(store -> {
Cursor storeObject = secretStore.addObject();
storeObject.setString("name", store.getName());
storeObject.setString("awsId", store.getAwsId());
storeObject.setString("role", store.getRole());
});
}
private String readToString(InputStream stream) {
Scanner scanner = new Scanner(stream).useDelimiter("\\A");
if ( ! scanner.hasNext()) return null;
return scanner.next();
}
private static boolean recurseOverTenants(HttpRequest request) {
return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive"));
}
private static boolean recurseOverApplications(HttpRequest request) {
return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive"));
}
private static boolean recurseOverDeployments(HttpRequest request) {
return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive"));
}
private static boolean showOnlyProductionInstances(HttpRequest request) {
return "true".equals(request.getProperty("production"));
}
private static String tenantType(Tenant tenant) {
switch (tenant.type()) {
case athenz: return "ATHENS";
case cloud: return "CLOUD";
default: throw new IllegalArgumentException("Unknown tenant type: " + tenant.getClass().getSimpleName());
}
}
private static ApplicationId appIdFromPath(Path path) {
return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
}
private static JobType jobTypeFromPath(Path path) {
return JobType.fromJobName(path.get("jobtype"));
}
private static RunId runIdFromPath(Path path) {
long number = Long.parseLong(path.get("number"));
return new RunId(appIdFromPath(path), jobTypeFromPath(path), number);
}
private HttpResponse submit(String tenant, String application, HttpRequest request) {
Map<String, byte[]> dataParts = parseDataParts(request);
Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
long projectId = Math.max(1, submitOptions.field("projectId").asLong());
Optional<String> repository = optional("repository", submitOptions);
Optional<String> branch = optional("branch", submitOptions);
Optional<String> commit = optional("commit", submitOptions);
Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
: Optional.empty();
Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
Optional<String> authorEmail = optional("authorEmail", submitOptions);
sourceUrl.map(URI::create).ifPresent(url -> {
if (url.getHost() == null || url.getScheme() == null)
throw new IllegalArgumentException("Source URL must include scheme and host");
});
ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
Optional.empty(),
Optional.empty(),
applicationPackage,
Optional.of(requireUserPrincipal(request)));
return JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
tenant,
application,
sourceRevision,
authorEmail,
sourceUrl,
projectId,
applicationPackage,
dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP));
}
private HttpResponse removeAllProdDeployments(String tenant, String application) {
JobControllerApiHandlerHelper.submitResponse(controller.jobController(), tenant, application,
Optional.empty(), Optional.empty(), Optional.empty(), 1,
ApplicationPackage.deploymentRemoval(), new byte[0]);
return new MessageResponse("All deployments removed");
}
private ZoneId requireZone(String environment, String region) {
ZoneId zone = ZoneId.from(environment, region);
if (zone.environment() == Environment.prod && zone.region().value().equals("controller")) {
return zone;
}
if (!controller.zoneRegistry().hasZone(zone)) {
throw new IllegalArgumentException("Zone " + zone + " does not exist in this system");
}
return zone;
}
private static Map<String, byte[]> parseDataParts(HttpRequest request) {
String contentHash = request.getHeader("X-Content-Hash");
if (contentHash == null)
return new MultipartParser().parse(request);
DigestInputStream digester = Signatures.sha256Digester(request.getData());
var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
return dataParts;
}
private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
if (instance.rotations().isEmpty()) {
throw new NotExistsException("global rotation does not exist for " + instance);
}
if (endpointId.isPresent()) {
return instance.rotations().stream()
.filter(r -> r.endpointId().id().equals(endpointId.get()))
.map(AssignedRotation::rotationId)
.findFirst()
.orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() +
" does not exist for " + instance));
} else if (instance.rotations().size() > 1) {
throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
}
return instance.rotations().get(0).rotationId();
}
private static String rotationStateString(RotationState state) {
switch (state) {
case in: return "IN";
case out: return "OUT";
}
return "UNKNOWN";
}
private static String endpointScopeString(Endpoint.Scope scope) {
switch (scope) {
case region: return "region";
case global: return "global";
case zone: return "zone";
}
throw new IllegalArgumentException("Unknown endpoint scope " + scope);
}
private static String routingMethodString(RoutingMethod method) {
switch (method) {
case exclusive: return "exclusive";
case shared: return "shared";
case sharedLayer4: return "sharedLayer4";
}
throw new IllegalArgumentException("Unknown routing method " + method);
}
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName))
.filter(cls::isInstance)
.map(cls::cast)
.orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request"));
}
/** Returns whether given request is by an operator */
private static boolean isOperator(HttpRequest request) {
var securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
return securityContext.roles().stream()
.map(Role::definition)
.anyMatch(definition -> definition == RoleDefinition.hostedOperator);
}
} |
Good catch. Changed to `ErrorResponse.internalServerError()` | private HttpResponse validateSecretStore(String tenantName, String name, String region, String parameterName) {
var tenant = TenantName.from(tenantName);
if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
return ErrorResponse.badRequest("Tenant '" + tenant + "' is not a cloud tenant");
var cloudTenant = (CloudTenant)controller.tenants().require(tenant);
var tenantSecretStore = cloudTenant.tenantSecretStores()
.stream()
.filter(secretStore -> secretStore.getName().equals(name))
.findFirst();
var deployment = getActiveDeployment(tenant);
if (deployment.isEmpty())
return ErrorResponse.badRequest("Tenant '" + tenantName + "' has no active deployments");
if (tenantSecretStore.isEmpty())
return ErrorResponse.notFoundError("No secret store '" + name + "' configured for tenant '" + tenantName + "'");
var response = controller.serviceRegistry().configServer().validateSecretStore(deployment.get(), tenantSecretStore.get(), region, parameterName);
try {
var responseRoot = new Slime();
var responseCursor = responseRoot.setObject();
responseCursor.setString("target", deployment.get().toString());
var responseResultCursor = responseCursor.setObject("result");
var responseSlime = SlimeUtils.jsonToSlime(response);
SlimeUtils.copyObject(responseSlime.get(), responseResultCursor);
return new SlimeJsonResponse(responseRoot);
} catch (JsonParseException e) {
return new MessageResponse(response);
}
} | return new MessageResponse(response); | private HttpResponse validateSecretStore(String tenantName, String name, String region, String parameterName) {
var tenant = TenantName.from(tenantName);
if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
return ErrorResponse.badRequest("Tenant '" + tenant + "' is not a cloud tenant");
var cloudTenant = (CloudTenant)controller.tenants().require(tenant);
var tenantSecretStore = cloudTenant.tenantSecretStores()
.stream()
.filter(secretStore -> secretStore.getName().equals(name))
.findFirst();
var deployment = getActiveDeployment(tenant);
if (deployment.isEmpty())
return ErrorResponse.badRequest("Tenant '" + tenantName + "' has no active deployments");
if (tenantSecretStore.isEmpty())
return ErrorResponse.notFoundError("No secret store '" + name + "' configured for tenant '" + tenantName + "'");
var response = controller.serviceRegistry().configServer().validateSecretStore(deployment.get(), tenantSecretStore.get(), region, parameterName);
try {
var responseRoot = new Slime();
var responseCursor = responseRoot.setObject();
responseCursor.setString("target", deployment.get().toString());
var responseResultCursor = responseCursor.setObject("result");
var responseSlime = SlimeUtils.jsonToSlime(response);
SlimeUtils.copyObject(responseSlime.get(), responseResultCursor);
return new SlimeJsonResponse(responseRoot);
} catch (JsonParseException e) {
return ErrorResponse.internalServerError(response);
}
} | class ApplicationApiHandler extends LoggingRequestHandler {
private static final ObjectMapper jsonMapper = new ObjectMapper();
private static final String OPTIONAL_PREFIX = "/api";
private final Controller controller;
private final AccessControlRequests accessControlRequests;
private final TestConfigSerializer testConfigSerializer;
@Inject
public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx,
Controller controller,
AccessControlRequests accessControlRequests) {
super(parentCtx);
this.controller = controller;
this.accessControlRequests = accessControlRequests;
this.testConfigSerializer = new TestConfigSerializer(controller.system());
}
@Override
public Duration getTimeout() {
return Duration.ofMinutes(20);
}
@Override
public HttpResponse handle(HttpRequest request) {
try {
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
switch (request.getMethod()) {
case GET: return handleGET(path, request);
case PUT: return handlePUT(path, request);
case POST: return handlePOST(path, request);
case PATCH: return handlePATCH(path, request);
case DELETE: return handleDELETE(path, request);
case OPTIONS: return handleOPTIONS();
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (ForbiddenException e) {
return ErrorResponse.forbidden(Exceptions.toMessageString(e));
}
catch (NotAuthorizedException e) {
return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
}
catch (NotExistsException e) {
return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (ConfigServerException e) {
switch (e.getErrorCode()) {
case NOT_FOUND:
return new ErrorResponse(NOT_FOUND, e.getErrorCode().name(), Exceptions.toMessageString(e));
case ACTIVATION_CONFLICT:
return new ErrorResponse(CONFLICT, e.getErrorCode().name(), Exceptions.toMessageString(e));
case INTERNAL_SERVER_ERROR:
return new ErrorResponse(INTERNAL_SERVER_ERROR, e.getErrorCode().name(), Exceptions.toMessageString(e));
default:
return new ErrorResponse(BAD_REQUEST, e.getErrorCode().name(), Exceptions.toMessageString(e));
}
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
private HttpResponse handleGET(Path path, HttpRequest request) {
if (path.matches("/application/v4/")) return root(request);
if (path.matches("/application/v4/tenant")) return tenants(request);
if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/region/{region}/parameter-name/{parameter-name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), path.get("region"), path.get("parameter-name"));
if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/metering")) return metering(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return metrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handlePUT(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handlePOST(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes PATCH requests. Note: the instance path segment is accepted but ignored — the patch applies to the application. */
private HttpResponse handlePATCH(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/**
 * Routes DELETE requests to the matching handler. Route order matters: more specific
 * paths are listed before their prefixes, and the first match wins.
 */
private HttpResponse handleDELETE(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
    // Application-level deploy cancellation targets the "default" instance.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
    // DELETE of a pause resumes the job.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
    // Legacy path layout with the instance segment after the region.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Answers OPTIONS requests with the supported HTTP verbs; the body is intentionally empty. */
private HttpResponse handleOPTIONS() {
    EmptyResponse optionsResponse = new EmptyResponse();
    optionsResponse.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
    return optionsResponse;
}
/** Renders every tenant in full detail, as a single JSON array. */
private HttpResponse recursiveRoot(HttpRequest request) {
    Slime root = new Slime();
    Cursor tenantsArray = root.setArray();
    controller.tenants().asList().forEach(tenant -> toSlime(tenantsArray.addObject(), tenant, request));
    return new SlimeJsonResponse(root);
}
/** Root resource: full recursive listing when requested, otherwise just a link to the tenant resource. */
private HttpResponse root(HttpRequest request) {
    if (recurseOverTenants(request))
        return recursiveRoot(request);
    return new ResourceResponse(request, "tenant");
}
/** Renders a compact list entry for every tenant. */
private HttpResponse tenants(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantsArray = slime.setArray();
    controller.tenants().asList().forEach(tenant -> tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject()));
    return new SlimeJsonResponse(slime);
}
/** Renders the named tenant, or 404 if it does not exist. */
private HttpResponse tenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    return tenant(tenant.get(), request);
}
/** Renders the given tenant as a JSON object. */
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
    Slime response = new Slime();
    toSlime(response.setObject(), tenant, request);
    return new SlimeJsonResponse(response);
}
/** Renders the extended tenant info; only cloud tenants carry such info, others get a 404. */
private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
    return controller.tenants().get(TenantName.from(tenantName))
                     .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                     .map(CloudTenant.class::cast)
                     .map(cloudTenant -> tenantInfo(cloudTenant.info(), request))
                     .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
}
/**
 * Serializes tenant info to JSON. An empty info yields an empty object;
 * the field names here form the public wire format and must not change.
 */
private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) {
    Slime slime = new Slime();
    Cursor infoCursor = slime.setObject();
    if (!info.isEmpty()) {
        infoCursor.setString("name", info.name());
        infoCursor.setString("email", info.email());
        infoCursor.setString("website", info.website());
        infoCursor.setString("invoiceEmail", info.invoiceEmail());
        infoCursor.setString("contactName", info.contactName());
        infoCursor.setString("contactEmail", info.contactEmail());
        toSlime(info.address(), infoCursor);
        toSlime(info.billingContact(), infoCursor);
    }
    return new SlimeJsonResponse(slime);
}
/** Adds an "address" object under the given cursor; omitted entirely when the address is empty. */
private void toSlime(TenantInfoAddress address, Cursor parentCursor) {
    if (address.isEmpty()) return;
    Cursor addressCursor = parentCursor.setObject("address");
    addressCursor.setString("addressLines", address.addressLines());
    addressCursor.setString("postalCodeOrZip", address.postalCodeOrZip());
    addressCursor.setString("city", address.city());
    addressCursor.setString("stateRegionProvince", address.stateRegionProvince());
    addressCursor.setString("country", address.country());
}
/** Adds a "billingContact" object under the given cursor; omitted entirely when the contact is empty. */
private void toSlime(TenantInfoBillingContact billingContact, Cursor parentCursor) {
    if (billingContact.isEmpty()) return;
    Cursor addressCursor = parentCursor.setObject("billingContact");
    addressCursor.setString("name", billingContact.name());
    addressCursor.setString("email", billingContact.email());
    addressCursor.setString("phone", billingContact.phone());
    // The contact's own address is nested inside the billingContact object.
    toSlime(billingContact.address(), addressCursor);
}
/** Updates the extended info of the named tenant; only cloud tenants support this, others get a 404. */
private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) {
    return controller.tenants().get(TenantName.from(tenantName))
                     .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                     .map(CloudTenant.class::cast)
                     .map(cloudTenant -> updateTenantInfo(cloudTenant, request))
                     .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
}
/**
 * Returns the string value of the given field when present in the request,
 * or the given default when the field is missing.
 */
private String getString(Inspector field, String defaultValue) { // fixed typo: was 'defaultVale'
    return field.valid() ? field.asString() : defaultValue;
}
/**
 * Merges the fields present in the request body into the tenant's existing info and stores
 * the result under the tenant lock. Fields absent from the request keep their old values.
 *
 * Fixes two copy-paste bugs in the fallbacks: 'website' fell back to the old email,
 * and 'contactEmail' fell back to the old contact name.
 */
private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) {
    TenantInfo oldInfo = tenant.info();
    Inspector insp = toSlime(request.getData()).get();
    TenantInfo mergedInfo = TenantInfo.EMPTY
            .withName(getString(insp.field("name"), oldInfo.name()))
            .withEmail(getString(insp.field("email"), oldInfo.email()))
            .withWebsite(getString(insp.field("website"), oldInfo.website()))            // was oldInfo.email()
            .withInvoiceEmail(getString(insp.field("invoiceEmail"), oldInfo.invoiceEmail()))
            .withContactName(getString(insp.field("contactName"), oldInfo.contactName()))
            .withContactEmail(getString(insp.field("contactEmail"), oldInfo.contactEmail())) // was oldInfo.contactName()
            .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address()))
            .withBillingContact(updateTenantInfoBillingContact(insp.field("billingContact"), oldInfo.billingContact()));

    // Store the merged info under the tenant lock to avoid clobbering concurrent updates.
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });

    return new MessageResponse("Tenant info updated");
}
/**
 * Merges an address from the request into the old address.
 * Returns the old address untouched when the request has no address object at all;
 * otherwise each field present in the request overrides the old value.
 */
private TenantInfoAddress updateTenantInfoAddress(Inspector insp, TenantInfoAddress oldAddress) {
    if (!insp.valid()) return oldAddress;
    return TenantInfoAddress.EMPTY
            .withCountry(getString(insp.field("country"), oldAddress.country()))
            .withStateRegionProvince(getString(insp.field("stateRegionProvince"), oldAddress.stateRegionProvince()))
            .withCity(getString(insp.field("city"), oldAddress.city()))
            .withPostalCodeOrZip(getString(insp.field("postalCodeOrZip"), oldAddress.postalCodeOrZip()))
            .withAddressLines(getString(insp.field("addressLines"), oldAddress.addressLines()));
}
/**
 * Merges a billing contact from the request into the old contact.
 * Returns the old contact untouched when the request has no billingContact object;
 * otherwise each field present in the request overrides the old value.
 */
private TenantInfoBillingContact updateTenantInfoBillingContact(Inspector insp, TenantInfoBillingContact oldContact) {
    if (!insp.valid()) return oldContact;
    return TenantInfoBillingContact.EMPTY
            .withName(getString(insp.field("name"), oldContact.name()))
            .withEmail(getString(insp.field("email"), oldContact.email()))
            .withPhone(getString(insp.field("phone"), oldContact.phone()))
            .withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address()));
}
/**
 * Lists the applications of a tenant, each with its instances and resource URLs.
 * When applicationName is present, only that application is included.
 * Returns 404 when the tenant does not exist.
 */
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().get(tenantName).isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");

    Slime slime = new Slime();
    Cursor applicationArray = slime.setArray();
    for (com.yahoo.vespa.hosted.controller.Application application : controller.applications().asList(tenant)) {
        // Empty filter matches everything; a present filter must equal the application name exactly.
        if (applicationName.map(application.id().application().value()::equals).orElse(true)) {
            Cursor applicationObject = applicationArray.addObject();
            applicationObject.setString("tenant", application.id().tenant().value());
            applicationObject.setString("application", application.id().application().value());
            applicationObject.setString("url", withPath("/application/v4" +
                                                        "/tenant/" + application.id().tenant().value() +
                                                        "/application/" + application.id().application().value(),
                                                        request.getUri()).toString());
            Cursor instanceArray = applicationObject.setArray("instances");
            // The request may ask to hide non-production instances.
            for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
                                                                              : application.instances().keySet()) {
                Cursor instanceObject = instanceArray.addObject();
                instanceObject.setString("instance", instance.value());
                instanceObject.setString("url", withPath("/application/v4" +
                                                         "/tenant/" + application.id().tenant().value() +
                                                         "/application/" + application.id().application().value() +
                                                         "/instance/" + instance.value(),
                                                         request.getUri()).toString());
            }
        }
    }
    return new SlimeJsonResponse(slime);
}
/** Returns the application package last deployed to the given manually deployed (dev/perf) zone, as a zip. */
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
    if ( ! type.environment().isManuallyDeployed())
        throw new IllegalArgumentException("Only manually deployed zones have dev packages");

    ZoneId devZone = type.zone(controller.system());
    String filename = id.toFullString() + "." + devZone.value() + ".zip";
    return new ZipResponse(filename, controller.applications().applicationStore().getDev(id, devZone));
}
/**
 * Returns a submitted application package as a zip. The build is chosen from the
 * "build" query parameter when given; otherwise the latest submitted build is used.
 * Throws NotExistsException when no package exists for the resolved build.
 */
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
    var tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
    // NOTE(review): applicationId is computed but unused here — presumably leftover; confirm before removing.
    var applicationId = ApplicationId.from(tenantName, applicationName, InstanceName.defaultName().value());

    long buildNumber;
    var requestedBuild = Optional.ofNullable(request.getProperty("build")).map(build -> {
        try {
            return Long.parseLong(build);
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("Invalid build number", e);
        }
    });

    if (requestedBuild.isEmpty()) { // Fall back to latest build number
        var application = controller.applications().requireApplication(tenantAndApplication);
        var latestBuild = application.latestVersion().map(ApplicationVersion::buildNumber).orElse(OptionalLong.empty());
        if (latestBuild.isEmpty()) {
            throw new NotExistsException("No application package has been submitted for '" + tenantAndApplication + "'");
        }
        buildNumber = latestBuild.getAsLong();
    } else {
        buildNumber = requestedBuild.get();
    }

    var applicationPackage = controller.applications().applicationStore().find(tenantAndApplication.tenant(), tenantAndApplication.application(), buildNumber);
    var filename = tenantAndApplication + "-build" + buildNumber + ".zip";
    if (applicationPackage.isEmpty()) {
        throw new NotExistsException("No application package found for '" +
                                     tenantAndApplication +
                                     "' with build number " + buildNumber);
    }
    return new ZipResponse(filename, applicationPackage.get());
}
/** Renders a single application as JSON; throws NotExistsException when it is missing. */
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
    Slime response = new Slime();
    toSlime(response.setObject(), getApplication(tenantName, applicationName), request);
    return new SlimeJsonResponse(response);
}
/** Returns the Vespa version the application should compile against, as {"compileVersion": "..."}. */
private HttpResponse compileVersion(String tenantName, String applicationName) {
    String version = compileVersion(TenantAndApplicationId.from(tenantName, applicationName)).toFullString();
    Slime slime = new Slime();
    slime.setObject().setString("compileVersion", version);
    return new SlimeJsonResponse(slime);
}
/** Renders a single instance, including its deployment status, as JSON. */
private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    var application = getApplication(tenantName, applicationName);
    Slime slime = new Slime();
    toSlime(slime.setObject(), getInstance(tenantName, applicationName, instanceName),
            controller.jobController().deploymentStatus(application), request);
    return new SlimeJsonResponse(slime);
}
private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
Principal user = request.getJDiscRequest().getUserPrincipal();
String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
Slime root = new Slime();
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
tenant = tenant.withDeveloperKey(developerKey, user);
toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
controller.tenants().store(tenant);
});
return new SlimeJsonResponse(root);
}
/**
 * Finds some active deployment for the given tenant: the first zone of the first
 * instance (of the first application) that has any deployments, or empty if none.
 */
private Optional<DeploymentId> getActiveDeployment(TenantName tenant) {
    for (var application : controller.applications().asList(tenant)) {
        for (var instance : application.instances().values()) {
            Optional<ZoneId> zone = instance.deployments().keySet().stream().findFirst();
            if (zone.isPresent())
                return Optional.of(new DeploymentId(instance.id(), zone.get()));
        }
    }
    return Optional.empty();
}
/**
 * Removes the developer key given in the "key" body field (PEM) from the tenant,
 * and returns the resulting full key list. Cloud tenants only.
 *
 * Cleanup: removed an unused local ('user') that was looked up but never read.
 */
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    Slime root = new Slime();
    // Mutate and serialize under the tenant lock so the returned list reflects the stored state.
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withoutDeveloperKey(developerKey);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}
/** Serializes a key-to-owner map as an array of {"key": pem, "user": name} objects. */
private void toSlime(Cursor keysArray, Map<PublicKey, Principal> keys) {
    for (Map.Entry<PublicKey, Principal> entry : keys.entrySet()) {
        Cursor keyObject = keysArray.addObject();
        keyObject.setString("key", KeyUtils.toPem(entry.getKey()));
        keyObject.setString("user", entry.getValue().getName());
    }
}
/**
 * Adds the deploy key given in the "key" body field (PEM) to the application,
 * and returns the resulting full list of deploy keys.
 */
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
    String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
    Slime root = new Slime();
    // Mutate and serialize under the application lock so the returned list reflects the stored state.
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withDeployKey(deployKey);
        application.get().deployKeys().stream()
                   .map(KeyUtils::toPem)
                   .forEach(root.setObject().setArray("keys")::addString);
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}
/**
 * Removes the deploy key given in the "key" body field (PEM) from the application,
 * and returns the resulting full list of deploy keys.
 */
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
    String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
    Slime root = new Slime();
    // Mutate and serialize under the application lock so the returned list reflects the stored state.
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withoutDeployKey(deployKey);
        application.get().deployKeys().stream()
                   .map(KeyUtils::toPem)
                   .forEach(root.setObject().setArray("keys")::addString);
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}
/**
 * Registers a tenant secret store (AWS account + role) under the given name, creates the
 * backing tenant policy, and returns the tenant's resulting list of secret stores.
 * Cloud tenants only.
 *
 * Fix: the error messages were built with String.format applied to an already-concatenated
 * string — a '%' in the store's toString would have thrown UnknownFormatConversionException.
 * Plain concatenation is used instead; the message text is unchanged.
 */
private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

    var data = toSlime(request.getData()).get();
    var awsId = mandatory("awsId", data).asString();
    var externalId = mandatory("externalId", data).asString();
    var role = mandatory("role", data).asString();

    var tenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
    var tenantSecretStore = new TenantSecretStore(name, awsId, role);

    if (!tenantSecretStore.isValid()) {
        return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid");
    }
    if (tenant.tenantSecretStores().contains(tenantSecretStore)) {
        return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured");
    }

    controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role);
    controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId);
    // Store the new configuration under the tenant lock.
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withSecretStore(tenantSecretStore);
        controller.tenants().store(lockedTenant);
    });

    // Re-read so the response reflects the stored state.
    tenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
    var slime = new Slime();
    toSlime(slime.setObject(), tenant.tenantSecretStores());
    return new SlimeJsonResponse(slime);
}
/**
 * Deletes the named secret store from the tenant, removes the backing service-side store
 * and tenant policy, and returns the tenant's remaining secret stores.
 * Returns 404 when no store with that name exists.
 */
private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) {
    var tenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));

    var optionalSecretStore = tenant.tenantSecretStores().stream()
            .filter(secretStore -> secretStore.getName().equals(name))
            .findFirst();

    if (optionalSecretStore.isEmpty())
        return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found");

    var tenantSecretStore = optionalSecretStore.get();
    // External cleanup first, then remove from the tenant under lock.
    controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore);
    controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole());
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withoutSecretStore(tenantSecretStore);
        controller.tenants().store(lockedTenant);
    });

    // Re-read so the response reflects the stored state.
    tenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
    var slime = new Slime();
    toSlime(slime.setObject(), tenant.tenantSecretStores());
    return new SlimeJsonResponse(slime);
}
/**
 * Partially updates an application from the request body. Supported fields:
 * "majorVersion" (0 clears the pin) and "pemDeployKey" (adds a deploy key).
 * Responds with a human-readable summary of what changed.
 */
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        Inspector majorVersionField = requestObject.field("majorVersion");
        if (majorVersionField.valid()) {
            // A value of 0 means "unpin": the major version is cleared, not set to 0.
            Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
            application = application.withMajorVersion(majorVersion);
            messageBuilder.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
        }

        Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
        if (pemDeployKeyField.valid()) {
            String pemDeployKey = pemDeployKeyField.asString();
            PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
            application = application.withDeployKey(deployKey);
            messageBuilder.add("Added deploy key " + pemDeployKey);
        }

        controller.applications().store(application);
    });
    return new MessageResponse(messageBuilder.toString());
}
/** Looks up an application, throwing NotExistsException when it is missing. */
private com.yahoo.vespa.hosted.controller.Application getApplication(String tenantName, String applicationName) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    return controller.applications().getApplication(id)
                     .orElseThrow(() -> new NotExistsException(id + " not found"));
}
/** Looks up an instance, throwing NotExistsException when it is missing. */
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    return controller.applications().getInstance(id)
                     .orElseThrow(() -> new NotExistsException(id + " not found"));
}
/**
 * Lists the nodes allocated to a deployment, as reported by the node repository,
 * with per-node state, version, flavor, resources and cluster membership.
 */
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, id);

    Slime slime = new Slime();
    Cursor nodesArray = slime.setObject().setArray("nodes");
    for (Node node : nodes) {
        Cursor nodeObject = nodesArray.addObject();
        nodeObject.setString("hostname", node.hostname().value());
        nodeObject.setString("state", valueOf(node.state()));
        node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
        nodeObject.setString("orchestration", valueOf(node.serviceState()));
        nodeObject.setString("version", node.currentVersion().toString());
        nodeObject.setString("flavor", node.flavor());
        toSlime(node.resources(), nodeObject);
        // Legacy boolean kept alongside the structured resources object.
        nodeObject.setBool("fastDisk", node.resources().diskSpeed() == NodeResources.DiskSpeed.fast);
        nodeObject.setString("clusterId", node.clusterId());
        nodeObject.setString("clusterType", valueOf(node.clusterType()));
    }
    return new SlimeJsonResponse(slime);
}
/**
 * Renders the autoscaling view of a deployment's clusters: min/max/current resources,
 * the autoscaling target (only when it differs from current), suggestions,
 * utilization and recent scaling events.
 */
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);

    Slime slime = new Slime();
    Cursor clustersObject = slime.setObject().setObject("clusters");
    for (Cluster cluster : application.clusters().values()) {
        Cursor clusterObject = clustersObject.setObject(cluster.id().value());
        toSlime(cluster.min(), clusterObject.setObject("min"));
        toSlime(cluster.max(), clusterObject.setObject("max"));
        toSlime(cluster.current(), clusterObject.setObject("current"));
        // Only show a target when it meaningfully differs from the current resources.
        if (cluster.target().isPresent()
            && ! cluster.target().get().justNumbers().equals(cluster.current().justNumbers()))
            toSlime(cluster.target().get(), clusterObject.setObject("target"));
        cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested")));
        utilizationToSlime(cluster.utilization(), clusterObject.setObject("utilization"));
        scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
        clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus());
    }
    return new SlimeJsonResponse(slime);
}
/**
 * Serializes a node state for the API. Every serialized form equals the enum
 * constant's name; listing the known constants explicitly (rather than using
 * name() unconditionally) keeps unknown new states failing loudly.
 */
private static String valueOf(Node.State state) {
    switch (state) {
        case failed: case parked: case dirty: case ready:
        case active: case inactive: case reserved: case provisioned:
            return state.name();
        default:
            throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
    }
}
/**
 * Serializes a node's orchestration state for the API. Known states serialize as
 * their enum name; anything else — including Node.ServiceState.unknown itself —
 * serializes as "unknown".
 */
static String valueOf(Node.ServiceState state) {
    switch (state) {
        case expectedUp: case allowedDown: case permanentlyDown: case unorchestrated:
            return state.name();
        default:
            return "unknown";
    }
}
/**
 * Serializes a node cluster type for the API. Every serialized form equals the
 * enum constant's name; unknown types fail loudly.
 */
private static String valueOf(Node.ClusterType type) {
    switch (type) {
        case admin: case content: case container: case combined:
            return type.name();
        default:
            throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
    }
}
/**
 * Serializes a disk speed for the API. Every serialized form equals the enum
 * constant's name; unknown values fail loudly.
 */
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
    switch (diskSpeed) {
        case fast: case slow: case any:
            return diskSpeed.name();
        default:
            throw new IllegalArgumentException("Unknown disk speed '" + diskSpeed.name() + "'");
    }
}
/**
 * Serializes a storage type for the API. Every serialized form equals the enum
 * constant's name; unknown values fail loudly.
 */
private static String valueOf(NodeResources.StorageType storageType) {
    switch (storageType) {
        case remote: case local: case any:
            return storageType.name();
        default:
            throw new IllegalArgumentException("Unknown storage type '" + storageType.name() + "'");
    }
}
/**
 * Streams deployment logs from the config server straight to the client.
 * Query parameters are forwarded unmodified; the log stream is transferred
 * lazily when the response is rendered.
 */
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    DeploymentId deployment = new DeploymentId(application, zone);
    InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
    return new HttpResponse(200) {
        @Override
        public void render(OutputStream outputStream) throws IOException {
            // NOTE(review): the stream is not closed here — presumably the config server client
            // or jdisc handles cleanup; confirm before adding a close.
            logStream.transferTo(outputStream);
        }
    };
}
/** Fetches proton (content node) metrics for a deployment and renders them as JSON. */
private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    return buildResponseFromProtonMetrics(controller.serviceRegistry().configServer().getProtonMetrics(deployment));
}
/**
 * Wraps the given proton metrics in a {"metrics": [...]} JSON document.
 * Serialization failures are logged and answered with an empty 500 response.
 */
private JsonResponse buildResponseFromProtonMetrics(List<ProtonMetrics> protonMetrics) {
    try {
        var payload = jsonMapper.createObjectNode();
        var metricsArray = jsonMapper.createArrayNode();
        for (ProtonMetrics metrics : protonMetrics)
            metricsArray.add(metrics.toJson());
        payload.set("metrics", metricsArray);
        return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(payload));
    } catch (JsonProcessingException e) {
        log.log(Level.SEVERE, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
        return new JsonResponse(500, "");
    }
}
/**
 * Triggers a deployment job. With "reTrigger" in the body the currently failing run is
 * re-triggered directly; otherwise the job is force-triggered (optionally skipping tests
 * via "skipTests"), which may trigger several jobs. Responds with what was triggered.
 */
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    boolean requireTests = ! requestObject.field("skipTests").asBool();
    boolean reTrigger = requestObject.field("reTrigger").asBool();
    String triggered = reTrigger
                       ? controller.applications().deploymentTrigger()
                                   .reTrigger(id, type).type().jobName()
                       : controller.applications().deploymentTrigger()
                                   .forceTrigger(id, type, request.getJDiscRequest().getUserPrincipal().getName(), requireTests)
                                   .stream().map(job -> job.type().jobName()).collect(joining(", "));
    // forceTrigger may legitimately trigger nothing, in which case 'triggered' is empty.
    return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
                                                   : "Triggered " + triggered + " for " + id);
}
/** Pauses a deployment job for the maximum allowed duration. */
private HttpResponse pause(ApplicationId id, JobType type) {
    Instant pausedUntil = controller.clock().instant().plus(DeploymentTrigger.maxPause);
    controller.applications().deploymentTrigger().pauseJob(id, type, pausedUntil);
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
/** Resumes a paused deployment job. */
private HttpResponse resume(ApplicationId id, JobType type) {
    controller.applications().deploymentTrigger().resumeJob(id, type);
    return new MessageResponse(type.jobName() + " for " + id + " resumed");
}
/**
 * Serializes an application: ids, latest submitted version, pending and outstanding changes,
 * its instances (optionally production only), deploy keys, quality metrics, activity and
 * ownership/deployment issue references. The field layout is the public wire format.
 */
private void toSlime(Cursor object, com.yahoo.vespa.hosted.controller.Application application, HttpRequest request) {
    object.setString("tenant", application.id().tenant().value());
    object.setString("application", application.id().application().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + application.id().tenant().value() +
                                             "/application/" + application.id().application().value() +
                                             "/job/",
                                             request.getUri()).toString());

    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.latestVersion().ifPresent(version -> toSlime(version, object.setObject("latestVersion")));
    application.projectId().ifPresent(id -> object.setLong("projectId", id));

    // NOTE(review): change/outstandingChange are taken from an arbitrary first instance here —
    // presumably for backwards compatibility with single-instance clients; confirm.
    application.instances().values().stream().findFirst().ifPresent(instance -> {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
    });

    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));

    Cursor instancesArray = object.setArray("instances");
    for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                  : application.instances().values())
        toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);

    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);

    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());

    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
object.setString("instance", instance.name().value());
if (deploymentSpec.instance(instance.name()).isPresent()) {
List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
.steps(deploymentSpec.requireInstance(instance.name()))
.sortedJobs(status.instanceJobs(instance.name()).values());
if ( ! instance.change().isEmpty())
toSlime(object.setObject("deploying"), instance.change());
if ( ! status.outstandingChange(instance.name()).isEmpty())
toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
Cursor changeBlockers = object.setArray("changeBlockers");
deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
Cursor changeBlockerObject = changeBlockers.addObject();
changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
Cursor days = changeBlockerObject.setArray("days");
changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
Cursor hours = changeBlockerObject.setArray("hours");
changeBlocker.window().hours().forEach(hours::addLong);
}));
}
globalEndpointsToSlime(object, instance);
List<Deployment> deployments = deploymentSpec.instance(instance.name())
.map(spec -> new DeploymentSteps(spec, controller::system))
.map(steps -> steps.sortedDeployments(instance.deployments().values()))
.orElse(List.copyOf(instance.deployments().values()));
Cursor deploymentsArray = object.setArray("deployments");
for (Deployment deployment : deployments) {
Cursor deploymentObject = deploymentsArray.addObject();
if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
if (recurseOverDeployments(request))
toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
else {
deploymentObject.setString("environment", deployment.zone().environment().value());
deploymentObject.setString("region", deployment.zone().region().value());
deploymentObject.setString("url", withPath(request.getUri().getPath() +
"/instance/" + instance.name().value() +
"/environment/" + deployment.zone().environment().value() +
"/region/" + deployment.zone().region().value(),
request.getUri()).toString());
}
}
}
private void globalEndpointsToSlime(Cursor object, Instance instance) {
var globalEndpointUrls = new LinkedHashSet<String>();
controller.routing().endpointsOf(instance.id())
.requiresRotation()
.not().legacy()
.asList().stream()
.map(Endpoint::url)
.map(URI::toString)
.forEach(globalEndpointUrls::add);
var globalRotationsArray = object.setArray("globalRotations");
globalEndpointUrls.forEach(globalRotationsArray::addString);
instance.rotations().stream()
.map(AssignedRotation::rotationId)
.findFirst()
.ifPresent(rotation -> object.setString("rotationId", rotation.asString()));
}
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
com.yahoo.vespa.hosted.controller.Application application = status.application();
object.setString("tenant", instance.id().tenant().value());
object.setString("application", instance.id().application().value());
object.setString("instance", instance.id().instance().value());
object.setString("deployments", withPath("/application/v4" +
"/tenant/" + instance.id().tenant().value() +
"/application/" + instance.id().application().value() +
"/instance/" + instance.id().instance().value() + "/job/",
request.getUri()).toString());
application.latestVersion().ifPresent(version -> {
sourceRevisionToSlime(version.source(), object.setObject("source"));
version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
version.commit().ifPresent(commit -> object.setString("commit", commit));
});
application.projectId().ifPresent(id -> object.setLong("projectId", id));
if (application.deploymentSpec().instance(instance.name()).isPresent()) {
List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
.steps(application.deploymentSpec().requireInstance(instance.name()))
.sortedJobs(status.instanceJobs(instance.name()).values());
if ( ! instance.change().isEmpty())
toSlime(object.setObject("deploying"), instance.change());
if ( ! status.outstandingChange(instance.name()).isEmpty())
toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
Cursor changeBlockers = object.setArray("changeBlockers");
application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
Cursor changeBlockerObject = changeBlockers.addObject();
changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
Cursor days = changeBlockerObject.setArray("days");
changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
Cursor hours = changeBlockerObject.setArray("hours");
changeBlocker.window().hours().forEach(hours::addLong);
}));
}
application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
globalEndpointsToSlime(object, instance);
List<Deployment> deployments =
application.deploymentSpec().instance(instance.name())
.map(spec -> new DeploymentSteps(spec, controller::system))
.map(steps -> steps.sortedDeployments(instance.deployments().values()))
.orElse(List.copyOf(instance.deployments().values()));
Cursor instancesArray = object.setArray("instances");
for (Deployment deployment : deployments) {
Cursor deploymentObject = instancesArray.addObject();
if (deployment.zone().environment() == Environment.prod) {
if (instance.rotations().size() == 1) {
toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment),
deploymentObject);
}
if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
}
}
if (recurseOverDeployments(request))
toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
else {
deploymentObject.setString("environment", deployment.zone().environment().value());
deploymentObject.setString("region", deployment.zone().region().value());
deploymentObject.setString("instance", instance.id().instance().value());
deploymentObject.setString("url", withPath(request.getUri().getPath() +
"/environment/" + deployment.zone().environment().value() +
"/region/" + deployment.zone().region().value(),
request.getUri()).toString());
}
}
status.jobSteps().keySet().stream()
.filter(job -> job.application().instance().equals(instance.name()))
.filter(job -> job.type().isProduction() && job.type().isDeployment())
.map(job -> job.type().zone(controller.system()))
.filter(zone -> ! instance.deployments().containsKey(zone))
.forEach(zone -> {
Cursor deploymentObject = instancesArray.addObject();
deploymentObject.setString("environment", zone.environment().value());
deploymentObject.setString("region", zone.region().value());
});
application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
Cursor metricsObject = object.setObject("metrics");
metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
Cursor activity = object.setObject("activity");
application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment,
String region, HttpRequest request) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
Instance instance = controller.applications().getInstance(id)
.orElseThrow(() -> new NotExistsException(id + " not found"));
DeploymentId deploymentId = new DeploymentId(instance.id(),
requireZone(environment, region));
Deployment deployment = instance.deployments().get(deploymentId.zoneId());
if (deployment == null)
throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId());
Slime slime = new Slime();
toSlime(slime.setObject(), deploymentId, deployment, request);
return new SlimeJsonResponse(slime);
}
private void toSlime(Cursor object, Change change) {
change.platform().ifPresent(version -> object.setString("version", version.toString()));
change.application()
.filter(version -> !version.isUnknown())
.ifPresent(version -> toSlime(version, object.setObject("revision")));
}
    /** Serializes a single endpoint: cluster, TLS flag, URL, scope and routing method. */
    private void toSlime(Endpoint endpoint, Cursor object) {
        object.setString("cluster", endpoint.cluster().value());
        object.setBool("tls", endpoint.tls());
        object.setString("url", endpoint.url().toString());
        object.setString("scope", endpointScopeString(endpoint.scope()));
        object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
    }
    /**
     * Serializes a full deployment view: identifiers, endpoints, links to clusters/nodes/monitoring,
     * versions, rotation status, job status, activity and metrics.
     */
    private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
        response.setString("tenant", deploymentId.applicationId().tenant().value());
        response.setString("application", deploymentId.applicationId().application().value());
        response.setString("instance", deploymentId.applicationId().instance().value());
        response.setString("environment", deploymentId.zoneId().environment().value());
        response.setString("region", deploymentId.zoneId().region().value());
        var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));

        // Endpoints: first zone-scoped ones, then global endpoints targeting this zone; legacy endpoints excluded
        var endpointArray = response.setArray("endpoints");
        EndpointList zoneEndpoints = controller.routing().endpointsOf(deploymentId)
                                               .scope(Endpoint.Scope.zone)
                                               .not().legacy();
        for (var endpoint : controller.routing().directEndpoints(zoneEndpoints, deploymentId.applicationId())) {
            toSlime(endpoint, endpointArray.addObject());
        }
        EndpointList globalEndpoints = controller.routing().endpointsOf(application, deploymentId.applicationId().instance())
                                                 .not().legacy()
                                                 .targets(deploymentId.zoneId());
        for (var endpoint : controller.routing().directEndpoints(globalEndpoints, deploymentId.applicationId())) {
            toSlime(endpoint, endpointArray.addObject());
        }

        // Links to related resources
        response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
        response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
        response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
        response.setString("version", deployment.version().toFullString());
        response.setString("revision", deployment.applicationVersion().id());
        response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
        // Expiry only exists in zones with a deployment TTL (typically dev/perf)
        controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
                  .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));

        DeploymentStatus status = controller.jobController().deploymentStatus(application);
        application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
        sourceRevisionToSlime(deployment.applicationVersion().source(), response);

        var instance = application.instances().get(deploymentId.applicationId().instance());
        if (instance != null) {
            // Rotation status applies only to prod deployments with assigned rotations
            if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);

            // Job status for the deployment job of this zone, where one exists:
            // "complete" when no runs remain, "pending" while not yet ready to run, otherwise "running"
            JobType.from(controller.system(), deployment.zone())
                   .map(type -> new JobId(instance.id(), type))
                   .map(status.jobSteps()::get)
                   .ifPresent(stepStatus -> {
                       JobControllerApiHandlerHelper.applicationVersionToSlime(
                               response.setObject("applicationVersion"), deployment.applicationVersion());
                       if (!status.jobsToRun().containsKey(stepStatus.job().get()))
                           response.setString("status", "complete");
                       else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true))
                           response.setString("status", "pending");
                       else response.setString("status", "running");
                   });
        }

        // Activity
        Cursor activity = response.setObject("activity");
        deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
                                                                                  instant.toEpochMilli()));
        deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
                                                                                  instant.toEpochMilli()));
        deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
        deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

        // Metrics
        DeploymentMetrics metrics = deployment.metrics();
        Cursor metricsObject = response.setObject("metrics");
        metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
        metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
        metricsObject.setDouble("documentCount", metrics.documentCount());
        metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
        metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
        metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
    }
private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
if ( ! applicationVersion.isUnknown()) {
object.setLong("buildNumber", applicationVersion.buildNumber().getAsLong());
object.setString("hash", applicationVersion.id());
sourceRevisionToSlime(applicationVersion.source(), object.setObject("source"));
applicationVersion.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
applicationVersion.commit().ifPresent(commit -> object.setString("commit", commit));
}
}
private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
if (revision.isEmpty()) return;
object.setString("gitRepository", revision.get().repository());
object.setString("gitBranch", revision.get().branch());
object.setString("gitCommit", revision.get().commit());
}
private void toSlime(RotationState state, Cursor object) {
Cursor bcpStatus = object.setObject("bcpStatus");
bcpStatus.setString("rotationStatus", rotationStateString(state));
}
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
var array = object.setArray("endpointStatus");
for (var rotation : rotations) {
var statusObject = array.addObject();
var targets = status.of(rotation.rotationId());
statusObject.setString("endpointId", rotation.endpointId().id());
statusObject.setString("rotationId", rotation.rotationId().asString());
statusObject.setString("clusterId", rotation.clusterId().value());
statusObject.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment)));
statusObject.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
}
}
    /** Returns the monitoring-system (yamas) URI for the given deployment, as configured in the zone registry. */
    private URI monitoringSystemUri(DeploymentId deploymentId) {
        return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
    }
/**
* Returns a non-broken, released version at least as old as the oldest platform the given application is on.
*
* If no known version is applicable, the newest version at least as old as the oldest platform is selected,
* among all versions released for this system. If no such versions exists, throws an IllegalStateException.
*/
private Version compileVersion(TenantAndApplicationId id) {
Version oldestPlatform = controller.applications().oldestInstalledPlatform(id);
VersionStatus versionStatus = controller.readVersionStatus();
return versionStatus.versions().stream()
.filter(version -> version.confidence().equalOrHigherThan(VespaVersion.Confidence.low))
.filter(VespaVersion::isReleased)
.map(VespaVersion::versionNumber)
.filter(version -> ! version.isAfter(oldestPlatform))
.max(Comparator.naturalOrder())
.orElseGet(() -> controller.mavenRepository().metadata().versions().stream()
.filter(version -> ! version.isAfter(oldestPlatform))
.filter(version -> ! versionStatus.versions().stream()
.map(VespaVersion::versionNumber)
.collect(Collectors.toSet()).contains(version))
.max(Comparator.naturalOrder())
.orElseThrow(() -> new IllegalStateException("No available releases of " +
controller.mavenRepository().artifactId())));
}
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
ZoneId zone = requireZone(environment, region);
Deployment deployment = instance.deployments().get(zone);
if (deployment == null) {
throw new NotExistsException(instance + " has no deployment in " + zone);
}
var deploymentId = new DeploymentId(instance.id(), zone);
setGlobalRotationStatus(deploymentId, inService, request);
setGlobalEndpointStatus(deploymentId, inService, request);
return new MessageResponse(String.format("Successfully set %s in %s %s service",
instance.id().toShortString(), zone, inService ? "in" : "out of"));
}
/** Set the global endpoint status for given deployment. This only applies to global endpoints backed by a cloud service */
private void setGlobalEndpointStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
var agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant;
var status = inService ? GlobalRouting.Status.in : GlobalRouting.Status.out;
controller.routing().policies().setGlobalRoutingStatus(deployment, status, agent);
}
/** Set the global rotation status for given deployment. This only applies to global endpoints backed by a rotation */
private void setGlobalRotationStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
var requestData = toSlime(request.getData()).get();
var reason = mandatory("reason", requestData).asString();
var agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant;
long timestamp = controller.clock().instant().getEpochSecond();
var status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
var endpointStatus = new EndpointStatus(status, reason, agent.name(), timestamp);
controller.routing().setGlobalRotationStatus(deployment, endpointStatus);
}
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
requireZone(environment, region));
Slime slime = new Slime();
Cursor array = slime.setObject().setArray("globalrotationoverride");
controller.routing().globalRotationStatus(deploymentId)
.forEach((endpoint, status) -> {
array.addString(endpoint.upstreamIdOf(deploymentId));
Cursor statusObject = array.addObject();
statusObject.setString("status", status.getStatus().name());
statusObject.setString("reason", status.getReason() == null ? "" : status.getReason());
statusObject.setString("agent", status.getAgent() == null ? "" : status.getAgent());
statusObject.setLong("timestamp", status.getEpoch());
});
return new SlimeJsonResponse(slime);
}
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
Instance instance = controller.applications().requireInstance(applicationId);
ZoneId zone = requireZone(environment, region);
RotationId rotation = findRotationId(instance, endpointId);
Deployment deployment = instance.deployments().get(zone);
if (deployment == null) {
throw new NotExistsException(instance + " has no deployment in " + zone);
}
Slime slime = new Slime();
Cursor response = slime.setObject();
toSlime(instance.rotationStatus().of(rotation, deployment), response);
return new SlimeJsonResponse(slime);
}
private HttpResponse metering(String tenant, String application, HttpRequest request) {
Slime slime = new Slime();
Cursor root = slime.setObject();
MeteringData meteringData = controller.serviceRegistry()
.meteringService()
.getMeteringData(TenantName.from(tenant), ApplicationName.from(application));
ResourceAllocation currentSnapshot = meteringData.getCurrentSnapshot();
Cursor currentRate = root.setObject("currentrate");
currentRate.setDouble("cpu", currentSnapshot.getCpuCores());
currentRate.setDouble("mem", currentSnapshot.getMemoryGb());
currentRate.setDouble("disk", currentSnapshot.getDiskGb());
ResourceAllocation thisMonth = meteringData.getThisMonth();
Cursor thismonth = root.setObject("thismonth");
thismonth.setDouble("cpu", thisMonth.getCpuCores());
thismonth.setDouble("mem", thisMonth.getMemoryGb());
thismonth.setDouble("disk", thisMonth.getDiskGb());
ResourceAllocation lastMonth = meteringData.getLastMonth();
Cursor lastmonth = root.setObject("lastmonth");
lastmonth.setDouble("cpu", lastMonth.getCpuCores());
lastmonth.setDouble("mem", lastMonth.getMemoryGb());
lastmonth.setDouble("disk", lastMonth.getDiskGb());
Map<ApplicationId, List<ResourceSnapshot>> history = meteringData.getSnapshotHistory();
Cursor details = root.setObject("details");
Cursor detailsCpu = details.setObject("cpu");
Cursor detailsMem = details.setObject("mem");
Cursor detailsDisk = details.setObject("disk");
history.forEach((applicationId, resources) -> {
String instanceName = applicationId.instance().value();
Cursor detailsCpuApp = detailsCpu.setObject(instanceName);
Cursor detailsMemApp = detailsMem.setObject(instanceName);
Cursor detailsDiskApp = detailsDisk.setObject(instanceName);
Cursor detailsCpuData = detailsCpuApp.setArray("data");
Cursor detailsMemData = detailsMemApp.setArray("data");
Cursor detailsDiskData = detailsDiskApp.setArray("data");
resources.forEach(resourceSnapshot -> {
Cursor cpu = detailsCpuData.addObject();
cpu.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
cpu.setDouble("value", resourceSnapshot.getCpuCores());
Cursor mem = detailsMemData.addObject();
mem.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
mem.setDouble("value", resourceSnapshot.getMemoryGb());
Cursor disk = detailsDiskData.addObject();
disk.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
disk.setDouble("value", resourceSnapshot.getDiskGb());
});
});
return new SlimeJsonResponse(slime);
}
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
Slime slime = new Slime();
Cursor root = slime.setObject();
if ( ! instance.change().isEmpty()) {
instance.change().platform().ifPresent(version -> root.setString("platform", version.toString()));
instance.change().application().ifPresent(applicationVersion -> root.setString("application", applicationVersion.id()));
root.setBool("pinned", instance.change().isPinned());
}
return new SlimeJsonResponse(slime);
}
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
requireZone(environment, region));
boolean suspended = controller.applications().isSuspended(deploymentId);
Slime slime = new Slime();
Cursor response = slime.setObject();
response.setBool("suspended", suspended);
return new SlimeJsonResponse(slime);
}
private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
ZoneId zone = requireZone(environment, region);
ServiceApiResponse response = new ServiceApiResponse(zone,
new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(),
controller.zoneRegistry().getConfigServerApiUris(zone),
request.getUri());
response.setResponse(applicationView);
return response;
}
private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
if ("container-clustercontroller".equals((serviceName)) && restPath.contains("/status/")) {
String[] parts = restPath.split("/status/");
String result = controller.serviceRegistry().configServer().getClusterControllerStatus(deploymentId, parts[0], parts[1]);
return new HtmlResponse(result);
}
Map<?,?> result = controller.serviceRegistry().configServer().getServiceApiResponse(deploymentId, serviceName, restPath);
ServiceApiResponse response = new ServiceApiResponse(deploymentId.zoneId(),
deploymentId.applicationId(),
controller.zoneRegistry().getConfigServerApiUris(deploymentId.zoneId()),
request.getUri());
response.setResponse(result, serviceName, restPath);
return response;
}
private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, String restPath, HttpRequest request) {
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, "/" + restPath, request.getUri());
}
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
getTenantOrThrow(tenantName);
TenantName tenant = TenantName.from(tenantName);
Inspector requestObject = toSlime(request.getData()).get();
controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
return tenant(controller.tenants().require(TenantName.from(tenantName)), request);
}
private HttpResponse createTenant(String tenantName, HttpRequest request) {
TenantName tenant = TenantName.from(tenantName);
Inspector requestObject = toSlime(request.getData()).get();
controller.tenants().create(accessControlRequests.specification(tenant, requestObject),
accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
return tenant(controller.tenants().require(TenantName.from(tenantName)), request);
}
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
Inspector requestObject = toSlime(request.getData()).get();
TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
com.yahoo.vespa.hosted.controller.Application application = controller.applications().createApplication(id, credentials);
Slime slime = new Slime();
toSlime(id, slime.setObject(), request);
return new SlimeJsonResponse(slime);
}
private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
if (controller.applications().getApplication(applicationId).isEmpty())
createApplication(tenantName, applicationName, request);
controller.applications().createInstance(applicationId.instance(instanceName));
Slime slime = new Slime();
toSlime(applicationId.instance(instanceName), slime.setObject(), request);
return new SlimeJsonResponse(slime);
}
    /**
     * Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9".
     * An empty version means the current system version. Rejects versions not active in this system.
     */
    private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
        request = controller.auditLogger().log(request);
        String versionString = readToString(request.getData());
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        StringBuilder response = new StringBuilder();
        // The lock callback builds the response message into the outer StringBuilder
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            Version version = Version.fromString(versionString);
            VersionStatus versionStatus = controller.readVersionStatus();
            // The empty version is a placeholder for "current system version"
            if (version.equals(Version.emptyVersion))
                version = controller.systemVersion(versionStatus);
            if (!versionStatus.isActive(version))
                throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                                   "Version is not active in this system. " +
                                                   "Active versions: " + versionStatus.versions()
                                                                                      .stream()
                                                                                      .map(VespaVersion::versionNumber)
                                                                                      .map(Version::toString)
                                                                                      .collect(joining(", ")));
            Change change = Change.of(version);
            if (pin)
                change = change.withPin(); // pinning prevents automatic upgrades away from this version
            controller.applications().deploymentTrigger().forceChange(id, change);
            response.append("Triggered ").append(change).append(" for ").append(id);
        });
        return new MessageResponse(response.toString());
    }
/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
controller.auditLogger().log(request);
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
StringBuilder response = new StringBuilder();
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
Change change = Change.of(application.get().latestVersion().get());
controller.applications().deploymentTrigger().forceChange(id, change);
response.append("Triggered ").append(change).append(" for ").append(id);
});
return new MessageResponse(response.toString());
}
/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
StringBuilder response = new StringBuilder();
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
Change change = application.get().require(id.instance()).change();
if (change.isEmpty()) {
response.append("No deployment in progress for ").append(id).append(" at this time");
return;
}
ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase());
controller.applications().deploymentTrigger().cancelChange(id, cancel);
response.append("Changed deployment from '").append(change).append("' to '").append(controller.applications().requireInstance(id).change()).append("' for ").append(id);
});
return new MessageResponse(response.toString());
}
/** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */
private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    // Optional comma-separated filters; absent or blank entries mean "all".
    List<String> clusterNames = nonBlankCommaSeparatedParts(request.getProperty("clusterId"));
    List<String> documentTypes = nonBlankCommaSeparatedParts(request.getProperty("documentType"));
    controller.applications().reindex(id, zone, clusterNames, documentTypes, request.getBooleanProperty("indexedOnly"));
    return new MessageResponse("Requested reindexing of " + id + " in " + zone +
                               (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames) +
                               (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes))));
}

/** Splits a possibly-null comma-separated value into its non-blank parts. */
private static List<String> nonBlankCommaSeparatedParts(String value) {
    return Optional.ofNullable(value).stream()
                   .flatMap(csv -> Stream.of(csv.split(",")))
                   .filter(part -> ! part.isBlank())
                   .collect(toUnmodifiableList());
}
/** Gets reindexing status of an application in a zone. */
private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setBool("enabled", reindexing.enabled());
    // Clusters, and the entries within each cluster, are emitted in sorted key order for stable output.
    Cursor clustersArray = root.setArray("clusters");
    reindexing.clusters().entrySet().stream().sorted(comparingByKey())
              .forEach(cluster -> {
                  Cursor clusterObject = clustersArray.addObject();
                  clusterObject.setString("name", cluster.getKey());
                  // Document types still waiting for reindexing, with the config generation that requires it.
                  Cursor pendingArray = clusterObject.setArray("pending");
                  cluster.getValue().pending().entrySet().stream().sorted(comparingByKey())
                         .forEach(pending -> {
                             Cursor pendingObject = pendingArray.addObject();
                             pendingObject.setString("type", pending.getKey());
                             pendingObject.setLong("requiredGeneration", pending.getValue());
                         });
                  // Document types whose reindexing is ready/underway; detailed status per type.
                  Cursor readyArray = clusterObject.setArray("ready");
                  cluster.getValue().ready().entrySet().stream().sorted(comparingByKey())
                         .forEach(ready -> {
                             Cursor readyObject = readyArray.addObject();
                             readyObject.setString("type", ready.getKey());
                             setStatus(readyObject, ready.getValue());
                         });
              });
    return new SlimeJsonResponse(slime);
}
/** Serializes one reindexing status entry; absent fields are simply omitted from the output object. */
void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
    status.readyAt().ifPresent(readyAt -> statusObject.setLong("readyAtMillis", readyAt.toEpochMilli()));
    status.startedAt().ifPresent(startedAt -> statusObject.setLong("startedAtMillis", startedAt.toEpochMilli()));
    status.endedAt().ifPresent(endedAt -> statusObject.setLong("endedAtMillis", endedAt.toEpochMilli()));
    status.state().map(ApplicationApiHandler::toString).ifPresent(state -> statusObject.setString("state", state));
    status.message().ifPresent(message -> statusObject.setString("message", message));
    status.progress().ifPresent(progress -> statusObject.setDouble("progress", progress));
}
/** Maps a reindexing state to its lower-case wire name; unknown states map to null (serialized as absent). */
private static String toString(ApplicationReindexing.State state) {
    if (state == ApplicationReindexing.State.PENDING) return "pending";
    if (state == ApplicationReindexing.State.RUNNING) return "running";
    if (state == ApplicationReindexing.State.FAILED) return "failed";
    if (state == ApplicationReindexing.State.SUCCESSFUL) return "successful";
    return null;
}
/** Enables reindexing of an application in a zone. */
private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    controller.applications().enableReindexing(application, zone);
    return new MessageResponse("Enabled reindexing of " + application + " in " + zone);
}
/** Disables reindexing of an application in a zone. */
private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    controller.applications().disableReindexing(application, zone);
    return new MessageResponse("Disabled reindexing of " + application + " in " + zone);
}
/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    // Each filter is optional; an absent filter matches everything in the deployment.
    Optional<String> hostname = Optional.ofNullable(request.getProperty("hostname"));
    Optional<String> clusterType = Optional.ofNullable(request.getProperty("clusterType"));
    Optional<String> clusterId = Optional.ofNullable(request.getProperty("clusterId"));
    RestartFilter filter = new RestartFilter().withHostName(hostname.map(HostName::from))
                                              .withClusterType(clusterType.map(ClusterSpec.Type::from))
                                              .withClusterId(clusterId.map(ClusterSpec.Id::from));
    controller.applications().restart(deploymentId, filter);
    return new MessageResponse("Requested restart of " + deploymentId);
}
/** Set suspension status of the given deployment. */
private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deploymentId = new DeploymentId(application, requireZone(environment, region));
    controller.applications().setSuspension(deploymentId, suspend);
    String verb = suspend ? "Suspended" : "Resumed";
    return new MessageResponse(verb + " orchestration of " + deploymentId);
}
/**
 * Deploys an uploaded application package directly to the zone of the given job type.
 * Only manually deployed environments (dev/perf) are open to non-operators.
 */
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
    if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");
    Map<String, byte[]> dataParts = parseDataParts(request);
    // NOTE(review): the check uses the literal "applicationZip" while the read uses
    // EnvironmentResource.APPLICATION_ZIP — presumably the same string; confirm and unify.
    if ( ! dataParts.containsKey("applicationZip"))
        throw new IllegalArgumentException("Missing required form part 'applicationZip'");
    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    // Identity config in the package must match the deploying tenant/instance before we accept it.
    controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                     Optional.of(id.instance()),
                                                                     Optional.of(type.zone(controller.system())),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));
    // Optional platform version override, carried in the "deployOptions" JSON part.
    Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions"))
                                        .map(json -> SlimeUtils.jsonToSlime(json).get())
                                        .flatMap(options -> optional("vespaVersion", options))
                                        .map(Version::fromString);
    controller.jobController().deploy(id, type, version, applicationPackage);
    RunId runId = controller.jobController().last(id, type).get().id();
    Slime slime = new Slime();
    Cursor rootObject = slime.setObject();
    rootObject.setString("message", "Deployment started in " + runId +
                                    ". This may take about 15 minutes the first time.");
    rootObject.setLong("run", runId.number());
    return new SlimeJsonResponse(slime);
}
/**
 * Legacy deploy endpoint: deploys to the given zone based on the multipart "deployOptions"
 * (and optionally "applicationZip") form parts. Handles three distinct flows:
 * the proxy system application, deployments pinned to a previously built version, and redeployments.
 */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( ! dataParts.containsKey("deployOptions"))
        return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
    Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
    /*
     * Special handling of the proxy application (the only system application with an application package)
     * Setting any other deployOptions here is not supported for now (e.g. specifying version), but
     * this might be handy later to handle emergency downgrades.
     */
    boolean isZoneApplication = SystemApplication.proxy.id().equals(applicationId);
    if (isZoneApplication) {
        // Explicit version requests are rejected; system applications always track the system version.
        String versionStr = deployOptions.field("vespaVersion").asString();
        boolean versionPresent = !versionStr.isEmpty() && !versionStr.equals("null");
        if (versionPresent) {
            throw new RuntimeException("Version not supported for system applications");
        }
        // Refuse while an upgrade is in progress, or before the system version is known at all.
        VersionStatus versionStatus = controller.readVersionStatus();
        if (versionStatus.isUpgrading()) {
            throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
        }
        Optional<VespaVersion> systemVersion = versionStatus.systemVersion();
        if (systemVersion.isEmpty()) {
            throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
        }
        ActivateResult result = controller.applications()
                .deploySystemApplicationPackage(SystemApplication.proxy, zone, systemVersion.get().versionNumber());
        return new SlimeJsonResponse(toSlime(result));
    }
    /*
     * Normal applications from here
     */
    Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip"))
                                                              .map(ApplicationPackage::new);
    Optional<com.yahoo.vespa.hosted.controller.Application> application = controller.applications().getApplication(TenantAndApplicationId.from(applicationId));
    // A source revision pins an already-built application version; it is mutually
    // exclusive with uploading a package, and must come with a build number.
    Inspector sourceRevision = deployOptions.field("sourceRevision");
    Inspector buildNumber = deployOptions.field("buildNumber");
    if (sourceRevision.valid() != buildNumber.valid())
        throw new IllegalArgumentException("Source revision and build number must both be provided, or not");
    Optional<ApplicationVersion> applicationVersion = Optional.empty();
    if (sourceRevision.valid()) {
        if (applicationPackage.isPresent())
            throw new IllegalArgumentException("Application version and application package can't both be provided.");
        applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision),
                                                                 buildNumber.asLong()));
        applicationPackage = Optional.of(controller.applications().getApplicationPackage(applicationId,
                                                                                         applicationVersion.get()));
    }
    boolean deployDirectly = deployOptions.field("deployDirectly").asBool();
    Optional<Version> vespaVersion = optional("vespaVersion", deployOptions).map(Version::new);
    // Bare direct deploy (no package, version, or revision given): redeploy whatever is
    // currently running in the zone, provided its application version is known.
    if (deployDirectly && applicationPackage.isEmpty() && applicationVersion.isEmpty() && vespaVersion.isEmpty()) {
        Optional<Deployment> deployment = controller.applications().getInstance(applicationId)
                                                    .map(Instance::deployments)
                                                    .flatMap(deployments -> Optional.ofNullable(deployments.get(zone)));
        if(deployment.isEmpty())
            throw new IllegalArgumentException("Can't redeploy application, no deployment currently exist");
        ApplicationVersion version = deployment.get().applicationVersion();
        if(version.isUnknown())
            throw new IllegalArgumentException("Can't redeploy application, application version is unknown");
        applicationVersion = Optional.of(version);
        vespaVersion = Optional.of(deployment.get().version());
        applicationPackage = Optional.of(controller.applications().getApplicationPackage(applicationId,
                                                                                         applicationVersion.get()));
    }
    DeployOptions deployOptionsJsonClass = new DeployOptions(deployDirectly,
                                                             vespaVersion,
                                                             deployOptions.field("ignoreValidationErrors").asBool(),
                                                             deployOptions.field("deployCurrentVersion").asBool());
    // Identity config must be verified whenever an actual package is being deployed.
    applicationPackage.ifPresent(aPackage -> controller.applications().verifyApplicationIdentityConfiguration(applicationId.tenant(),
                                                                                                              Optional.of(applicationId.instance()),
                                                                                                              Optional.of(zone),
                                                                                                              aPackage,
                                                                                                              Optional.of(requireUserPrincipal(request))));
    ActivateResult result = controller.applications().deploy(applicationId,
                                                             zone,
                                                             applicationPackage,
                                                             applicationVersion,
                                                             deployOptionsJsonClass);
    return new SlimeJsonResponse(toSlime(result));
}
/** Deletes the given tenant, if it exists, using credentials from the request body. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    // Look up first so we can return a specific 404 for unknown tenants.
    Optional<Tenant> existing = controller.tenants().get(tenantName);
    if (existing.isEmpty())
        return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");
    Tenant tenant = existing.get();
    Credentials credentials = accessControlRequests.credentials(tenant.name(),
                                                                toSlime(request.getData()).get(),
                                                                request.getJDiscRequest());
    controller.tenants().delete(tenant.name(), credentials);
    return tenant(tenant, request);
}
/** Deletes an application (all instances), authorized by credentials from the request body. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    controller.applications().deleteApplication(id,
                                                accessControlRequests.credentials(id.tenant(),
                                                                                  toSlime(request.getData()).get(),
                                                                                  request.getJDiscRequest()));
    return new MessageResponse("Deleted application " + id);
}
/** Deletes one instance; if it was the last, the enclosing application is deleted too. */
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    controller.applications().deleteInstance(id.instance(instanceName));
    // Removing the last instance removes the application itself, which requires credentials.
    boolean noInstancesLeft = controller.applications().requireApplication(id).instances().isEmpty();
    if (noInstancesLeft) {
        Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
        controller.applications().deleteApplication(id, credentials);
    }
    return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString());
}
/** Deactivates (removes) the given deployment. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId id = new DeploymentId(application, requireZone(environment, region));
    controller.applications().deactivate(id.applicationId(), id.zoneId());
    return new MessageResponse("Deactivated " + id);
}
/** Returns test config for indicated job, with production deployments of the default instance. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
    ApplicationId defaultInstance = TenantAndApplicationId.from(id).defaultInstance();
    // Start from all production deployments of the default instance ...
    HashSet<DeploymentId> deployments = new HashSet<>();
    controller.applications().getInstance(defaultInstance)
              .ifPresent(instance -> instance.productionDeployments().keySet()
                                             .forEach(zone -> deployments.add(new DeploymentId(defaultInstance, zone))));
    // ... and include the zone under test itself for non-production jobs.
    if ( ! type.isProduction())
        deployments.add(new DeploymentId(id, type.zone(controller.system())));
    return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
                                                                  type,
                                                                  false,
                                                                  controller.routing().zoneEndpointsOf(deployments),
                                                                  controller.applications().reachableContentClustersByZone(deployments)));
}
/** Parses a source revision object; all of "repository", "branch" and "commit" are required. */
private static SourceRevision toSourceRevision(Inspector object) {
    Inspector repository = object.field("repository");
    Inspector branch = object.field("branch");
    Inspector commit = object.field("commit");
    if ( ! (repository.valid() && branch.valid() && commit.valid()))
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    return new SourceRevision(repository.asString(), branch.asString(), commit.asString());
}
/** Returns the named tenant, or throws NotExistsException if it is unknown. */
private Tenant getTenantOrThrow(String tenantName) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty())
        throw new NotExistsException(new TenantId(tenantName));
    return tenant.get();
}
/**
 * Serializes a tenant, including per-type metadata, quota (cloud only), and its applications.
 * Application detail depth depends on the "recursive" and "production" request properties.
 */
private void toSlime(Cursor object, Tenant tenant, HttpRequest request) {
    object.setString("tenant", tenant.name().value());
    object.setString("type", tenantType(tenant));
    List<com.yahoo.vespa.hosted.controller.Application> applications = controller.applications().asList(tenant.name());
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            object.setString("athensDomain", athenzTenant.domain().getName());
            object.setString("property", athenzTenant.property().id());
            athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
            // Contact info is optional; when present, each person list becomes a nested array.
            athenzTenant.contact().ifPresent(c -> {
                object.setString("propertyUrl", c.propertyUrl().toString());
                object.setString("contactsUrl", c.url().toString());
                object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
                Cursor contactsArray = object.setArray("contacts");
                c.persons().forEach(persons -> {
                    Cursor personArray = contactsArray.addArray();
                    persons.forEach(personArray::addString);
                });
            });
            break;
        case cloud: {
            CloudTenant cloudTenant = (CloudTenant) tenant;
            cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName()));
            Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
            cloudTenant.developerKeys().forEach((key, user) -> {
                Cursor keyObject = pemDeveloperKeysArray.addObject();
                keyObject.setString("key", KeyUtils.toPem(key));
                keyObject.setString("user", user.getName());
            });
            toSlime(object, cloudTenant.tenantSecretStores());
            // Quota: configured budget from billing, usage summed over all of the tenant's applications.
            var tenantQuota = controller.serviceRegistry().billingController().getQuota(tenant.name());
            var usedQuota = applications.stream()
                    .map(com.yahoo.vespa.hosted.controller.Application::quotaUsage)
                    .reduce(QuotaUsage.none, QuotaUsage::add);
            toSlime(tenantQuota, usedQuota, object.setObject("quota"));
            break;
        }
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    Cursor applicationArray = object.setArray("applications");
    for (com.yahoo.vespa.hosted.controller.Application application : applications) {
        DeploymentStatus status = controller.jobController().deploymentStatus(application);
        for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                      : application.instances().values())
            // Recursive requests get full instance serialization; otherwise just id + url.
            if (recurseOverApplications(request))
                toSlime(applicationArray.addObject(), instance, status, request);
            else
                toSlime(instance.id(), applicationArray.addObject(), request);
    }
    tenantMetaDataToSlime(tenant, object.setObject("metaData"));
}
/** Serializes quota: budget (nix when unlimited), usage rate, and optional max cluster size. */
private void toSlime(Quota quota, QuotaUsage usage, Cursor object) {
    // An absent budget is written as an explicit nix so clients can tell "no budget" from 0.
    if (quota.budget().isPresent())
        object.setDouble("budget", quota.budget().get().doubleValue());
    else
        object.setNix("budget");
    object.setDouble("budgetUsed", usage.rate());
    quota.maxClusterSize().ifPresent(size -> object.setLong("clusterSize", size));
}
/** Serializes cluster resources, including an estimated cost rounded to two decimals. */
private void toSlime(ClusterResources resources, Cursor object) {
    object.setLong("nodes", resources.nodes());
    object.setLong("groups", resources.groups());
    toSlime(resources.nodeResources(), object.setObject("nodeResources"));
    // NOTE(review): divisor 3.0 for non-public systems presumably reflects internal cost accounting — confirm.
    double costDivisor = controller.zoneRegistry().system().isPublic() ? 1.0 : 3.0;
    object.setDouble("cost", Math.round(resources.nodes() * resources.nodeResources().cost() * 100.0 / costDivisor) / 100.0);
}
/** Serializes cpu/memory/disk utilization fractions. */
private void utilizationToSlime(Cluster.Utilization utilization, Cursor out) {
    out.setDouble("cpu", utilization.cpu());
    out.setDouble("memory", utilization.memory());
    out.setDouble("disk", utilization.disk());
}
/** Serializes autoscaling events as {from, to, at} objects, in list order. */
private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) {
    scalingEvents.forEach(event -> {
        Cursor eventObject = scalingEventsArray.addObject();
        toSlime(event.from(), eventObject.setObject("from"));
        toSlime(event.to(), eventObject.setObject("to"));
        eventObject.setLong("at", event.at().toEpochMilli());
    });
}
/** Serializes per-node resources; disk speed and storage type are written by their enum names. */
private void toSlime(NodeResources resources, Cursor object) {
    object.setDouble("vcpu", resources.vcpu());
    object.setDouble("memoryGb", resources.memoryGb());
    object.setDouble("diskGb", resources.diskGb());
    object.setDouble("bandwidthGbps", resources.bandwidthGbps());
    object.setString("diskSpeed", valueOf(resources.diskSpeed()));
    object.setString("storageType", valueOf(resources.storageType()));
}
/** Serializes a compact tenant entry (name, type metadata, self url) for the tenant list. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
    object.setString("tenant", tenant.name().value());
    Cursor metaData = object.setObject("metaData");
    metaData.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            metaData.setString("athensDomain", athenzTenant.domain().getName());
            metaData.setString("property", athenzTenant.property().id());
            break;
        case cloud: break; // no extra metadata for cloud tenants in the list view
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/**
 * Serializes tenant activity metadata: creation time, most recent dev deployment,
 * most recent production submission, and last-login times per user level.
 */
private void tenantMetaDataToSlime(Tenant tenant, Cursor object) {
    List<com.yahoo.vespa.hosted.controller.Application> applications = controller.applications().asList(tenant.name());
    // Latest start of any dev-environment job run, across all instances of all applications.
    Optional<Instant> lastDev = applications.stream()
                                            .flatMap(application -> application.instances().values().stream())
                                            .flatMap(instance -> controller.jobController().jobs(instance.id()).stream()
                                                                           .filter(jobType -> jobType.environment() == Environment.dev)
                                                                           .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream()))
                                            .map(Run::start)
                                            .max(Comparator.naturalOrder());
    // Latest build time among each application's most recently submitted version.
    Optional<Instant> lastSubmission = applications.stream()
                                                   .flatMap(app -> app.latestVersion().flatMap(ApplicationVersion::buildTime).stream())
                                                   .max(Comparator.naturalOrder());
    object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli());
    lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli()));
    lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user)
          .ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer)
          .ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator)
          .ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli()));
}
/** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to given query*/
private URI withPathAndQuery(String newPath, String newQuery, URI uri) {
    try {
        // Fragment is deliberately dropped; everything else is carried over from the original URI.
        return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null);
    } catch (URISyntaxException e) {
        // Components came from an already-valid URI, so reassembly cannot fail.
        throw new RuntimeException("Will not happen", e);
    }
}
/** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
private URI withPath(String newPath, URI uri) {
    // Convenience overload: same as withPathAndQuery with the query cleared.
    return withPathAndQuery(newPath, null, uri);
}
/** Builds the canonical /application/v4 path for a deployment. */
private String toPath(DeploymentId id) {
    ApplicationId application = id.applicationId();
    ZoneId zone = id.zoneId();
    return path("/application", "v4",
                "tenant", application.tenant(),
                "application", application.application(),
                "instance", application.instance(),
                "environment", zone.environment(),
                "region", zone.region());
}
/** Parses the given string as a long, returning the default when it is null; throws on malformed input. */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null)
        return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    } catch (NumberFormatException e) {
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'");
    }
}
/** Serializes a job run summary: number, platform version, revision (if known), and timestamp. */
private void toSlime(Run run, Cursor object) {
    object.setLong("id", run.id().number());
    object.setString("version", run.versions().targetPlatform().toFullString());
    if ( ! run.versions().targetApplication().isUnknown())
        toSlime(run.versions().targetApplication(), object.setObject("revision"));
    // NOTE(review): hardcoded placeholder — the run's trigger reason is not recorded here.
    object.setString("reason", "unknown reason");
    // End time when finished, otherwise start time.
    object.setLong("at", run.end().orElse(run.start()).toEpochMilli());
}
/**
 * Reads the given stream (at most 1 MB) and parses it as JSON.
 *
 * @throws RuntimeException wrapping the underlying IOException on read failure
 */
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        // Preserve the cause — the original threw a bare RuntimeException, losing all diagnostics.
        throw new RuntimeException(e);
    }
}
/** Returns the authenticated user principal; its absence is a server-side misconfiguration. */
private static Principal requireUserPrincipal(HttpRequest request) {
    return Optional.ofNullable(request.getJDiscRequest().getUserPrincipal())
                   .orElseThrow(() -> new InternalServerErrorException("Expected a user principal"));
}
/** Returns the named field, or throws IllegalArgumentException if it is absent. */
private Inspector mandatory(String key, Inspector object) {
    Inspector field = object.field(key);
    if ( ! field.valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return field;
}
/** Returns the named field as a string, or empty if the field is absent. */
private Optional<String> optional(String key, Inspector object) {
    return SlimeUtils.optionalString(object.field(key));
}
/** Joins the string forms of the given elements with '/'. NOTE(review): Guava Joiner throws NPE on null elements — callers must not pass null. */
private static String path(Object... elements) {
    return Joiner.on("/").join(elements);
}
/** Serializes an application id reference: tenant, application, and a self url. */
private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    String selfPath = "/application/v4" +
                      "/tenant/" + id.tenant().value() +
                      "/application/" + id.application().value();
    object.setString("url", withPath(selfPath, request.getUri()).toString());
}
/** Serializes an instance id reference: tenant, application, instance, and a self url. */
private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    object.setString("instance", id.instance().value());
    String selfPath = "/application/v4" +
                      "/tenant/" + id.tenant().value() +
                      "/application/" + id.application().value() +
                      "/instance/" + id.instance().value();
    object.setString("url", withPath(selfPath, request.getUri()).toString());
}
/**
 * Serializes a deployment activation result: revision, package size, prepare log,
 * and the config-change actions (restarts and refeeds) reported by the config server.
 */
private Slime toSlime(ActivateResult result) {
    Slime slime = new Slime();
    Cursor object = slime.setObject();
    object.setString("revisionId", result.revisionId().id());
    object.setLong("applicationZipSize", result.applicationZipSizeBytes());
    Cursor logArray = object.setArray("prepareMessages");
    // The prepare log may be absent; emit an empty array in that case.
    if (result.prepareResponse().log != null) {
        for (Log logMessage : result.prepareResponse().log) {
            Cursor logObject = logArray.addObject();
            logObject.setLong("time", logMessage.time);
            logObject.setString("level", logMessage.level);
            logObject.setString("message", logMessage.message);
        }
    }
    Cursor changeObject = object.setObject("configChangeActions");
    // Services that must be restarted for the new config to take effect.
    Cursor restartActionsArray = changeObject.setArray("restart");
    for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
        Cursor restartActionObject = restartActionsArray.addObject();
        restartActionObject.setString("clusterName", restartAction.clusterName);
        restartActionObject.setString("clusterType", restartAction.clusterType);
        restartActionObject.setString("serviceType", restartAction.serviceType);
        serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
        stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
    }
    // Document types that must be re-fed after the config change.
    Cursor refeedActionsArray = changeObject.setArray("refeed");
    for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
        Cursor refeedActionObject = refeedActionsArray.addObject();
        refeedActionObject.setString("name", refeedAction.name);
        refeedActionObject.setString("documentType", refeedAction.documentType);
        refeedActionObject.setString("clusterName", refeedAction.clusterName);
        serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
        stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
    }
    return slime;
}
/** Serializes each service info as {serviceName, serviceType, configId, hostName}. */
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
    serviceInfoList.forEach(serviceInfo -> {
        Cursor entry = array.addObject();
        entry.setString("serviceName", serviceInfo.serviceName);
        entry.setString("serviceType", serviceInfo.serviceType);
        entry.setString("configId", serviceInfo.configId);
        entry.setString("hostName", serviceInfo.hostName);
    });
}
/** Appends each string to the given Slime array, in order. */
private void stringsToSlime(List<String> strings, Cursor array) {
    strings.forEach(array::addString);
}
/** Serializes the tenant's secret stores as {name, awsId, role} entries under "secretStores". */
private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) {
    Cursor array = object.setArray("secretStores");
    for (TenantSecretStore store : tenantSecretStores) {
        Cursor entry = array.addObject();
        entry.setString("name", store.getName());
        entry.setString("awsId", store.getAwsId());
        entry.setString("role", store.getRole());
    }
}
/**
 * Reads the entire stream as a UTF-8 string, or returns null if the stream is empty.
 * The stream is intentionally left open — it belongs to the caller.
 */
private String readToString(InputStream stream) {
    // Explicit charset: the no-charset Scanner constructor uses the platform default,
    // which can corrupt non-ASCII request bodies on misconfigured hosts.
    Scanner scanner = new Scanner(stream, java.nio.charset.StandardCharsets.UTF_8).useDelimiter("\\A");
    if ( ! scanner.hasNext()) return null;
    return scanner.next();
}
/** True if the request asks for tenant-level recursion (which includes all deeper levels). */
private static boolean recurseOverTenants(HttpRequest request) {
    return "tenant".equals(request.getProperty("recursive")) || recurseOverApplications(request);
}
/** True if the request asks for application-level recursion (which includes deployment-level). */
private static boolean recurseOverApplications(HttpRequest request) {
    return "application".equals(request.getProperty("recursive")) || recurseOverDeployments(request);
}
/** True if the request asks for deployment-level recursion ("all", "true", or "deployment"). */
private static boolean recurseOverDeployments(HttpRequest request) {
    String recursive = request.getProperty("recursive");
    return "all".equals(recursive) || "true".equals(recursive) || "deployment".equals(recursive);
}
/** True if the request restricts instance listings to production instances. */
private static boolean showOnlyProductionInstances(HttpRequest request) {
    String production = request.getProperty("production");
    return "true".equals(production);
}
/** Returns the wire name of the tenant's type. */
private static String tenantType(Tenant tenant) {
    switch (tenant.type()) {
        case athenz: return "ATHENS";
        case cloud: return "CLOUD";
        // Report the value actually switched on, not the runtime class name.
        default: throw new IllegalArgumentException("Unknown tenant type: " + tenant.type());
    }
}
/** Builds an ApplicationId from the tenant/application/instance path segments. */
private static ApplicationId appIdFromPath(Path path) {
    return ApplicationId.from(path.get("tenant"),
                              path.get("application"),
                              path.get("instance"));
}
/** Resolves the job type named by the "jobtype" path segment. */
private static JobType jobTypeFromPath(Path path) {
    return JobType.fromJobName(path.get("jobtype"));
}
/** Builds a RunId from the application, job type, and "number" path segments. */
private static RunId runIdFromPath(Path path) {
    return new RunId(appIdFromPath(path),
                     jobTypeFromPath(path),
                     Long.parseLong(path.get("number")));
}
/**
 * Handles a new application submission: parses options and source metadata from the
 * multipart body, verifies the package's identity configuration, and registers the build.
 */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
    Map<String, byte[]> dataParts = parseDataParts(request);
    Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
    // Project id 0 (absent) is normalized to 1.
    long projectId = Math.max(1, submitOptions.field("projectId").asLong());
    Optional<String> repository = optional("repository", submitOptions);
    Optional<String> branch = optional("branch", submitOptions);
    Optional<String> commit = optional("commit", submitOptions);
    // A source revision is only recorded when all three of repository, branch and commit are given.
    Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
                                              ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
                                              : Optional.empty();
    Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
    Optional<String> authorEmail = optional("authorEmail", submitOptions);
    sourceUrl.map(URI::create).ifPresent(url -> {
        if (url.getHost() == null || url.getScheme() == null)
            throw new IllegalArgumentException("Source URL must include scheme and host");
    });
    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
    controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
                                                                     Optional.empty(),
                                                                     Optional.empty(),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));
    return JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
                                                        tenant,
                                                        application,
                                                        sourceRevision,
                                                        authorEmail,
                                                        sourceUrl,
                                                        projectId,
                                                        applicationPackage,
                                                        dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP));
}
/** Removes all production deployments by submitting a deployment-removal package. */
private HttpResponse removeAllProdDeployments(String tenant, String application) {
    // NOTE(review): the response from submitResponse is discarded on purpose here — only the side effect matters.
    JobControllerApiHandlerHelper.submitResponse(controller.jobController(), tenant, application,
                                                 Optional.empty(), Optional.empty(), Optional.empty(), 1,
                                                 ApplicationPackage.deploymentRemoval(), new byte[0]);
    return new MessageResponse("All deployments removed");
}
/** Parses and validates a zone; the synthetic prod "controller" zone is always accepted. */
private ZoneId requireZone(String environment, String region) {
    ZoneId zone = ZoneId.from(environment, region);
    // The "controller" prod zone is not in the registry but is a legal deployment target.
    boolean isControllerZone = zone.environment() == Environment.prod && zone.region().value().equals("controller");
    if ( ! isControllerZone && ! controller.zoneRegistry().hasZone(zone))
        throw new IllegalArgumentException("Zone " + zone + " does not exist in this system");
    return zone;
}
/**
 * Parses the multipart request body. If an X-Content-Hash header is present, the body's
 * SHA-256 digest is verified against it while the parts are read.
 *
 * @throws IllegalArgumentException if the computed digest does not match the header
 */
private static Map<String, byte[]> parseDataParts(HttpRequest request) {
    String contentHash = request.getHeader("X-Content-Hash");
    if (contentHash == null)
        return new MultipartParser().parse(request);
    DigestInputStream digester = Signatures.sha256Digester(request.getData());
    var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
    // Constant-time comparison: the header is client-supplied, so avoid a timing side channel.
    if ( ! java.security.MessageDigest.isEqual(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
        throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
    return dataParts;
}
private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
if (instance.rotations().isEmpty()) {
throw new NotExistsException("global rotation does not exist for " + instance);
}
if (endpointId.isPresent()) {
return instance.rotations().stream()
.filter(r -> r.endpointId().id().equals(endpointId.get()))
.map(AssignedRotation::rotationId)
.findFirst()
.orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() +
" does not exist for " + instance));
} else if (instance.rotations().size() > 1) {
throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
}
return instance.rotations().get(0).rotationId();
}
private static String rotationStateString(RotationState state) {
switch (state) {
case in: return "IN";
case out: return "OUT";
}
return "UNKNOWN";
}
private static String endpointScopeString(Endpoint.Scope scope) {
switch (scope) {
case region: return "region";
case global: return "global";
case zone: return "zone";
}
throw new IllegalArgumentException("Unknown endpoint scope " + scope);
}
private static String routingMethodString(RoutingMethod method) {
switch (method) {
case exclusive: return "exclusive";
case shared: return "shared";
case sharedLayer4: return "sharedLayer4";
}
throw new IllegalArgumentException("Unknown routing method " + method);
}
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName))
.filter(cls::isInstance)
.map(cls::cast)
.orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request"));
}
/** Returns whether given request is by an operator */
private static boolean isOperator(HttpRequest request) {
var securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
return securityContext.roles().stream()
.map(Role::definition)
.anyMatch(definition -> definition == RoleDefinition.hostedOperator);
}
} | class ApplicationApiHandler extends LoggingRequestHandler {
private static final ObjectMapper jsonMapper = new ObjectMapper();
private static final String OPTIONAL_PREFIX = "/api";
private final Controller controller;
private final AccessControlRequests accessControlRequests;
private final TestConfigSerializer testConfigSerializer;
@Inject
public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx,
Controller controller,
AccessControlRequests accessControlRequests) {
super(parentCtx);
this.controller = controller;
this.accessControlRequests = accessControlRequests;
this.testConfigSerializer = new TestConfigSerializer(controller.system());
}
@Override
public Duration getTimeout() {
return Duration.ofMinutes(20);
}
@Override
public HttpResponse handle(HttpRequest request) {
try {
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
switch (request.getMethod()) {
case GET: return handleGET(path, request);
case PUT: return handlePUT(path, request);
case POST: return handlePOST(path, request);
case PATCH: return handlePATCH(path, request);
case DELETE: return handleDELETE(path, request);
case OPTIONS: return handleOPTIONS();
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (ForbiddenException e) {
return ErrorResponse.forbidden(Exceptions.toMessageString(e));
}
catch (NotAuthorizedException e) {
return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
}
catch (NotExistsException e) {
return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (ConfigServerException e) {
switch (e.getErrorCode()) {
case NOT_FOUND:
return new ErrorResponse(NOT_FOUND, e.getErrorCode().name(), Exceptions.toMessageString(e));
case ACTIVATION_CONFLICT:
return new ErrorResponse(CONFLICT, e.getErrorCode().name(), Exceptions.toMessageString(e));
case INTERNAL_SERVER_ERROR:
return new ErrorResponse(INTERNAL_SERVER_ERROR, e.getErrorCode().name(), Exceptions.toMessageString(e));
default:
return new ErrorResponse(BAD_REQUEST, e.getErrorCode().name(), Exceptions.toMessageString(e));
}
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
private HttpResponse handleGET(Path path, HttpRequest request) {
if (path.matches("/application/v4/")) return root(request);
if (path.matches("/application/v4/tenant")) return tenants(request);
if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/region/{region}/parameter-name/{parameter-name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), path.get("region"), path.get("parameter-name"));
if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/metering")) return metering(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return metrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handlePUT(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handlePOST(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handlePATCH(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handleDELETE(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handleOPTIONS() {
EmptyResponse response = new EmptyResponse();
response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
return response;
}
private HttpResponse recursiveRoot(HttpRequest request) {
Slime slime = new Slime();
Cursor tenantArray = slime.setArray();
for (Tenant tenant : controller.tenants().asList())
toSlime(tenantArray.addObject(), tenant, request);
return new SlimeJsonResponse(slime);
}
private HttpResponse root(HttpRequest request) {
return recurseOverTenants(request)
? recursiveRoot(request)
: new ResourceResponse(request, "tenant");
}
private HttpResponse tenants(HttpRequest request) {
Slime slime = new Slime();
Cursor response = slime.setArray();
for (Tenant tenant : controller.tenants().asList())
tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
return new SlimeJsonResponse(slime);
}
private HttpResponse tenant(String tenantName, HttpRequest request) {
return controller.tenants().get(TenantName.from(tenantName))
.map(tenant -> tenant(tenant, request))
.orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
}
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
Slime slime = new Slime();
toSlime(slime.setObject(), tenant, request);
return new SlimeJsonResponse(slime);
}
private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
return controller.tenants().get(TenantName.from(tenantName))
.filter(tenant -> tenant.type() == Tenant.Type.cloud)
.map(tenant -> tenantInfo(((CloudTenant)tenant).info(), request))
.orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
}
private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) {
Slime slime = new Slime();
Cursor infoCursor = slime.setObject();
if (!info.isEmpty()) {
infoCursor.setString("name", info.name());
infoCursor.setString("email", info.email());
infoCursor.setString("website", info.website());
infoCursor.setString("invoiceEmail", info.invoiceEmail());
infoCursor.setString("contactName", info.contactName());
infoCursor.setString("contactEmail", info.contactEmail());
toSlime(info.address(), infoCursor);
toSlime(info.billingContact(), infoCursor);
}
return new SlimeJsonResponse(slime);
}
private void toSlime(TenantInfoAddress address, Cursor parentCursor) {
if (address.isEmpty()) return;
Cursor addressCursor = parentCursor.setObject("address");
addressCursor.setString("addressLines", address.addressLines());
addressCursor.setString("postalCodeOrZip", address.postalCodeOrZip());
addressCursor.setString("city", address.city());
addressCursor.setString("stateRegionProvince", address.stateRegionProvince());
addressCursor.setString("country", address.country());
}
private void toSlime(TenantInfoBillingContact billingContact, Cursor parentCursor) {
if (billingContact.isEmpty()) return;
Cursor addressCursor = parentCursor.setObject("billingContact");
addressCursor.setString("name", billingContact.name());
addressCursor.setString("email", billingContact.email());
addressCursor.setString("phone", billingContact.phone());
toSlime(billingContact.address(), addressCursor);
}
private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) {
return controller.tenants().get(TenantName.from(tenantName))
.filter(tenant -> tenant.type() == Tenant.Type.cloud)
.map(tenant -> updateTenantInfo(((CloudTenant)tenant), request))
.orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
}
private String getString(Inspector field, String defaultVale) {
return field.valid() ? field.asString() : defaultVale;
}
private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) {
TenantInfo oldInfo = tenant.info();
Inspector insp = toSlime(request.getData()).get();
TenantInfo mergedInfo = TenantInfo.EMPTY
.withName(getString(insp.field("name"), oldInfo.name()))
.withEmail(getString(insp.field("email"), oldInfo.email()))
.withWebsite(getString(insp.field("website"), oldInfo.email()))
.withInvoiceEmail(getString(insp.field("invoiceEmail"), oldInfo.invoiceEmail()))
.withContactName(getString(insp.field("contactName"), oldInfo.contactName()))
.withContactEmail(getString(insp.field("contactEmail"), oldInfo.contactName()))
.withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address()))
.withBillingContact(updateTenantInfoBillingContact(insp.field("billingContact"), oldInfo.billingContact()));
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withInfo(mergedInfo);
controller.tenants().store(lockedTenant);
});
return new MessageResponse("Tenant info updated");
}
private TenantInfoAddress updateTenantInfoAddress(Inspector insp, TenantInfoAddress oldAddress) {
if (!insp.valid()) return oldAddress;
return TenantInfoAddress.EMPTY
.withCountry(getString(insp.field("country"), oldAddress.country()))
.withStateRegionProvince(getString(insp.field("stateRegionProvince"), oldAddress.stateRegionProvince()))
.withCity(getString(insp.field("city"), oldAddress.city()))
.withPostalCodeOrZip(getString(insp.field("postalCodeOrZip"), oldAddress.postalCodeOrZip()))
.withAddressLines(getString(insp.field("addressLines"), oldAddress.addressLines()));
}
private TenantInfoBillingContact updateTenantInfoBillingContact(Inspector insp, TenantInfoBillingContact oldContact) {
if (!insp.valid()) return oldContact;
return TenantInfoBillingContact.EMPTY
.withName(getString(insp.field("name"), oldContact.name()))
.withEmail(getString(insp.field("email"), oldContact.email()))
.withPhone(getString(insp.field("phone"), oldContact.phone()))
.withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address()));
}
/**
 * Lists the applications under the given tenant, optionally filtered to a single application name.
 * Each entry carries links to the application and to its (optionally production-only) instances.
 */
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().get(tenantName).isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");

    Slime slime = new Slime();
    Cursor applicationArray = slime.setArray();
    for (com.yahoo.vespa.hosted.controller.Application application : controller.applications().asList(tenant)) {
        boolean included = applicationName.map(name -> application.id().application().value().equals(name))
                                          .orElse(true);
        if ( ! included) continue;

        String applicationPath = "/application/v4" +
                                 "/tenant/" + application.id().tenant().value() +
                                 "/application/" + application.id().application().value();
        Cursor applicationObject = applicationArray.addObject();
        applicationObject.setString("tenant", application.id().tenant().value());
        applicationObject.setString("application", application.id().application().value());
        applicationObject.setString("url", withPath(applicationPath, request.getUri()).toString());

        Cursor instanceArray = applicationObject.setArray("instances");
        var instanceNames = showOnlyProductionInstances(request) ? application.productionInstances().keySet()
                                                                 : application.instances().keySet();
        for (InstanceName instance : instanceNames) {
            Cursor instanceObject = instanceArray.addObject();
            instanceObject.setString("instance", instance.value());
            instanceObject.setString("url", withPath(applicationPath + "/instance/" + instance.value(),
                                                     request.getUri()).toString());
        }
    }
    return new SlimeJsonResponse(slime);
}
/** Serves the application package last deployed to the given manually deployed (dev/perf) zone, as a zip attachment. */
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
    if ( ! type.environment().isManuallyDeployed())
        throw new IllegalArgumentException("Only manually deployed zones have dev packages");

    ZoneId zone = type.zone(controller.system());
    byte[] content = controller.applications().applicationStore().getDev(id, zone);
    String filename = id.toFullString() + "." + zone.value() + ".zip";
    return new ZipResponse(filename, content);
}
/**
 * Serves an application package as a zip attachment.
 *
 * The build to serve is taken from the 'build' query parameter when present; otherwise the latest
 * submitted build is used.
 *
 * @throws IllegalArgumentException when the requested build number is not a valid long
 * @throws NotExistsException when no package exists for the resolved build, or nothing was ever submitted
 */
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
    var tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
    // Note: the previous version also built an unused default-instance ApplicationId here; removed.

    var requestedBuild = Optional.ofNullable(request.getProperty("build")).map(build -> {
        try {
            return Long.parseLong(build);
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("Invalid build number", e);
        }
    });

    long buildNumber;
    if (requestedBuild.isEmpty()) { // Default to the latest build.
        var application = controller.applications().requireApplication(tenantAndApplication);
        var latestBuild = application.latestVersion().map(ApplicationVersion::buildNumber).orElse(OptionalLong.empty());
        if (latestBuild.isEmpty()) {
            throw new NotExistsException("No application package has been submitted for '" + tenantAndApplication + "'");
        }
        buildNumber = latestBuild.getAsLong();
    } else {
        buildNumber = requestedBuild.get();
    }

    var applicationPackage = controller.applications().applicationStore().find(tenantAndApplication.tenant(), tenantAndApplication.application(), buildNumber);
    if (applicationPackage.isEmpty()) {
        throw new NotExistsException("No application package found for '" +
                                     tenantAndApplication +
                                     "' with build number " + buildNumber);
    }
    var filename = tenantAndApplication + "-build" + buildNumber + ".zip";
    return new ZipResponse(filename, applicationPackage.get());
}
/** Returns the full serialization of the given application, or 404 if it does not exist. */
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    toSlime(root, getApplication(tenantName, applicationName), request);
    return new SlimeJsonResponse(slime);
}
/** Returns the Vespa version this application should compile against, as {"compileVersion": ...}. */
private HttpResponse compileVersion(String tenantName, String applicationName) {
    Slime slime = new Slime();
    var id = TenantAndApplicationId.from(tenantName, applicationName);
    slime.setObject().setString("compileVersion", compileVersion(id).toFullString());
    return new SlimeJsonResponse(slime);
}
private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
Slime slime = new Slime();
toSlime(slime.setObject(), getInstance(tenantName, applicationName, instanceName),
controller.jobController().deploymentStatus(getApplication(tenantName, applicationName)), request);
return new SlimeJsonResponse(slime);
}
/** Registers a developer public key for the requesting user on a cloud tenant; returns all keys afterwards. */
private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

    Principal user = request.getJDiscRequest().getUserPrincipal();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(toSlime(request.getData()).get().field("key").asString());
    Slime root = new Slime();
    controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withDeveloperKey(developerKey, user);
        toSlime(root.setObject().setArray("keys"), lockedTenant.get().developerKeys());
        controller.tenants().store(lockedTenant);
    });
    return new SlimeJsonResponse(root);
}
/**
 * Returns some active deployment under the given tenant, if one exists.
 *
 * Scans the tenant's applications and their instances in order and returns the first
 * (instance, zone) pair found with at least one deployment; empty otherwise.
 */
private Optional<DeploymentId> getActiveDeployment(TenantName tenant) {
    for (var application : controller.applications().asList(tenant)) {
        for (var instance : application.instances().values()) {
            // Previous version tested keySet().size() > 0 and then re-fetched the first zone;
            // a single findFirst covers both.
            var zone = instance.deployments().keySet().stream().findFirst();
            if (zone.isPresent())
                return Optional.of(new DeploymentId(instance.id(), zone.get()));
        }
    }
    return Optional.empty();
}
/**
 * Removes the given developer public key from a cloud tenant and returns the remaining keys.
 *
 * @throws IllegalArgumentException if the tenant is not a cloud tenant
 */
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

    // Note: the previous version also looked up the key's owning Principal but never used it;
    // that dead lookup (and its extra tenant read) is removed.
    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    Slime root = new Slime();
    controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withoutDeveloperKey(developerKey);
        toSlime(root.setObject().setArray("keys"), lockedTenant.get().developerKeys());
        controller.tenants().store(lockedTenant);
    });
    return new SlimeJsonResponse(root);
}
/** Serializes each (public key, owner) entry as { "key": &lt;PEM&gt;, "user": &lt;principal name&gt; }. */
private void toSlime(Cursor keysArray, Map<PublicKey, Principal> keys) {
    for (Map.Entry<PublicKey, Principal> entry : keys.entrySet()) {
        Cursor keyObject = keysArray.addObject();
        keyObject.setString("key", KeyUtils.toPem(entry.getKey()));
        keyObject.setString("user", entry.getValue().getName());
    }
}
/** Adds the given deploy key to the application, and returns all of its deploy keys as PEM. */
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(toSlime(request.getData()).get().field("key").asString());
    Slime root = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withDeployKey(deployKey);
        Cursor keysArray = root.setObject().setArray("keys");
        for (PublicKey key : application.get().deployKeys())
            keysArray.addString(KeyUtils.toPem(key));
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}
/** Removes the given deploy key from the application, and returns the remaining deploy keys as PEM. */
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(toSlime(request.getData()).get().field("key").asString());
    Slime root = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withoutDeployKey(deployKey);
        Cursor keysArray = root.setObject().setArray("keys");
        for (PublicKey key : application.get().deployKeys())
            keysArray.addString(KeyUtils.toPem(key));
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}
/**
 * Adds a secret store to a cloud tenant: creates the backing IAM policy, registers the store
 * with the tenant secret service, persists it on the tenant, and returns all configured stores.
 *
 * @throws IllegalArgumentException if the tenant is not a cloud tenant
 */
private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

    var data = toSlime(request.getData()).get();
    var awsId = mandatory("awsId", data).asString();
    var externalId = mandatory("externalId", data).asString();
    var role = mandatory("role", data).asString();

    var tenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
    var tenantSecretStore = new TenantSecretStore(name, awsId, role);

    // Plain concatenation: the previous String.format(...) received a pre-concatenated message and
    // no arguments, so any '%' in the store's toString would have thrown IllegalFormatException.
    if ( ! tenantSecretStore.isValid()) {
        return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid");
    }
    if (tenant.tenantSecretStores().contains(tenantSecretStore)) {
        return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured");
    }

    controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role);
    controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId);

    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withSecretStore(tenantSecretStore);
        controller.tenants().store(lockedTenant);
    });

    // Re-read the tenant so the response reflects the stored state.
    tenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
    var slime = new Slime();
    toSlime(slime.setObject(), tenant.tenantSecretStores());
    return new SlimeJsonResponse(slime);
}
/** Deletes the named secret store from the tenant, tears down its policy, and returns the remaining stores. */
private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) {
    var tenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
    var match = tenant.tenantSecretStores().stream()
                      .filter(candidate -> candidate.getName().equals(name))
                      .findFirst();
    if (match.isEmpty())
        return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found");

    var tenantSecretStore = match.get();
    controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore);
    controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole());
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withoutSecretStore(tenantSecretStore);
        controller.tenants().store(lockedTenant);
    });

    // Re-read the tenant so the response reflects the stored state.
    tenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
    var slime = new Slime();
    toSlime(slime.setObject(), tenant.tenantSecretStores());
    return new SlimeJsonResponse(slime);
}
/** Applies the recognized fields of the request body to the application, and reports what changed. */
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    StringJoiner messages = new StringJoiner("\n").setEmptyValue("No applicable changes.");
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        Inspector majorVersionField = requestObject.field("majorVersion");
        if (majorVersionField.valid()) {
            // A value of 0 clears the pinned major version.
            Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
            application = application.withMajorVersion(majorVersion);
            messages.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
        }
        Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
        if (pemDeployKeyField.valid()) {
            String pemDeployKey = pemDeployKeyField.asString();
            application = application.withDeployKey(KeyUtils.fromPemEncodedPublicKey(pemDeployKey));
            messages.add("Added deploy key " + pemDeployKey);
        }
        controller.applications().store(application);
    });
    return new MessageResponse(messages.toString());
}
/** Returns the application with the given ids, or throws NotExistsException. */
private com.yahoo.vespa.hosted.controller.Application getApplication(String tenantName, String applicationName) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    return controller.applications().getApplication(id)
                     .orElseThrow(() -> new NotExistsException(id + " not found"));
}
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
return controller.applications().getInstance(applicationId)
.orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, id);
Slime slime = new Slime();
Cursor nodesArray = slime.setObject().setArray("nodes");
for (Node node : nodes) {
Cursor nodeObject = nodesArray.addObject();
nodeObject.setString("hostname", node.hostname().value());
nodeObject.setString("state", valueOf(node.state()));
node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
nodeObject.setString("orchestration", valueOf(node.serviceState()));
nodeObject.setString("version", node.currentVersion().toString());
nodeObject.setString("flavor", node.flavor());
toSlime(node.resources(), nodeObject);
nodeObject.setBool("fastDisk", node.resources().diskSpeed() == NodeResources.DiskSpeed.fast);
nodeObject.setString("clusterId", node.clusterId());
nodeObject.setString("clusterType", valueOf(node.clusterType()));
}
return new SlimeJsonResponse(slime);
}
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);
Slime slime = new Slime();
Cursor clustersObject = slime.setObject().setObject("clusters");
for (Cluster cluster : application.clusters().values()) {
Cursor clusterObject = clustersObject.setObject(cluster.id().value());
toSlime(cluster.min(), clusterObject.setObject("min"));
toSlime(cluster.max(), clusterObject.setObject("max"));
toSlime(cluster.current(), clusterObject.setObject("current"));
if (cluster.target().isPresent()
&& ! cluster.target().get().justNumbers().equals(cluster.current().justNumbers()))
toSlime(cluster.target().get(), clusterObject.setObject("target"));
cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested")));
utilizationToSlime(cluster.utilization(), clusterObject.setObject("utilization"));
scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus());
}
return new SlimeJsonResponse(slime);
}
/** Maps a node state to its API wire name; throws for states this API does not expose. */
private static String valueOf(Node.State state) {
    if (state == Node.State.failed) return "failed";
    if (state == Node.State.parked) return "parked";
    if (state == Node.State.dirty) return "dirty";
    if (state == Node.State.ready) return "ready";
    if (state == Node.State.active) return "active";
    if (state == Node.State.inactive) return "inactive";
    if (state == Node.State.reserved) return "reserved";
    if (state == Node.State.provisioned) return "provisioned";
    throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
}
/** Maps an orchestration state to its API wire name; any unlisted state maps to "unknown", as before. */
static String valueOf(Node.ServiceState state) {
    if (state == Node.ServiceState.expectedUp) return "expectedUp";
    if (state == Node.ServiceState.allowedDown) return "allowedDown";
    if (state == Node.ServiceState.permanentlyDown) return "permanentlyDown";
    if (state == Node.ServiceState.unorchestrated) return "unorchestrated";
    return "unknown";
}
/** Maps a cluster type to its API wire name; throws for types this API does not expose. */
private static String valueOf(Node.ClusterType type) {
    if (type == Node.ClusterType.admin) return "admin";
    if (type == Node.ClusterType.content) return "content";
    if (type == Node.ClusterType.container) return "container";
    if (type == Node.ClusterType.combined) return "combined";
    throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
}
/** Maps a disk speed to its API wire name; throws for values this API does not expose. */
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
    if (diskSpeed == NodeResources.DiskSpeed.fast) return "fast";
    if (diskSpeed == NodeResources.DiskSpeed.slow) return "slow";
    if (diskSpeed == NodeResources.DiskSpeed.any) return "any";
    throw new IllegalArgumentException("Unknown disk speed '" + diskSpeed.name() + "'");
}
/** Maps a storage type to its API wire name; throws for values this API does not expose. */
private static String valueOf(NodeResources.StorageType storageType) {
    if (storageType == NodeResources.StorageType.remote) return "remote";
    if (storageType == NodeResources.StorageType.local) return "local";
    if (storageType == NodeResources.StorageType.any) return "any";
    throw new IllegalArgumentException("Unknown storage type '" + storageType.name() + "'");
}
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
DeploymentId deployment = new DeploymentId(application, zone);
InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
return new HttpResponse(200) {
@Override
public void render(OutputStream outputStream) throws IOException {
logStream.transferTo(outputStream);
}
};
}
private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
DeploymentId deployment = new DeploymentId(application, zone);
List<ProtonMetrics> protonMetrics = controller.serviceRegistry().configServer().getProtonMetrics(deployment);
return buildResponseFromProtonMetrics(protonMetrics);
}
/** Wraps the given per-node Proton metrics in a {"metrics": [...]} JSON response; returns 500 on serialization failure. */
private JsonResponse buildResponseFromProtonMetrics(List<ProtonMetrics> protonMetrics) {
    try {
        var root = jsonMapper.createObjectNode();
        var metricsArray = jsonMapper.createArrayNode();
        protonMetrics.forEach(metrics -> metricsArray.add(metrics.toJson()));
        root.set("metrics", metricsArray);
        return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(root));
    } catch (JsonProcessingException e) {
        log.log(Level.SEVERE, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
        return new JsonResponse(500, "");
    }
}
/** Force-triggers (or re-triggers) the given job, and reports which jobs were started, if any. */
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    boolean requireTests = ! requestObject.field("skipTests").asBool();
    boolean reTrigger = requestObject.field("reTrigger").asBool();

    String triggered;
    if (reTrigger) {
        triggered = controller.applications().deploymentTrigger().reTrigger(id, type).type().jobName();
    } else {
        String user = request.getJDiscRequest().getUserPrincipal().getName();
        triggered = controller.applications().deploymentTrigger()
                              .forceTrigger(id, type, user, requireTests)
                              .stream().map(job -> job.type().jobName()).collect(joining(", "));
    }
    return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
                                                   : "Triggered " + triggered + " for " + id);
}
/** Pauses the given job until the maximum allowed pause from now. */
private HttpResponse pause(ApplicationId id, JobType type) {
    controller.applications().deploymentTrigger()
              .pauseJob(id, type, controller.clock().instant().plus(DeploymentTrigger.maxPause));
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
/** Resumes a previously paused job. */
private HttpResponse resume(ApplicationId id, JobType type) {
    controller.applications().deploymentTrigger().resumeJob(id, type);
    String message = type.jobName() + " for " + id + " resumed";
    return new MessageResponse(message);
}
/**
 * Serializes an application (the tenant-level entity spanning all instances) to the given cursor:
 * identity, links, latest submitted version, per-instance summaries, deploy keys, metrics and activity.
 */
private void toSlime(Cursor object, com.yahoo.vespa.hosted.controller.Application application, HttpRequest request) {
    object.setString("tenant", application.id().tenant().value());
    object.setString("application", application.id().application().value());
    // URL of the job listing for this application.
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + application.id().tenant().value() +
                                             "/application/" + application.id().application().value() +
                                             "/job/",
                                             request.getUri()).toString());
    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.latestVersion().ifPresent(version -> toSlime(version, object.setObject("latestVersion")));
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    // Change status is reported from the first instance only at this level.
    // NOTE(review): presumably a legacy single-instance view — confirm consumers still expect this.
    application.instances().values().stream().findFirst().ifPresent(instance -> {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
    });
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    // Per-instance summaries, optionally restricted to production instances.
    Cursor instancesArray = object.setArray("instances");
    for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                  : application.instances().values())
        toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    // Service quality metrics and recent read/write activity.
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/**
 * Serializes one instance summary (as part of the application serialization above):
 * change status, change blockers, global endpoints, and a list of its deployments.
 */
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
    object.setString("instance", instance.name().value());
    if (deploymentSpec.instance(instance.name()).isPresent()) {
        // NOTE(review): jobStatus is computed but never read below — candidate for removal.
        List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
                                              .steps(deploymentSpec.requireInstance(instance.name()))
                                              .sortedJobs(status.instanceJobs(instance.name()).values());
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
        // Time windows in which changes are blocked from rolling out, from deployment.xml.
        Cursor changeBlockers = object.setArray("changeBlockers");
        deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    globalEndpointsToSlime(object, instance);
    // Deployments, in deployment-spec order when the instance is declared there.
    List<Deployment> deployments = deploymentSpec.instance(instance.name())
                                                 .map(spec -> new DeploymentSteps(spec, controller::system))
                                                 .map(steps -> steps.sortedDeployments(instance.deployments().values()))
                                                 .orElse(List.copyOf(instance.deployments().values()));
    Cursor deploymentsArray = object.setArray("deployments");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = deploymentsArray.addObject();
        // Rotation status is only relevant for production deployments with assigned rotations.
        if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
        if (recurseOverDeployments(request)) // Inline the full deployment serialization.
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else { // Otherwise just identify the deployment and link to it.
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/instance/" + instance.name().value() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
}
/** Writes the instance's non-legacy rotation endpoint URLs as "globalRotations", plus the first rotation id, if any. */
private void globalEndpointsToSlime(Cursor object, Instance instance) {
    var globalRotationsArray = object.setArray("globalRotations");
    controller.routing().endpointsOf(instance.id())
              .requiresRotation()
              .not().legacy()
              .asList().stream()
              .map(Endpoint::url)
              .map(URI::toString)
              .distinct() // preserve encounter order, drop duplicates (was a LinkedHashSet)
              .forEach(globalRotationsArray::addString);
    if ( ! instance.rotations().isEmpty())
        object.setString("rotationId", instance.rotations().get(0).rotationId().asString());
}
/**
 * Serializes a single instance in full: identity, links, source info for the latest submitted
 * version, change status and blockers, global endpoints, deployments (inlined when the request
 * asks for recursion), declared-but-missing production zones, deploy keys, metrics and activity.
 */
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
    com.yahoo.vespa.hosted.controller.Application application = status.application();
    object.setString("tenant", instance.id().tenant().value());
    object.setString("application", instance.id().application().value());
    object.setString("instance", instance.id().instance().value());
    // URL of the job listing for this instance.
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + instance.id().tenant().value() +
                                             "/application/" + instance.id().application().value() +
                                             "/instance/" + instance.id().instance().value() + "/job/",
                                             request.getUri()).toString());
    // Source-control details of the latest submitted version, when known.
    application.latestVersion().ifPresent(version -> {
        sourceRevisionToSlime(version.source(), object.setObject("source"));
        version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
        version.commit().ifPresent(commit -> object.setString("commit", commit));
    });
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    if (application.deploymentSpec().instance(instance.name()).isPresent()) {
        // NOTE(review): jobStatus is computed but never read below — candidate for removal.
        List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
                                              .steps(application.deploymentSpec().requireInstance(instance.name()))
                                              .sortedJobs(status.instanceJobs(instance.name()).values());
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
        // Time windows in which changes are blocked from rolling out, from deployment.xml.
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    globalEndpointsToSlime(object, instance);
    // Deployments, in deployment-spec order when the instance is declared there.
    List<Deployment> deployments =
            application.deploymentSpec().instance(instance.name())
                       .map(spec -> new DeploymentSteps(spec, controller::system))
                       .map(steps -> steps.sortedDeployments(instance.deployments().values()))
                       .orElse(List.copyOf(instance.deployments().values()));
    // Despite its name, this array holds one object per deployment.
    Cursor instancesArray = object.setArray("instances");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = instancesArray.addObject();
        if (deployment.zone().environment() == Environment.prod) {
            // With exactly one rotation, also report this deployment's status within it.
            if (instance.rotations().size() == 1) {
                toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment),
                        deploymentObject);
            }
            if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
            }
        }
        if (recurseOverDeployments(request)) // Inline the full deployment serialization.
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else { // Otherwise just identify the deployment and link to it.
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", instance.id().instance().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
    // Production deployment zones declared in the spec but not yet deployed to.
    status.jobSteps().keySet().stream()
          .filter(job -> job.application().instance().equals(instance.name()))
          .filter(job -> job.type().isProduction() && job.type().isDeployment())
          .map(job -> job.type().zone(controller.system()))
          .filter(zone -> ! instance.deployments().containsKey(zone))
          .forEach(zone -> {
              Cursor deploymentObject = instancesArray.addObject();
              deploymentObject.setString("environment", zone.environment().value());
              deploymentObject.setString("region", zone.region().value());
          });
    // Singular "pemDeployKey" kept alongside the plural form.
    // NOTE(review): presumably for backwards compatibility — confirm before removing.
    application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    // Service quality metrics and recent read/write activity.
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment,
String region, HttpRequest request) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
Instance instance = controller.applications().getInstance(id)
.orElseThrow(() -> new NotExistsException(id + " not found"));
DeploymentId deploymentId = new DeploymentId(instance.id(),
requireZone(environment, region));
Deployment deployment = instance.deployments().get(deploymentId.zoneId());
if (deployment == null)
throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId());
Slime slime = new Slime();
toSlime(slime.setObject(), deploymentId, deployment, request);
return new SlimeJsonResponse(slime);
}
/** Serializes the platform version and (known) application revision of the given change, where present. */
private void toSlime(Cursor object, Change change) {
    Optional<Version> platform = change.platform();
    if (platform.isPresent())
        object.setString("version", platform.get().toString());

    // Unknown application versions carry no useful information, so they are omitted.
    Optional<ApplicationVersion> revision = change.application().filter(version -> ! version.isUnknown());
    if (revision.isPresent())
        toSlime(revision.get(), object.setObject("revision"));
}
/** Serializes the given endpoint: cluster, TLS flag, URL, scope and routing method. */
private void toSlime(Endpoint endpoint, Cursor object) {
    object.setString("cluster", endpoint.cluster().value());
    object.setBool("tls", endpoint.tls());
    object.setString("url", endpoint.url().toString());
    // Scope and routing method are rendered through helpers to keep the wire names stable.
    object.setString("scope", endpointScopeString(endpoint.scope()));
    object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
}
/**
 * Serializes a full deployment view to the given response object: identifiers, endpoints,
 * links to related resources (clusters, nodes, monitoring), deployment status, rotation
 * status, activity and metrics.
 */
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
    response.setString("tenant", deploymentId.applicationId().tenant().value());
    response.setString("application", deploymentId.applicationId().application().value());
    response.setString("instance", deploymentId.applicationId().instance().value());
    response.setString("environment", deploymentId.zoneId().environment().value());
    response.setString("region", deploymentId.zoneId().region().value());
    var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));

    // Zone-scoped endpoints first, then global endpoints targeting this zone; legacy endpoints are excluded from both.
    var endpointArray = response.setArray("endpoints");
    EndpointList zoneEndpoints = controller.routing().endpointsOf(deploymentId)
                                           .scope(Endpoint.Scope.zone)
                                           .not().legacy();
    for (var endpoint : controller.routing().directEndpoints(zoneEndpoints, deploymentId.applicationId())) {
        toSlime(endpoint, endpointArray.addObject());
    }
    EndpointList globalEndpoints = controller.routing().endpointsOf(application, deploymentId.applicationId().instance())
                                             .not().legacy()
                                             .targets(deploymentId.zoneId());
    for (var endpoint : controller.routing().directEndpoints(globalEndpoints, deploymentId.applicationId())) {
        toSlime(endpoint, endpointArray.addObject());
    }

    // Links to related resources, derived from the request URI so they stay valid behind proxies.
    response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
    response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
    response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
    response.setString("version", deployment.version().toFullString());
    response.setString("revision", deployment.applicationVersion().id());
    response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
    // Expiry is only present in zones with a configured deployment time-to-live (typically dev/perf).
    controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
              .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));

    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
    sourceRevisionToSlime(deployment.applicationVersion().source(), response);

    var instance = application.instances().get(deploymentId.applicationId().instance());
    if (instance != null) {
        // Rotation status is only meaningful for production deployments of instances with rotations.
        if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);

        // Derive a coarse job status ("complete" / "pending" / "running") for the job matching this zone, if any.
        JobType.from(controller.system(), deployment.zone())
               .map(type -> new JobId(instance.id(), type))
               .map(status.jobSteps()::get)
               .ifPresent(stepStatus -> {
                   JobControllerApiHandlerHelper.applicationVersionToSlime(
                           response.setObject("applicationVersion"), deployment.applicationVersion());
                   if (!status.jobsToRun().containsKey(stepStatus.job().get()))
                       response.setString("status", "complete");
                   else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true))
                       response.setString("status", "pending");
                   else response.setString("status", "running");
               });
    }

    // Recent query/write activity, where recorded.
    Cursor activity = response.setObject("activity");
    deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

    // Latest collected deployment metrics.
    DeploymentMetrics metrics = deployment.metrics();
    Cursor metricsObject = response.setObject("metrics");
    metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
    metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
    metricsObject.setDouble("documentCount", metrics.documentCount());
    metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
    metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
    metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}
/** Serializes build number, id, source revision, source URL and commit of the given version, unless it is unknown. */
private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
    if ( ! applicationVersion.isUnknown()) {
        // NOTE(review): assumes buildNumber is always present when the version is known — confirm against ApplicationVersion.
        object.setLong("buildNumber", applicationVersion.buildNumber().getAsLong());
        object.setString("hash", applicationVersion.id());
        sourceRevisionToSlime(applicationVersion.source(), object.setObject("source"));
        applicationVersion.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
        applicationVersion.commit().ifPresent(commit -> object.setString("commit", commit));
    }
}
/** Writes the git repository, branch and commit of the given source revision, if present; otherwise writes nothing. */
private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
    revision.ifPresent(source -> {
        object.setString("gitRepository", source.repository());
        object.setString("gitBranch", source.branch());
        object.setString("gitCommit", source.commit());
    });
}
/** Serializes the given rotation state under a "bcpStatus" object. */
private void toSlime(RotationState state, Cursor object) {
    object.setObject("bcpStatus").setString("rotationStatus", rotationStateString(state));
}
/** Serializes the per-endpoint rotation status of the given deployment as an "endpointStatus" array. */
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
    Cursor endpointStatusArray = object.setArray("endpointStatus");
    for (AssignedRotation assignedRotation : rotations) {
        Cursor rotationObject = endpointStatusArray.addObject();
        var targets = status.of(assignedRotation.rotationId());
        rotationObject.setString("endpointId", assignedRotation.endpointId().id());
        rotationObject.setString("rotationId", assignedRotation.rotationId().asString());
        rotationObject.setString("clusterId", assignedRotation.clusterId().value());
        rotationObject.setString("status", rotationStateString(status.of(assignedRotation.rotationId(), deployment)));
        rotationObject.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
    }
}
/** Returns the monitoring-system (dashboard) URI for the given deployment, as configured in the zone registry. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
    return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}
/**
 * Returns a non-broken, released version at least as old as the oldest platform the given application is on.
 *
 * If no known version is applicable, the newest version at least as old as the oldest platform is selected,
 * among all versions released for this system. If no such versions exists, throws an IllegalStateException.
 */
private Version compileVersion(TenantAndApplicationId id) {
    Version oldestPlatform = controller.applications().oldestInstalledPlatform(id);
    VersionStatus versionStatus = controller.readVersionStatus();
    // Preferred path: a released version in this system with at least "low" confidence,
    // no newer than the oldest platform the application runs on.
    return versionStatus.versions().stream()
                        .filter(version -> version.confidence().equalOrHigherThan(VespaVersion.Confidence.low))
                        .filter(VespaVersion::isReleased)
                        .map(VespaVersion::versionNumber)
                        .filter(version -> ! version.isAfter(oldestPlatform))
                        .max(Comparator.naturalOrder())
                        // Fallback: any version published to the maven repository that is not already
                        // covered by the system's version status (those were rejected above).
                        .orElseGet(() -> controller.mavenRepository().metadata().versions().stream()
                                                   .filter(version -> ! version.isAfter(oldestPlatform))
                                                   .filter(version -> ! versionStatus.versions().stream()
                                                                                     .map(VespaVersion::versionNumber)
                                                                                     .collect(Collectors.toSet()).contains(version))
                                                   .max(Comparator.naturalOrder())
                                                   .orElseThrow(() -> new IllegalStateException("No available releases of " +
                                                                                                controller.mavenRepository().artifactId())));
}
/**
 * Takes the given deployment in or out of service for global routing, covering both
 * rotation-backed and cloud-service-backed global endpoints.
 *
 * @throws NotExistsException if the instance has no deployment in the given zone
 */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = requireZone(environment, region);
    if ( ! instance.deployments().containsKey(zone))
        throw new NotExistsException(instance + " has no deployment in " + zone);

    DeploymentId deploymentId = new DeploymentId(instance.id(), zone);
    setGlobalRotationStatus(deploymentId, inService, request);
    setGlobalEndpointStatus(deploymentId, inService, request);
    return new MessageResponse(String.format("Successfully set %s in %s %s service",
                                             instance.id().toShortString(), zone, inService ? "in" : "out of"));
}
/** Set the global endpoint status for given deployment. This only applies to global endpoints backed by a cloud service */
private void setGlobalEndpointStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
    GlobalRouting.Status status = inService ? GlobalRouting.Status.in : GlobalRouting.Status.out;
    // Record who made the change: operators and tenants are tracked separately.
    GlobalRouting.Agent agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant;
    controller.routing().policies().setGlobalRoutingStatus(deployment, status, agent);
}
/** Set the global rotation status for given deployment. This only applies to global endpoints backed by a rotation */
private void setGlobalRotationStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
    // A human-readable reason is mandatory in the request body.
    String reason = mandatory("reason", toSlime(request.getData()).get()).asString();
    GlobalRouting.Agent agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant;
    EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
    long timestamp = controller.clock().instant().getEpochSecond();
    controller.routing().setGlobalRotationStatus(deployment, new EndpointStatus(status, reason, agent.name(), timestamp));
}
/** Returns the global rotation status of all rotation endpoints for the given deployment. */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    Cursor array = slime.setObject().setArray("globalrotationoverride");
    // NOTE(review): each endpoint contributes TWO entries to the array — a string (the upstream id)
    // followed by a status object. Looks intentional for wire-format compatibility; confirm with clients.
    controller.routing().globalRotationStatus(deploymentId)
              .forEach((endpoint, status) -> {
                  array.addString(endpoint.upstreamIdOf(deploymentId));
                  Cursor statusObject = array.addObject();
                  statusObject.setString("status", status.getStatus().name());
                  // Reason and agent may be null; render as empty strings rather than omitting the fields.
                  statusObject.setString("reason", status.getReason() == null ? "" : status.getReason());
                  statusObject.setString("agent", status.getAgent() == null ? "" : status.getAgent());
                  statusObject.setLong("timestamp", status.getEpoch());
              });
    return new SlimeJsonResponse(slime);
}
/**
 * Returns the BCP rotation status of the given instance's deployment in the given zone,
 * for the rotation matching the (optional) endpoint id.
 *
 * @throws NotExistsException if the instance has no deployment in the zone
 */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().requireInstance(applicationId);
    ZoneId zone = requireZone(environment, region);
    RotationId rotationId = findRotationId(instance, endpointId);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " has no deployment in " + zone);

    Slime slime = new Slime();
    toSlime(instance.rotationStatus().of(rotationId, deployment), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/**
 * Returns metering data for the given tenant and application: the current resource rate,
 * aggregates for this and last month, and a per-instance snapshot history for cpu, mem and disk.
 */
private HttpResponse metering(String tenant, String application, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    MeteringData meteringData = controller.serviceRegistry()
                                          .meteringService()
                                          .getMeteringData(TenantName.from(tenant), ApplicationName.from(application));

    // Summary objects: instantaneous rate plus monthly aggregates, each with cpu/mem/disk fields.
    setResourceAllocation(root.setObject("currentrate"), meteringData.getCurrentSnapshot());
    setResourceAllocation(root.setObject("thismonth"), meteringData.getThisMonth());
    setResourceAllocation(root.setObject("lastmonth"), meteringData.getLastMonth());

    // Per-instance time series, grouped by resource type under "details".
    Map<ApplicationId, List<ResourceSnapshot>> history = meteringData.getSnapshotHistory();
    Cursor details = root.setObject("details");
    Cursor detailsCpu = details.setObject("cpu");
    Cursor detailsMem = details.setObject("mem");
    Cursor detailsDisk = details.setObject("disk");
    history.forEach((applicationId, resources) -> {
        String instanceName = applicationId.instance().value();
        Cursor detailsCpuData = detailsCpu.setObject(instanceName).setArray("data");
        Cursor detailsMemData = detailsMem.setObject(instanceName).setArray("data");
        Cursor detailsDiskData = detailsDisk.setObject(instanceName).setArray("data");
        resources.forEach(resourceSnapshot -> {
            long unixms = resourceSnapshot.getTimestamp().toEpochMilli();
            addTimedValue(detailsCpuData, unixms, resourceSnapshot.getCpuCores());
            addTimedValue(detailsMemData, unixms, resourceSnapshot.getMemoryGb());
            addTimedValue(detailsDiskData, unixms, resourceSnapshot.getDiskGb());
        });
    });
    return new SlimeJsonResponse(slime);
}

/** Writes the cpu, mem and disk fields of the given resource allocation to the given object. */
private static void setResourceAllocation(Cursor object, ResourceAllocation allocation) {
    object.setDouble("cpu", allocation.getCpuCores());
    object.setDouble("mem", allocation.getMemoryGb());
    object.setDouble("disk", allocation.getDiskGb());
}

/** Appends a { unixms, value } data point to the given array. */
private static void addTimedValue(Cursor array, long unixms, double value) {
    Cursor point = array.addObject();
    point.setLong("unixms", unixms);
    point.setDouble("value", value);
}
/** Returns the in-progress change (platform and/or application version, and pin state) for the given instance, if any. */
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    Change change = instance.change();
    if ( ! change.isEmpty()) {
        change.platform().ifPresent(version -> root.setString("platform", version.toString()));
        change.application().ifPresent(applicationVersion -> root.setString("application", applicationVersion.id()));
        root.setBool("pinned", change.isPinned());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns whether orchestration of the given deployment is currently suspended. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    slime.setObject().setBool("suspended", controller.applications().isSuspended(deploymentId));
    return new SlimeJsonResponse(slime);
}
/** Returns the service view of the given deployment, as reported by the config server. */
private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
    ZoneId zone = requireZone(environment, region);
    ApplicationId applicationId = new ApplicationId.Builder().tenant(tenantName)
                                                             .applicationName(applicationName)
                                                             .instanceName(instanceName)
                                                             .build();
    ServiceApiResponse response = new ServiceApiResponse(zone,
                                                         applicationId,
                                                         controller.zoneRegistry().getConfigServerApiUris(zone),
                                                         request.getUri());
    response.setResponse(applicationView);
    return response;
}
/**
 * Proxies a service API request for the given deployment to the config server.
 * Cluster controller status pages are fetched specially and returned as HTML.
 *
 * @throws IllegalArgumentException if a cluster controller status path lacks a path segment after "/status/"
 */
private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));

    if ("container-clustercontroller".equals((serviceName)) && restPath.contains("/status/")) {
        String[] parts = restPath.split("/status/");
        // Guard against paths like "foo/status/" — the original code would throw
        // ArrayIndexOutOfBoundsException here; fail with a client error instead.
        if (parts.length < 2)
            throw new IllegalArgumentException("Expected a path on the form <cluster>/status/<path>, got '" + restPath + "'");
        String result = controller.serviceRegistry().configServer().getClusterControllerStatus(deploymentId, parts[0], parts[1]);
        return new HtmlResponse(result);
    }

    Map<?,?> result = controller.serviceRegistry().configServer().getServiceApiResponse(deploymentId, serviceName, restPath);
    ServiceApiResponse response = new ServiceApiResponse(deploymentId.zoneId(),
                                                         deploymentId.applicationId(),
                                                         controller.zoneRegistry().getConfigServerApiUris(deploymentId.zoneId()),
                                                         request.getUri());
    response.setResponse(result, serviceName, restPath);
    return response;
}
/** Proxies an application package content request for the given deployment to the config server. */
private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, String restPath, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, "/" + restPath, request.getUri());
}
/**
 * Updates the given, existing tenant from the request body, and returns its new state.
 *
 * @throws NotExistsException (via getTenantOrThrow) if the tenant does not exist
 */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName);
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    // Reuse the already-parsed tenant name instead of re-parsing it from the raw string.
    return tenant(controller.tenants().require(tenant), request);
}
/** Creates a new tenant from the request body, and returns its state. */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().create(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    // Reuse the already-parsed tenant name instead of re-parsing it from the raw string.
    return tenant(controller.tenants().require(tenant), request);
}
/** Creates a new application under the given tenant, and returns a serialized reference to it. */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    // The created application itself is not needed here — only the creation side effect.
    // (The original code stored the return value in an unused local.)
    controller.applications().createApplication(id, credentials);
    Slime slime = new Slime();
    toSlime(id, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Creates a new instance of the given application, creating the application first if it does not exist. */
private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    if (controller.applications().getApplication(applicationId).isEmpty())
        createApplication(tenantName, applicationName, request);

    var instanceId = applicationId.instance(instanceName);
    controller.applications().createInstance(instanceId);

    Slime slime = new Slime();
    toSlime(instanceId, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
    request = controller.auditLogger().log(request);
    // Request body is the bare version string; empty version means "current system version".
    String versionString = readToString(request.getData());
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Version version = Version.fromString(versionString);
        VersionStatus versionStatus = controller.readVersionStatus();
        if (version.equals(Version.emptyVersion))
            version = controller.systemVersion(versionStatus);
        // Only versions currently active in this system may be targeted.
        if (!versionStatus.isActive(version))
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + versionStatus.versions()
                                                                                  .stream()
                                                                                  .map(VespaVersion::versionNumber)
                                                                                  .map(Version::toString)
                                                                                  .collect(joining(", ")));
        Change change = Change.of(version);
        // Pinning prevents the upgrader from changing the platform version later.
        if (pin)
            change = change.withPin();
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/**
 * Trigger deployment to the last known application package for the given application.
 *
 * @throws IllegalArgumentException if no application package has ever been submitted
 */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    controller.auditLogger().log(request);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        // Guard the previously unchecked Optional.get(): fail with a client error, not a 500,
        // when no application version exists yet.
        Change change = Change.of(application.get().latestVersion()
                                             .orElseThrow(() -> new IllegalArgumentException("No application package has been submitted for " + id)));
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Change change = application.get().require(id.instance()).change();
        if (change.isEmpty()) {
            response.append("No deployment in progress for ").append(id).append(" at this time");
            return;
        }
        // NOTE(review): valueOf(choice.toUpperCase()) uses the default locale — confirm this never
        // runs under locales with special casing rules (e.g. Turkish dotted/dotless i).
        controller.applications().deploymentTrigger().cancelChange(id, ChangesToCancel.valueOf(choice.toUpperCase()));
        response.append("Changed deployment from '").append(change)
                .append("' to '").append(controller.applications().requireInstance(id).change())
                .append("' for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */
private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    // Both filters are comma-separated lists; blank entries are ignored, and an absent
    // property means "all clusters" / "all document types".
    List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream()
                                        .flatMap(clusters -> Stream.of(clusters.split(",")))
                                        .filter(cluster -> ! cluster.isBlank())
                                        .collect(toUnmodifiableList());
    List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream()
                                         .flatMap(types -> Stream.of(types.split(",")))
                                         .filter(type -> ! type.isBlank())
                                         .collect(toUnmodifiableList());
    controller.applications().reindex(id, zone, clusterNames, documentTypes, request.getBooleanProperty("indexedOnly"));
    // Bug fix: the document-types clause was previously nested inside the cluster clause, so it
    // was silently dropped from the message whenever no clusters were specified.
    return new MessageResponse("Requested reindexing of " + id + " in " + zone +
                               (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames)) +
                               (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)));
}
/** Gets reindexing status of an application in a zone. */
private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setBool("enabled", reindexing.enabled());
    // One entry per cluster, sorted by cluster name; within each cluster, pending and
    // ready document types are listed separately, also sorted by type name.
    Cursor clustersArray = root.setArray("clusters");
    reindexing.clusters().entrySet().stream().sorted(comparingByKey())
              .forEach(cluster -> {
                  Cursor clusterObject = clustersArray.addObject();
                  clusterObject.setString("name", cluster.getKey());
                  // Pending: document types that still await reindexing to reach a required generation.
                  Cursor pendingArray = clusterObject.setArray("pending");
                  cluster.getValue().pending().entrySet().stream().sorted(comparingByKey())
                         .forEach(pending -> {
                             Cursor pendingObject = pendingArray.addObject();
                             pendingObject.setString("type", pending.getKey());
                             pendingObject.setLong("requiredGeneration", pending.getValue());
                         });
                  // Ready: document types with a (possibly in-progress) reindexing status to report.
                  Cursor readyArray = clusterObject.setArray("ready");
                  cluster.getValue().ready().entrySet().stream().sorted(comparingByKey())
                         .forEach(ready -> {
                             Cursor readyObject = readyArray.addObject();
                             readyObject.setString("type", ready.getKey());
                             setStatus(readyObject, ready.getValue());
                         });
              });
    return new SlimeJsonResponse(slime);
}
/** Writes the ready/started/ended timestamps, state, message and progress of the given reindexing status, where present. */
void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
    status.readyAt().ifPresent(readyAt -> statusObject.setLong("readyAtMillis", readyAt.toEpochMilli()));
    status.startedAt().ifPresent(startedAt -> statusObject.setLong("startedAtMillis", startedAt.toEpochMilli()));
    status.endedAt().ifPresent(endedAt -> statusObject.setLong("endedAtMillis", endedAt.toEpochMilli()));
    // toString maps unknown states to null; Optional.map then yields empty, so no "state" field is written.
    status.state().map(ApplicationApiHandler::toString).ifPresent(state -> statusObject.setString("state", state));
    status.message().ifPresent(message -> statusObject.setString("message", message));
    status.progress().ifPresent(progress -> statusObject.setDouble("progress", progress));
}
/** Returns the wire-format name of the given reindexing state, or null for unrecognized states. */
private static String toString(ApplicationReindexing.State state) {
    if (state == ApplicationReindexing.State.PENDING) return "pending";
    if (state == ApplicationReindexing.State.RUNNING) return "running";
    if (state == ApplicationReindexing.State.FAILED) return "failed";
    if (state == ApplicationReindexing.State.SUCCESSFUL) return "successful";
    return null; // callers treat null as "no state to report"
}
/** Enables reindexing of an application in a zone. */
private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    controller.applications().enableReindexing(applicationId, zone);
    return new MessageResponse("Enabled reindexing of " + applicationId + " in " + zone);
}
/** Disables reindexing of an application in a zone. */
private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    controller.applications().disableReindexing(applicationId, zone);
    return new MessageResponse("Disabled reindexing of " + applicationId + " in " + zone);
}
/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    // All filter properties are optional; an empty filter restarts the whole deployment.
    RestartFilter filter = new RestartFilter()
            .withHostName(Optional.ofNullable(request.getProperty("hostname")).map(HostName::from))
            .withClusterType(Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from))
            .withClusterId(Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from));
    controller.applications().restart(deploymentId, filter);
    return new MessageResponse("Requested restart of " + deploymentId);
}
/** Set suspension status of the given deployment. */
private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    controller.applications().setSuspension(deploymentId, suspend);
    String action = suspend ? "Suspended" : "Resumed";
    return new MessageResponse(action + " orchestration of " + deploymentId);
}
/**
 * Starts a deployment directly from an application package attached to the request,
 * bypassing the regular pipeline. Only permitted for manually deployed environments,
 * unless the caller is an operator.
 *
 * @throws IllegalArgumentException on disallowed environment or missing application package
 */
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
    if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");

    Map<String, byte[]> dataParts = parseDataParts(request);
    // Consistency fix: the presence check previously used the literal "applicationZip" while the
    // lookup used EnvironmentResource.APPLICATION_ZIP; use the constant for both.
    if ( ! dataParts.containsKey(EnvironmentResource.APPLICATION_ZIP))
        throw new IllegalArgumentException("Missing required form part 'applicationZip'");

    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                     Optional.of(id.instance()),
                                                                     Optional.of(type.zone(controller.system())),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));

    // An explicit platform version may be given in the optional "deployOptions" part.
    Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions"))
                                        .map(json -> SlimeUtils.jsonToSlime(json).get())
                                        .flatMap(options -> optional("vespaVersion", options))
                                        .map(Version::fromString);

    controller.jobController().deploy(id, type, version, applicationPackage);
    RunId runId = controller.jobController().last(id, type).get().id();
    Slime slime = new Slime();
    Cursor rootObject = slime.setObject();
    rootObject.setString("message", "Deployment started in " + runId +
                                    ". This may take about 15 minutes the first time.");
    rootObject.setLong("run", runId.number());
    return new SlimeJsonResponse(slime);
}
/**
 * Deploys an application (or redeploys an existing revision) to the given zone.
 * The multipart request must contain a 'deployOptions' JSON part; an 'applicationZip' part,
 * a (sourceRevision, buildNumber) pair, or neither (plain redeploy) selects the package source.
 */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( ! dataParts.containsKey("deployOptions"))
        return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
    Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
    /*
     * Special handling of the proxy application (the only system application with an application package)
     * Setting any other deployOptions here is not supported for now (e.g. specifying version), but
     * this might be handy later to handle emergency downgrades.
     */
    boolean isZoneApplication = SystemApplication.proxy.id().equals(applicationId);
    if (isZoneApplication) {
        // System applications always deploy at the current system version; an explicit version is rejected.
        String versionStr = deployOptions.field("vespaVersion").asString();
        boolean versionPresent = !versionStr.isEmpty() && !versionStr.equals("null");
        if (versionPresent) {
            // NOTE(review): IllegalArgumentException (→ 400) would be more consistent than RuntimeException (→ 500) — confirm error mapping before changing.
            throw new RuntimeException("Version not supported for system applications");
        }
        // Refuse while the system itself is upgrading, or before a system version is known.
        VersionStatus versionStatus = controller.readVersionStatus();
        if (versionStatus.isUpgrading()) {
            throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
        }
        Optional<VespaVersion> systemVersion = versionStatus.systemVersion();
        if (systemVersion.isEmpty()) {
            throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
        }
        ActivateResult result = controller.applications()
                .deploySystemApplicationPackage(SystemApplication.proxy, zone, systemVersion.get().versionNumber());
        return new SlimeJsonResponse(toSlime(result));
    }
    /*
     * Normal applications from here
     */
    Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip"))
                                                              .map(ApplicationPackage::new);
    Optional<com.yahoo.vespa.hosted.controller.Application> application = controller.applications().getApplication(TenantAndApplicationId.from(applicationId));
    // Source revision and build number must be given together, and are mutually exclusive with an uploaded package.
    Inspector sourceRevision = deployOptions.field("sourceRevision");
    Inspector buildNumber = deployOptions.field("buildNumber");
    if (sourceRevision.valid() != buildNumber.valid())
        throw new IllegalArgumentException("Source revision and build number must both be provided, or not");
    Optional<ApplicationVersion> applicationVersion = Optional.empty();
    if (sourceRevision.valid()) {
        if (applicationPackage.isPresent())
            throw new IllegalArgumentException("Application version and application package can't both be provided.");
        applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision),
                                                                 buildNumber.asLong()));
        applicationPackage = Optional.of(controller.applications().getApplicationPackage(applicationId,
                                                                                         applicationVersion.get()));
    }
    boolean deployDirectly = deployOptions.field("deployDirectly").asBool();
    Optional<Version> vespaVersion = optional("vespaVersion", deployOptions).map(Version::new);
    // Plain redeploy: no package, version, or revision given — reuse what is currently deployed in the zone.
    if (deployDirectly && applicationPackage.isEmpty() && applicationVersion.isEmpty() && vespaVersion.isEmpty()) {
        Optional<Deployment> deployment = controller.applications().getInstance(applicationId)
                .map(Instance::deployments)
                .flatMap(deployments -> Optional.ofNullable(deployments.get(zone)));
        if(deployment.isEmpty())
            throw new IllegalArgumentException("Can't redeploy application, no deployment currently exist");
        ApplicationVersion version = deployment.get().applicationVersion();
        if(version.isUnknown())
            throw new IllegalArgumentException("Can't redeploy application, application version is unknown");
        applicationVersion = Optional.of(version);
        vespaVersion = Optional.of(deployment.get().version());
        applicationPackage = Optional.of(controller.applications().getApplicationPackage(applicationId,
                                                                                         applicationVersion.get()));
    }
    DeployOptions deployOptionsJsonClass = new DeployOptions(deployDirectly,
                                                             vespaVersion,
                                                             deployOptions.field("ignoreValidationErrors").asBool(),
                                                             deployOptions.field("deployCurrentVersion").asBool());
    // Verify identity configuration before the actual deploy.
    applicationPackage.ifPresent(aPackage -> controller.applications().verifyApplicationIdentityConfiguration(applicationId.tenant(),
                                                                                                              Optional.of(applicationId.instance()),
                                                                                                              Optional.of(zone),
                                                                                                              aPackage,
                                                                                                              Optional.of(requireUserPrincipal(request))));
    ActivateResult result = controller.applications().deploy(applicationId,
                                                             zone,
                                                             applicationPackage,
                                                             applicationVersion,
                                                             deployOptionsJsonClass);
    return new SlimeJsonResponse(toSlime(result));
}
/** Deletes the given tenant, using credentials from the request body; 404 if it does not exist. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    Optional<Tenant> maybeTenant = controller.tenants().get(tenantName);
    if (maybeTenant.isEmpty())
        return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");
    Tenant tenant = maybeTenant.get();
    Credentials credentials = accessControlRequests.credentials(tenant.name(),
                                                                toSlime(request.getData()).get(),
                                                                request.getJDiscRequest());
    controller.tenants().delete(tenant.name(), credentials);
    return tenant(tenant, request);
}
/** Deletes the given application, with credentials taken from the request body. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Inspector requestObject = toSlime(request.getData()).get();
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    controller.applications().deleteApplication(id, credentials);
    return new MessageResponse("Deleted application " + id);
}
/**
 * Deletes the given instance, and — when it was the application's last instance — the application itself.
 * Credentials are only read from the request body when the application deletion step is taken.
 */
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    controller.applications().deleteInstance(id.instance(instanceName));
    // Remove the application wrapper when no instances remain.
    if (controller.applications().requireApplication(id).instances().isEmpty()) {
        Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
        controller.applications().deleteApplication(id, credentials);
    }
    return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString());
}
/** Deactivates the given deployment. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    DeploymentId id = new DeploymentId(application, zone);
    controller.applications().deactivate(id.applicationId(), id.zoneId());
    return new MessageResponse("Deactivated " + id);
}
/** Returns test config for indicated job, with production deployments of the default instance. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
    ApplicationId defaultInstanceId = TenantAndApplicationId.from(id).defaultInstance();
    // Start from all production deployments of the default instance.
    HashSet<DeploymentId> deployments = controller.applications()
                                                  .getInstance(defaultInstanceId).stream()
                                                  .flatMap(instance -> instance.productionDeployments().keySet().stream())
                                                  .map(zone -> new DeploymentId(defaultInstanceId, zone))
                                                  .collect(Collectors.toCollection(HashSet::new));
    var testedZone = type.zone(controller.system());
    // For non-production jobs, also include the zone actually being tested.
    if ( ! type.isProduction())
        deployments.add(new DeploymentId(id, testedZone));
    return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
                                                                  type,
                                                                  false,
                                                                  controller.routing().zoneEndpointsOf(deployments),
                                                                  controller.applications().reachableContentClustersByZone(deployments)));
}
/** Parses a source revision from a JSON object; "repository", "branch" and "commit" are all required. */
private static SourceRevision toSourceRevision(Inspector object) {
    Inspector repository = object.field("repository");
    Inspector branch = object.field("branch");
    Inspector commit = object.field("commit");
    if ( ! (repository.valid() && branch.valid() && commit.valid()))
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    return new SourceRevision(repository.asString(), branch.asString(), commit.asString());
}
/** Returns the tenant with the given name, or throws NotExistsException if it does not exist. */
private Tenant getTenantOrThrow(String tenantName) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    return tenant.orElseThrow(() -> new NotExistsException(new TenantId(tenantName)));
}
/**
 * Serializes a tenant, its type-specific metadata, and its applications/instances to the given cursor.
 * Recursion depth and production-only filtering are controlled by query parameters on the request.
 */
private void toSlime(Cursor object, Tenant tenant, HttpRequest request) {
    object.setString("tenant", tenant.name().value());
    object.setString("type", tenantType(tenant));
    List<com.yahoo.vespa.hosted.controller.Application> applications = controller.applications().asList(tenant.name());
    switch (tenant.type()) {
        case athenz:
            // Athenz tenants expose domain, property, and (when available) contact information.
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            object.setString("athensDomain", athenzTenant.domain().getName());
            object.setString("property", athenzTenant.property().id());
            athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
            athenzTenant.contact().ifPresent(c -> {
                object.setString("propertyUrl", c.propertyUrl().toString());
                object.setString("contactsUrl", c.url().toString());
                object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
                Cursor contactsArray = object.setArray("contacts");
                c.persons().forEach(persons -> {
                    Cursor personArray = contactsArray.addArray();
                    persons.forEach(personArray::addString);
                });
            });
            break;
        case cloud: {
            // Cloud tenants expose creator, developer keys, secret stores, and quota usage.
            CloudTenant cloudTenant = (CloudTenant) tenant;
            cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName()));
            Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
            cloudTenant.developerKeys().forEach((key, user) -> {
                Cursor keyObject = pemDeveloperKeysArray.addObject();
                keyObject.setString("key", KeyUtils.toPem(key));
                keyObject.setString("user", user.getName());
            });
            toSlime(object, cloudTenant.tenantSecretStores());
            // Quota usage is the sum over all of the tenant's applications.
            var tenantQuota = controller.serviceRegistry().billingController().getQuota(tenant.name());
            var usedQuota = applications.stream()
                                        .map(com.yahoo.vespa.hosted.controller.Application::quotaUsage)
                                        .reduce(QuotaUsage.none, QuotaUsage::add);
            toSlime(tenantQuota, usedQuota, object.setObject("quota"));
            break;
        }
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    Cursor applicationArray = object.setArray("applications");
    for (com.yahoo.vespa.hosted.controller.Application application : applications) {
        DeploymentStatus status = controller.jobController().deploymentStatus(application);
        for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                      : application.instances().values())
            // Full instance serialization when recursing, otherwise just the id and url.
            if (recurseOverApplications(request))
                toSlime(applicationArray.addObject(), instance, status, request);
            else
                toSlime(instance.id(), applicationArray.addObject(), request);
    }
    tenantMetaDataToSlime(tenant, object.setObject("metaData"));
}
/** Writes a tenant quota and its current usage to the given cursor; a missing budget is written as nix. */
private void toSlime(Quota quota, QuotaUsage usage, Cursor object) {
    var budget = quota.budget();
    if (budget.isPresent())
        object.setDouble("budget", budget.get().doubleValue());
    else
        object.setNix("budget");
    object.setDouble("budgetUsed", usage.rate());
    quota.maxClusterSize().ifPresent(size -> object.setLong("clusterSize", size));
}
/** Writes cluster resources, including an approximate cost rounded to two decimals, to the given cursor. */
private void toSlime(ClusterResources resources, Cursor object) {
    object.setLong("nodes", resources.nodes());
    object.setLong("groups", resources.groups());
    toSlime(resources.nodeResources(), object.setObject("nodeResources"));
    // Non-public systems divide cost by 3 — presumably an internal pricing adjustment; confirm before reuse.
    double costDivisor = controller.zoneRegistry().system().isPublic() ? 1.0 : 3.0;
    object.setDouble("cost", Math.round(resources.nodes() * resources.nodeResources().cost() * 100.0 / costDivisor) / 100.0);
}
/** Writes cpu, memory and disk utilization values to the given cursor. */
private void utilizationToSlime(Cluster.Utilization utilization, Cursor out) {
    out.setDouble("cpu", utilization.cpu());
    out.setDouble("memory", utilization.memory());
    out.setDouble("disk", utilization.disk());
}
/** Serializes each scaling event as an object with "from", "to" and "at" into the given array. */
private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) {
    scalingEvents.forEach(event -> {
        Cursor eventObject = scalingEventsArray.addObject();
        toSlime(event.from(), eventObject.setObject("from"));
        toSlime(event.to(), eventObject.setObject("to"));
        eventObject.setLong("at", event.at().toEpochMilli());
    });
}
/** Writes the per-node resource numbers, plus disk speed and storage type names, to the given cursor. */
private void toSlime(NodeResources resources, Cursor out) {
    out.setDouble("vcpu", resources.vcpu());
    out.setDouble("memoryGb", resources.memoryGb());
    out.setDouble("diskGb", resources.diskGb());
    out.setDouble("bandwidthGbps", resources.bandwidthGbps());
    out.setString("diskSpeed", valueOf(resources.diskSpeed()));
    out.setString("storageType", valueOf(resources.storageType()));
}
/** Serializes a compact tenant entry (name, type metadata, and url) for the tenant list response. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
    object.setString("tenant", tenant.name().value());
    Cursor metaData = object.setObject("metaData");
    metaData.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            metaData.setString("athensDomain", athenzTenant.domain().getName());
            metaData.setString("property", athenzTenant.property().id());
            break;
        case cloud: break; // cloud tenants carry no extra list metadata
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/**
 * Serializes tenant activity metadata: creation time, latest dev deployment, latest production
 * submission, and last-login times per user level. Absent values are simply omitted.
 */
private void tenantMetaDataToSlime(Tenant tenant, Cursor object) {
    List<com.yahoo.vespa.hosted.controller.Application> applications = controller.applications().asList(tenant.name());
    // Latest start of any dev-environment job run across all instances.
    Optional<Instant> lastDev = applications.stream()
                                            .flatMap(application -> application.instances().values().stream())
                                            .flatMap(instance -> controller.jobController().jobs(instance.id()).stream()
                                                                           .filter(jobType -> jobType.environment() == Environment.dev)
                                                                           .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream()))
                                            .map(Run::start)
                                            .max(Comparator.naturalOrder());
    // Latest build time of any submitted application version.
    Optional<Instant> lastSubmission = applications.stream()
                                                   .flatMap(app -> app.latestVersion().flatMap(ApplicationVersion::buildTime).stream())
                                                   .max(Comparator.naturalOrder());
    object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli());
    lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli()));
    lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user)
          .ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer)
          .ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator)
          .ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli()));
}
/**
 * Returns a copy of the given URI with the scheme, user info, host and port preserved,
 * and the path and query replaced by the given values (either may be null).
 */
private URI withPathAndQuery(String newPath, String newQuery, URI uri) {
    try {
        return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null);
    }
    catch (URISyntaxException e) {
        // All components come from an already-valid URI, so this cannot occur in practice.
        throw new RuntimeException("Will not happen", e);
    }
}
/** Returns a copy of the given URI with the path replaced by the given path and the query removed. */
private URI withPath(String newPath, URI uri) {
    return withPathAndQuery(newPath, null, uri);
}
/** Returns the application/v4 API path identifying the given deployment. */
private String toPath(DeploymentId id) {
    ApplicationId application = id.applicationId();
    ZoneId zone = id.zoneId();
    return path("/application", "v4",
                "tenant", application.tenant(),
                "application", application.application(),
                "instance", application.instance(),
                "environment", zone.environment(),
                "region", zone.region());
}
/**
 * Parses the given string as a long.
 *
 * @param valueOrNull the value to parse, or null
 * @param defaultWhenNull the value to return when valueOrNull is null
 * @throws IllegalArgumentException if the value is non-null but not a valid long
 */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null) return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    }
    catch (NumberFormatException e) {
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'");
    }
}
/** Serializes basic info about a job run; the application revision is omitted when unknown. */
private void toSlime(Run run, Cursor object) {
    object.setLong("id", run.id().number());
    object.setString("version", run.versions().targetPlatform().toFullString());
    if ( ! run.versions().targetApplication().isUnknown())
        toSlime(run.versions().targetApplication(), object.setObject("revision"));
    object.setString("reason", "unknown reason"); // NOTE(review): reason is always this placeholder — presumably not tracked for runs yet
    object.setLong("at", run.end().orElse(run.start()).toEpochMilli()); // end time when finished, otherwise start time
}
/**
 * Reads at most 1 MB from the given stream and parses the content as JSON into a Slime tree.
 *
 * @throws RuntimeException wrapping the IOException if reading the stream fails
 */
private Slime toSlime(InputStream jsonStream) {
    try {
        // Cap at 1 MB to bound memory use for oversized request bodies.
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        // Preserve the cause: the previous bare RuntimeException() discarded all failure context.
        throw new RuntimeException(e);
    }
}
/** Returns the user principal of the request, or throws InternalServerErrorException when none is set. */
private static Principal requireUserPrincipal(HttpRequest request) {
    return Optional.ofNullable(request.getJDiscRequest().getUserPrincipal())
                   .orElseThrow(() -> new InternalServerErrorException("Expected a user principal"));
}
/** Returns the field with the given key, or throws IllegalArgumentException if it is missing. */
private Inspector mandatory(String key, Inspector object) {
    Inspector field = object.field(key);
    if ( ! field.valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return field;
}
/** Returns the string value of the given field, or empty if the field is missing or not a string. */
private Optional<String> optional(String key, Inspector object) {
    return SlimeUtils.optionalString(object.field(key));
}
/** Joins the given path elements with '/'; no leading or trailing separator is added. Uses Guava's Joiner. */
private static String path(Object... elements) {
    return Joiner.on("/").join(elements);
}
/** Writes tenant, application, and the application's API url to the given cursor. */
private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    String apiPath = "/application/v4" +
                     "/tenant/" + id.tenant().value() +
                     "/application/" + id.application().value();
    object.setString("url", withPath(apiPath, request.getUri()).toString());
}
/** Writes tenant, application, instance, and the instance's API url to the given cursor. */
private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    object.setString("instance", id.instance().value());
    String apiPath = "/application/v4" +
                     "/tenant/" + id.tenant().value() +
                     "/application/" + id.application().value() +
                     "/instance/" + id.instance().value();
    object.setString("url", withPath(apiPath, request.getUri()).toString());
}
/**
 * Serializes an activation result: revision id, package size, prepare log messages,
 * and the config change actions (restart and refeed) reported by the config server.
 */
private Slime toSlime(ActivateResult result) {
    Slime slime = new Slime();
    Cursor object = slime.setObject();
    object.setString("revisionId", result.revisionId().id());
    object.setLong("applicationZipSize", result.applicationZipSizeBytes());
    Cursor logArray = object.setArray("prepareMessages");
    if (result.prepareResponse().log != null) { // log may legitimately be absent
        for (Log logMessage : result.prepareResponse().log) {
            Cursor logObject = logArray.addObject();
            logObject.setLong("time", logMessage.time);
            logObject.setString("level", logMessage.level);
            logObject.setString("message", logMessage.message);
        }
    }
    Cursor changeObject = object.setObject("configChangeActions");
    // Services which must be restarted for the new config to take effect.
    Cursor restartActionsArray = changeObject.setArray("restart");
    for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
        Cursor restartActionObject = restartActionsArray.addObject();
        restartActionObject.setString("clusterName", restartAction.clusterName);
        restartActionObject.setString("clusterType", restartAction.clusterType);
        restartActionObject.setString("serviceType", restartAction.serviceType);
        serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
        stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
    }
    // Document types which must be re-fed after the change.
    Cursor refeedActionsArray = changeObject.setArray("refeed");
    for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
        Cursor refeedActionObject = refeedActionsArray.addObject();
        refeedActionObject.setString("name", refeedAction.name);
        refeedActionObject.setString("documentType", refeedAction.documentType);
        refeedActionObject.setString("clusterName", refeedAction.clusterName);
        serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
        stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
    }
    return slime;
}
/** Serializes each service info as {serviceName, serviceType, configId, hostName} into the given array. */
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
    serviceInfoList.forEach(serviceInfo -> {
        Cursor entry = array.addObject();
        entry.setString("serviceName", serviceInfo.serviceName);
        entry.setString("serviceType", serviceInfo.serviceType);
        entry.setString("configId", serviceInfo.configId);
        entry.setString("hostName", serviceInfo.hostName);
    });
}
/** Adds each of the given strings to the given array. */
private void stringsToSlime(List<String> strings, Cursor array) {
    strings.forEach(array::addString);
}
/** Serializes the given secret stores as {name, awsId, role} objects under "secretStores". */
private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) {
    Cursor stores = object.setArray("secretStores");
    for (TenantSecretStore store : tenantSecretStores) {
        Cursor storeObject = stores.addObject();
        storeObject.setString("name", store.getName());
        storeObject.setString("awsId", store.getAwsId());
        storeObject.setString("role", store.getRole());
    }
}
/**
 * Reads the entire stream into a single string, or returns null when the stream is empty.
 * NOTE(review): uses the platform default charset, and neither the Scanner nor the stream is
 * closed here — presumably the caller owns the stream; confirm before changing.
 */
private String readToString(InputStream stream) {
    Scanner scanner = new Scanner(stream).useDelimiter("\\A"); // "\A" (start of input) makes next() return everything
    if ( ! scanner.hasNext()) return null;
    return scanner.next();
}
/** Returns whether the response should recurse into tenants (implied by recursion into applications). */
private static boolean recurseOverTenants(HttpRequest request) {
    return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive"));
}
/** Returns whether the response should recurse into applications (implied by recursion into deployments). */
private static boolean recurseOverApplications(HttpRequest request) {
    return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive"));
}
/**
 * Returns whether the response should recurse into deployments.
 * ImmutableSet.contains tolerates the null returned when the property is unset; do not swap for Set.of.
 */
private static boolean recurseOverDeployments(HttpRequest request) {
    return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive"));
}
/** Returns whether only production instances should be included, per the 'production' query parameter. */
private static boolean showOnlyProductionInstances(HttpRequest request) {
    return "true".equals(request.getProperty("production")); // null-safe: property may be unset
}
/** Returns the API string for the given tenant's type. */
private static String tenantType(Tenant tenant) {
    switch (tenant.type()) {
        case athenz: return "ATHENS";
        case cloud: return "CLOUD";
        // Report the actual type value, consistent with the other tenant-type switches in this class
        // (previously this printed the implementation class's simple name, which is less useful).
        default: throw new IllegalArgumentException("Unknown tenant type: " + tenant.type());
    }
}
/** Extracts the application id from the 'tenant', 'application' and 'instance' path segments. */
private static ApplicationId appIdFromPath(Path path) {
    return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
}
/** Extracts the job type from the 'jobtype' path segment. */
private static JobType jobTypeFromPath(Path path) {
    return JobType.fromJobName(path.get("jobtype"));
}
/** Extracts a run id from the application, job type, and 'number' path segments. */
private static RunId runIdFromPath(Path path) {
    return new RunId(appIdFromPath(path), jobTypeFromPath(path), Long.parseLong(path.get("number")));
}
/**
 * Handles submission of an application package and test package for continuous deployment.
 * The multipart request carries a 'submitOptions' JSON part plus application and test zip parts.
 */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
    Map<String, byte[]> dataParts = parseDataParts(request);
    Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
    long projectId = Math.max(1, submitOptions.field("projectId").asLong()); // 0/absent is normalized to 1
    // A source revision is only formed when repository, branch, and commit are all present.
    Optional<String> repository = optional("repository", submitOptions);
    Optional<String> branch = optional("branch", submitOptions);
    Optional<String> commit = optional("commit", submitOptions);
    Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
                                              ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
                                              : Optional.empty();
    Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
    Optional<String> authorEmail = optional("authorEmail", submitOptions);
    // A given source URL must at least be absolute (scheme and host present).
    sourceUrl.map(URI::create).ifPresent(url -> {
        if (url.getHost() == null || url.getScheme() == null)
            throw new IllegalArgumentException("Source URL must include scheme and host");
    });
    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
    controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
                                                                     Optional.empty(),
                                                                     Optional.empty(),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));
    return JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
                                                        tenant,
                                                        application,
                                                        sourceRevision,
                                                        authorEmail,
                                                        sourceUrl,
                                                        projectId,
                                                        applicationPackage,
                                                        dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP));
}
/** Submits a deployment-removal package, which removes all production deployments of the application. */
private HttpResponse removeAllProdDeployments(String tenant, String application) {
    // The submit response is intentionally discarded; a fixed confirmation message is returned instead.
    JobControllerApiHandlerHelper.submitResponse(controller.jobController(), tenant, application,
                                                 Optional.empty(), Optional.empty(), Optional.empty(), 1,
                                                 ApplicationPackage.deploymentRemoval(), new byte[0]);
    return new MessageResponse("All deployments removed");
}
/**
 * Parses and validates a zone from the given environment and region.
 * The synthetic prod "controller" zone is always accepted; all others must exist in the zone registry.
 */
private ZoneId requireZone(String environment, String region) {
    ZoneId zone = ZoneId.from(environment, region);
    boolean isControllerZone = zone.environment() == Environment.prod && zone.region().value().equals("controller");
    if ( ! isControllerZone && ! controller.zoneRegistry().hasZone(zone))
        throw new IllegalArgumentException("Zone " + zone + " does not exist in this system");
    return zone;
}
/**
 * Parses the multipart request into its named parts.
 * When an X-Content-Hash header is present, the SHA-256 digest of the request body is verified
 * against the base64-decoded header value, to detect corruption or tampering in transit.
 */
private static Map<String, byte[]> parseDataParts(HttpRequest request) {
    String contentHash = request.getHeader("X-Content-Hash");
    if (contentHash == null)
        return new MultipartParser().parse(request);
    // Wrap the body stream so the digest is computed while the parser consumes it.
    DigestInputStream digester = Signatures.sha256Digester(request.getData());
    var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
    if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
        throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
    return dataParts;
}
/**
 * Resolves the rotation to act on for the given instance.
 * With an endpointId, the matching rotation is returned; without one, the instance must have
 * exactly one rotation, which is returned.
 *
 * @throws NotExistsException if the instance has no rotations, or no rotation matches the endpointId
 * @throws IllegalArgumentException if no endpointId is given and the instance has several rotations
 */
private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
    if (instance.rotations().isEmpty()) {
        throw new NotExistsException("global rotation does not exist for " + instance);
    }
    if (endpointId.isPresent()) {
        return instance.rotations().stream()
                       .filter(r -> r.endpointId().id().equals(endpointId.get()))
                       .map(AssignedRotation::rotationId)
                       .findFirst()
                       .orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() +
                                                                 " does not exist for " + instance));
    } else if (instance.rotations().size() > 1) {
        throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
    }
    return instance.rotations().get(0).rotationId();
}
/** Returns the API string for the given rotation state; unrecognized states map to "UNKNOWN". */
private static String rotationStateString(RotationState state) {
    if (state == RotationState.in) return "IN";
    if (state == RotationState.out) return "OUT";
    return "UNKNOWN";
}
/** Returns the API string for the given endpoint scope. */
private static String endpointScopeString(Endpoint.Scope scope) {
    if (scope == Endpoint.Scope.region) return "region";
    if (scope == Endpoint.Scope.global) return "global";
    if (scope == Endpoint.Scope.zone) return "zone";
    throw new IllegalArgumentException("Unknown endpoint scope " + scope);
}
/** Returns the API string for the given routing method. */
private static String routingMethodString(RoutingMethod method) {
    if (method == RoutingMethod.exclusive) return "exclusive";
    if (method == RoutingMethod.shared) return "shared";
    if (method == RoutingMethod.sharedLayer4) return "sharedLayer4";
    throw new IllegalArgumentException("Unknown routing method " + method);
}
/**
 * Returns the request context attribute with the given name, cast to the given class.
 * Throws IllegalArgumentException when the attribute is absent or of the wrong type.
 */
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
    Object value = request.getJDiscRequest().context().get(attributeName);
    if (cls.isInstance(value))
        return cls.cast(value);
    throw new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request");
}
/** Returns whether the given request is made by a hosted operator. */
private static boolean isOperator(HttpRequest request) {
    SecurityContext securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
    for (Role role : securityContext.roles())
        if (role.definition() == RoleDefinition.hostedOperator)
            return true;
    return false;
}
} |
You can use `log.log(Level.FINE, () -> ...)` instead | private List<String> awaitInternal(Duration timeout) throws Exception {
Instant startTime = clock.instant();
Instant endTime = startTime.plus(timeout);
List<String> respondents;
do {
respondents = curator.framework().getChildren().forPath(barrierPath);
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, respondents.size() + "/" + curator.zooKeeperEnsembleCount() + " responded: " +
respondents + ", all participants: " + curator.zooKeeperEnsembleConnectionSpec());
}
if (respondents.size() == curator.zooKeeperEnsembleCount()) {
log.log(Level.FINE, barrierCompletedMessage(respondents, startTime));
break;
}
if (respondents.size() >= barrierMemberCount()) {
log.log(Level.FINE, barrierCompletedMessage(respondents, startTime));
break;
}
Thread.sleep(100);
} while (clock.instant().isBefore(endTime));
return respondents;
} | if (log.isLoggable(Level.FINE)) { | private List<String> awaitInternal(Duration timeout) throws Exception {
Instant startTime = clock.instant();
Instant endTime = startTime.plus(timeout);
List<String> respondents;
do {
respondents = curator.framework().getChildren().forPath(barrierPath);
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, respondents.size() + "/" + curator.zooKeeperEnsembleCount() + " responded: " +
respondents + ", all participants: " + curator.zooKeeperEnsembleConnectionSpec());
}
if (respondents.size() == curator.zooKeeperEnsembleCount()) {
log.log(Level.FINE, barrierCompletedMessage(respondents, startTime));
break;
}
if (respondents.size() >= barrierMemberCount()) {
log.log(Level.FINE, barrierCompletedMessage(respondents, startTime));
break;
}
Thread.sleep(100);
} while (clock.instant().isBefore(endTime));
return respondents;
} | class CuratorCompletionWaiter implements Curator.CompletionWaiter {
private static final java.util.logging.Logger log = java.util.logging.Logger.getLogger(CuratorCompletionWaiter.class.getName());
private final Curator curator;
private final String barrierPath;
private final String myId;
private final Clock clock;
CuratorCompletionWaiter(Curator curator, String barrierPath, String myId, Clock clock) {
this.myId = barrierPath + "/" + myId;
this.curator = curator;
this.barrierPath = barrierPath;
this.clock = clock;
}
@Override
public void awaitCompletion(Duration timeout) {
List<String> respondents;
try {
log.log(Level.FINE, "Synchronizing on barrier " + barrierPath);
respondents = awaitInternal(timeout);
log.log(Level.FINE, "Done synchronizing on barrier " + barrierPath);
} catch (Exception e) {
throw new RuntimeException(e);
}
if (respondents.size() < barrierMemberCount()) {
throw new CompletionTimeoutException("Timed out waiting for peer config servers to complete operation " +
"(waited for barrier " + barrierPath + ")." +
"Got response from " + respondents + ", but need response from " +
"at least " + barrierMemberCount() + " server(s). " +
"Timeout passed as argument was " + timeout.toMillis() + " ms");
}
}
private String barrierCompletedMessage(List<String> respondents, Instant startTime) {
return barrierPath + " completed in " + Duration.between(startTime, Instant.now()).toString() +
", " + respondents.size() + "/" + curator.zooKeeperEnsembleCount() + " responded: " + respondents;
}
@Override
public void notifyCompletion() {
try {
notifyInternal();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
private void notifyInternal() throws Exception {
curator.framework().create().forPath(myId);
}
@Override
public String toString() {
return "'" + barrierPath + "', " + barrierMemberCount() + " members";
}
public static Curator.CompletionWaiter create(Curator curator, Path barrierPath, String id) {
return new CuratorCompletionWaiter(curator, barrierPath.getAbsolute(), id, Clock.systemUTC());
}
public static Curator.CompletionWaiter createAndInitialize(Curator curator, Path parentPath, String waiterNode, String id) {
Path waiterPath = parentPath.append(waiterNode);
curator.delete(waiterPath);
curator.createAtomically(waiterPath);
return new CuratorCompletionWaiter(curator, waiterPath.getAbsolute(), id, Clock.systemUTC());
}
private int barrierMemberCount() {
return (curator.zooKeeperEnsembleCount() / 2) + 1;
}
} | class CuratorCompletionWaiter implements Curator.CompletionWaiter {
private static final java.util.logging.Logger log = java.util.logging.Logger.getLogger(CuratorCompletionWaiter.class.getName());
private final Curator curator;
private final String barrierPath;
private final String myId;
private final Clock clock;
CuratorCompletionWaiter(Curator curator, String barrierPath, String myId, Clock clock) {
this.myId = barrierPath + "/" + myId;
this.curator = curator;
this.barrierPath = barrierPath;
this.clock = clock;
}
/**
 * Waits until a quorum (majority) of config servers have responded on the barrier,
 * or the timeout passes.
 *
 * @param timeout max time to wait for the barrier to complete
 * @throws CompletionTimeoutException if fewer than a quorum of servers responded within the timeout
 * @throws RuntimeException if waiting fails for any other reason
 */
@Override
public void awaitCompletion(Duration timeout) {
    List<String> respondents;
    try {
        log.log(Level.FINE, "Synchronizing on barrier " + barrierPath);
        respondents = awaitInternal(timeout);
        log.log(Level.FINE, "Done synchronizing on barrier " + barrierPath);
    } catch (InterruptedException e) {
        // awaitInternal sleeps between polls; restore the interrupt flag before
        // translating to unchecked, so callers can still observe the interruption.
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
    if (respondents.size() < barrierMemberCount()) {
        throw new CompletionTimeoutException("Timed out waiting for peer config servers to complete operation " +
                                             "(waited for barrier " + barrierPath + "). " +
                                             "Got response from " + respondents + ", but need response from " +
                                             "at least " + barrierMemberCount() + " server(s). " +
                                             "Timeout passed as argument was " + timeout.toMillis() + " ms");
    }
}
/** Builds the log line reporting that the barrier completed and which servers responded. */
private String barrierCompletedMessage(List<String> respondents, Instant startTime) {
    // NOTE(review): elapsed time is measured with Instant.now(), not the injected clock — confirm intended.
    Duration elapsed = Duration.between(startTime, Instant.now());
    return barrierPath + " completed in " + elapsed +
           ", " + respondents.size() + "/" + curator.zooKeeperEnsembleCount() +
           " responded: " + respondents;
}
/**
 * Signals that this server has completed the operation, by delegating to
 * {@link #notifyInternal()}. Any failure is rethrown as an unchecked exception.
 */
@Override
public void notifyCompletion() {
try {
notifyInternal();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
// Creates this member's znode (myId) under the barrier path; peers polling the barrier's
// children then see this server as having responded.
// Presumably throws if the node already exists (completion notified twice) — TODO confirm.
private void notifyInternal() throws Exception {
curator.framework().create().forPath(myId);
}
/** Returns the barrier path and the required member count, for logging. */
@Override
public String toString() {
    StringBuilder description = new StringBuilder();
    description.append('\'').append(barrierPath).append("', ");
    description.append(barrierMemberCount()).append(" members");
    return description.toString();
}
/** Returns a waiter on the barrier at the given path, using the system UTC clock. */
public static Curator.CompletionWaiter create(Curator curator, Path barrierPath, String id) {
return new CuratorCompletionWaiter(curator, barrierPath.getAbsolute(), id, Clock.systemUTC());
}
/**
 * Creates the barrier node under the given parent (replacing any previous node at the
 * same path) and returns a waiter on it. Intended for the server initiating the operation.
 */
public static Curator.CompletionWaiter createAndInitialize(Curator curator, Path parentPath, String waiterNode, String id) {
Path waiterPath = parentPath.append(waiterNode);
// NOTE(review): delete-then-create is not atomic; presumably initialization is serialized by the caller — confirm.
curator.delete(waiterPath);
curator.createAtomically(waiterPath);
return new CuratorCompletionWaiter(curator, waiterPath.getAbsolute(), id, Clock.systemUTC());
}
// Number of responses required for the barrier to complete:
// a simple majority (quorum) of the ZooKeeper ensemble, floor(n/2) + 1.
private int barrierMemberCount() {
return (curator.zooKeeperEnsembleCount() / 2) + 1;
}
} |
Yes, but in this particular case one of the arguments is non-final, and making it final is much more work :) | private List<String> awaitInternal(Duration timeout) throws Exception {
Instant startTime = clock.instant();
Instant endTime = startTime.plus(timeout);
List<String> respondents;
do {
respondents = curator.framework().getChildren().forPath(barrierPath);
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, respondents.size() + "/" + curator.zooKeeperEnsembleCount() + " responded: " +
respondents + ", all participants: " + curator.zooKeeperEnsembleConnectionSpec());
}
if (respondents.size() == curator.zooKeeperEnsembleCount()) {
log.log(Level.FINE, barrierCompletedMessage(respondents, startTime));
break;
}
if (respondents.size() >= barrierMemberCount()) {
log.log(Level.FINE, barrierCompletedMessage(respondents, startTime));
break;
}
Thread.sleep(100);
} while (clock.instant().isBefore(endTime));
return respondents;
} | if (log.isLoggable(Level.FINE)) { | private List<String> awaitInternal(Duration timeout) throws Exception {
Instant startTime = clock.instant();
Instant endTime = startTime.plus(timeout);
List<String> respondents;
do {
respondents = curator.framework().getChildren().forPath(barrierPath);
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, respondents.size() + "/" + curator.zooKeeperEnsembleCount() + " responded: " +
respondents + ", all participants: " + curator.zooKeeperEnsembleConnectionSpec());
}
if (respondents.size() == curator.zooKeeperEnsembleCount()) {
log.log(Level.FINE, barrierCompletedMessage(respondents, startTime));
break;
}
if (respondents.size() >= barrierMemberCount()) {
log.log(Level.FINE, barrierCompletedMessage(respondents, startTime));
break;
}
Thread.sleep(100);
} while (clock.instant().isBefore(endTime));
return respondents;
} | class CuratorCompletionWaiter implements Curator.CompletionWaiter {
private static final java.util.logging.Logger log = java.util.logging.Logger.getLogger(CuratorCompletionWaiter.class.getName());
private final Curator curator;
private final String barrierPath;
private final String myId;
private final Clock clock;
CuratorCompletionWaiter(Curator curator, String barrierPath, String myId, Clock clock) {
this.myId = barrierPath + "/" + myId;
this.curator = curator;
this.barrierPath = barrierPath;
this.clock = clock;
}
@Override
public void awaitCompletion(Duration timeout) {
List<String> respondents;
try {
log.log(Level.FINE, "Synchronizing on barrier " + barrierPath);
respondents = awaitInternal(timeout);
log.log(Level.FINE, "Done synchronizing on barrier " + barrierPath);
} catch (Exception e) {
throw new RuntimeException(e);
}
if (respondents.size() < barrierMemberCount()) {
throw new CompletionTimeoutException("Timed out waiting for peer config servers to complete operation " +
"(waited for barrier " + barrierPath + ")." +
"Got response from " + respondents + ", but need response from " +
"at least " + barrierMemberCount() + " server(s). " +
"Timeout passed as argument was " + timeout.toMillis() + " ms");
}
}
private String barrierCompletedMessage(List<String> respondents, Instant startTime) {
return barrierPath + " completed in " + Duration.between(startTime, Instant.now()).toString() +
", " + respondents.size() + "/" + curator.zooKeeperEnsembleCount() + " responded: " + respondents;
}
@Override
public void notifyCompletion() {
try {
notifyInternal();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
private void notifyInternal() throws Exception {
curator.framework().create().forPath(myId);
}
@Override
public String toString() {
return "'" + barrierPath + "', " + barrierMemberCount() + " members";
}
public static Curator.CompletionWaiter create(Curator curator, Path barrierPath, String id) {
return new CuratorCompletionWaiter(curator, barrierPath.getAbsolute(), id, Clock.systemUTC());
}
public static Curator.CompletionWaiter createAndInitialize(Curator curator, Path parentPath, String waiterNode, String id) {
Path waiterPath = parentPath.append(waiterNode);
curator.delete(waiterPath);
curator.createAtomically(waiterPath);
return new CuratorCompletionWaiter(curator, waiterPath.getAbsolute(), id, Clock.systemUTC());
}
private int barrierMemberCount() {
return (curator.zooKeeperEnsembleCount() / 2) + 1;
}
} | class CuratorCompletionWaiter implements Curator.CompletionWaiter {
private static final java.util.logging.Logger log = java.util.logging.Logger.getLogger(CuratorCompletionWaiter.class.getName());
private final Curator curator;
private final String barrierPath;
private final String myId;
private final Clock clock;
CuratorCompletionWaiter(Curator curator, String barrierPath, String myId, Clock clock) {
this.myId = barrierPath + "/" + myId;
this.curator = curator;
this.barrierPath = barrierPath;
this.clock = clock;
}
@Override
public void awaitCompletion(Duration timeout) {
List<String> respondents;
try {
log.log(Level.FINE, "Synchronizing on barrier " + barrierPath);
respondents = awaitInternal(timeout);
log.log(Level.FINE, "Done synchronizing on barrier " + barrierPath);
} catch (Exception e) {
throw new RuntimeException(e);
}
if (respondents.size() < barrierMemberCount()) {
throw new CompletionTimeoutException("Timed out waiting for peer config servers to complete operation " +
"(waited for barrier " + barrierPath + ")." +
"Got response from " + respondents + ", but need response from " +
"at least " + barrierMemberCount() + " server(s). " +
"Timeout passed as argument was " + timeout.toMillis() + " ms");
}
}
private String barrierCompletedMessage(List<String> respondents, Instant startTime) {
return barrierPath + " completed in " + Duration.between(startTime, Instant.now()).toString() +
", " + respondents.size() + "/" + curator.zooKeeperEnsembleCount() + " responded: " + respondents;
}
@Override
public void notifyCompletion() {
try {
notifyInternal();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
private void notifyInternal() throws Exception {
curator.framework().create().forPath(myId);
}
@Override
public String toString() {
return "'" + barrierPath + "', " + barrierMemberCount() + " members";
}
public static Curator.CompletionWaiter create(Curator curator, Path barrierPath, String id) {
return new CuratorCompletionWaiter(curator, barrierPath.getAbsolute(), id, Clock.systemUTC());
}
public static Curator.CompletionWaiter createAndInitialize(Curator curator, Path parentPath, String waiterNode, String id) {
Path waiterPath = parentPath.append(waiterNode);
curator.delete(waiterPath);
curator.createAtomically(waiterPath);
return new CuratorCompletionWaiter(curator, waiterPath.getAbsolute(), id, Clock.systemUTC());
}
private int barrierMemberCount() {
return (curator.zooKeeperEnsembleCount() / 2) + 1;
}
} |
Yes, right, that's a valid case for sure | private List<String> awaitInternal(Duration timeout) throws Exception {
Instant startTime = clock.instant();
Instant endTime = startTime.plus(timeout);
List<String> respondents;
do {
respondents = curator.framework().getChildren().forPath(barrierPath);
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, respondents.size() + "/" + curator.zooKeeperEnsembleCount() + " responded: " +
respondents + ", all participants: " + curator.zooKeeperEnsembleConnectionSpec());
}
if (respondents.size() == curator.zooKeeperEnsembleCount()) {
log.log(Level.FINE, barrierCompletedMessage(respondents, startTime));
break;
}
if (respondents.size() >= barrierMemberCount()) {
log.log(Level.FINE, barrierCompletedMessage(respondents, startTime));
break;
}
Thread.sleep(100);
} while (clock.instant().isBefore(endTime));
return respondents;
} | if (log.isLoggable(Level.FINE)) { | private List<String> awaitInternal(Duration timeout) throws Exception {
Instant startTime = clock.instant();
Instant endTime = startTime.plus(timeout);
List<String> respondents;
do {
respondents = curator.framework().getChildren().forPath(barrierPath);
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, respondents.size() + "/" + curator.zooKeeperEnsembleCount() + " responded: " +
respondents + ", all participants: " + curator.zooKeeperEnsembleConnectionSpec());
}
if (respondents.size() == curator.zooKeeperEnsembleCount()) {
log.log(Level.FINE, barrierCompletedMessage(respondents, startTime));
break;
}
if (respondents.size() >= barrierMemberCount()) {
log.log(Level.FINE, barrierCompletedMessage(respondents, startTime));
break;
}
Thread.sleep(100);
} while (clock.instant().isBefore(endTime));
return respondents;
} | class CuratorCompletionWaiter implements Curator.CompletionWaiter {
private static final java.util.logging.Logger log = java.util.logging.Logger.getLogger(CuratorCompletionWaiter.class.getName());
private final Curator curator;
private final String barrierPath;
private final String myId;
private final Clock clock;
CuratorCompletionWaiter(Curator curator, String barrierPath, String myId, Clock clock) {
this.myId = barrierPath + "/" + myId;
this.curator = curator;
this.barrierPath = barrierPath;
this.clock = clock;
}
@Override
public void awaitCompletion(Duration timeout) {
List<String> respondents;
try {
log.log(Level.FINE, "Synchronizing on barrier " + barrierPath);
respondents = awaitInternal(timeout);
log.log(Level.FINE, "Done synchronizing on barrier " + barrierPath);
} catch (Exception e) {
throw new RuntimeException(e);
}
if (respondents.size() < barrierMemberCount()) {
throw new CompletionTimeoutException("Timed out waiting for peer config servers to complete operation " +
"(waited for barrier " + barrierPath + ")." +
"Got response from " + respondents + ", but need response from " +
"at least " + barrierMemberCount() + " server(s). " +
"Timeout passed as argument was " + timeout.toMillis() + " ms");
}
}
private String barrierCompletedMessage(List<String> respondents, Instant startTime) {
return barrierPath + " completed in " + Duration.between(startTime, Instant.now()).toString() +
", " + respondents.size() + "/" + curator.zooKeeperEnsembleCount() + " responded: " + respondents;
}
@Override
public void notifyCompletion() {
try {
notifyInternal();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
private void notifyInternal() throws Exception {
curator.framework().create().forPath(myId);
}
@Override
public String toString() {
return "'" + barrierPath + "', " + barrierMemberCount() + " members";
}
public static Curator.CompletionWaiter create(Curator curator, Path barrierPath, String id) {
return new CuratorCompletionWaiter(curator, barrierPath.getAbsolute(), id, Clock.systemUTC());
}
public static Curator.CompletionWaiter createAndInitialize(Curator curator, Path parentPath, String waiterNode, String id) {
Path waiterPath = parentPath.append(waiterNode);
curator.delete(waiterPath);
curator.createAtomically(waiterPath);
return new CuratorCompletionWaiter(curator, waiterPath.getAbsolute(), id, Clock.systemUTC());
}
private int barrierMemberCount() {
return (curator.zooKeeperEnsembleCount() / 2) + 1;
}
} | class CuratorCompletionWaiter implements Curator.CompletionWaiter {
private static final java.util.logging.Logger log = java.util.logging.Logger.getLogger(CuratorCompletionWaiter.class.getName());
private final Curator curator;
private final String barrierPath;
private final String myId;
private final Clock clock;
CuratorCompletionWaiter(Curator curator, String barrierPath, String myId, Clock clock) {
this.myId = barrierPath + "/" + myId;
this.curator = curator;
this.barrierPath = barrierPath;
this.clock = clock;
}
@Override
public void awaitCompletion(Duration timeout) {
List<String> respondents;
try {
log.log(Level.FINE, "Synchronizing on barrier " + barrierPath);
respondents = awaitInternal(timeout);
log.log(Level.FINE, "Done synchronizing on barrier " + barrierPath);
} catch (Exception e) {
throw new RuntimeException(e);
}
if (respondents.size() < barrierMemberCount()) {
throw new CompletionTimeoutException("Timed out waiting for peer config servers to complete operation " +
"(waited for barrier " + barrierPath + ")." +
"Got response from " + respondents + ", but need response from " +
"at least " + barrierMemberCount() + " server(s). " +
"Timeout passed as argument was " + timeout.toMillis() + " ms");
}
}
private String barrierCompletedMessage(List<String> respondents, Instant startTime) {
return barrierPath + " completed in " + Duration.between(startTime, Instant.now()).toString() +
", " + respondents.size() + "/" + curator.zooKeeperEnsembleCount() + " responded: " + respondents;
}
@Override
public void notifyCompletion() {
try {
notifyInternal();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
private void notifyInternal() throws Exception {
curator.framework().create().forPath(myId);
}
@Override
public String toString() {
return "'" + barrierPath + "', " + barrierMemberCount() + " members";
}
public static Curator.CompletionWaiter create(Curator curator, Path barrierPath, String id) {
return new CuratorCompletionWaiter(curator, barrierPath.getAbsolute(), id, Clock.systemUTC());
}
public static Curator.CompletionWaiter createAndInitialize(Curator curator, Path parentPath, String waiterNode, String id) {
Path waiterPath = parentPath.append(waiterNode);
curator.delete(waiterPath);
curator.createAtomically(waiterPath);
return new CuratorCompletionWaiter(curator, waiterPath.getAbsolute(), id, Clock.systemUTC());
}
private int barrierMemberCount() {
return (curator.zooKeeperEnsembleCount() / 2) + 1;
}
} |
s/But // | private boolean canRemove(Node node, NodeList activeNodes) {
if (node.type().isHost()) {
if (nodeRepository().nodes().list().childrenOf(node).asList().stream()
.allMatch(child -> child.state() == Node.State.parked ||
child.state() == Node.State.failed)) {
log.info("Host " + node + " has no non-parked/failed children");
return true;
}
return false;
}
if (node.type().isConfigServerLike()) {
if (activeNodes.nodeType(node.type()).asSet().size() < NUM_CONFIG_SERVERS) {
return false;
}
} else if (node.history().hasEventBefore(History.Event.Type.retired, clock().instant().minus(retiredExpiry))) {
log.warning("Node " + node + " has been retired longer than " + retiredExpiry + ": Allowing removal. This may cause data loss");
return true;
}
try {
orchestrator.acquirePermissionToRemove(new HostName(node.hostname()));
log.info("Node " + node + " has been granted permission to be removed");
return true;
} catch (UncheckedTimeoutException e) {
log.warning("Timed out trying to acquire permission to remove " + node.hostname() + ": " + Exceptions.toMessageString(e));
return false;
} catch (OrchestrationException e) {
log.info("Did not get permission to remove retired " + node + ": " + Exceptions.toMessageString(e));
return false;
}
} | private boolean canRemove(Node node, NodeList activeNodes) {
if (node.type().isHost()) {
if (nodeRepository().nodes().list().childrenOf(node).asList().stream()
.allMatch(child -> child.state() == Node.State.parked ||
child.state() == Node.State.failed)) {
log.info("Host " + node + " has no non-parked/failed children");
return true;
}
return false;
}
if (node.type().isConfigServerLike()) {
if (activeNodes.nodeType(node.type()).size() < NUM_CONFIG_SERVERS) {
return false;
}
} else if (node.history().hasEventBefore(History.Event.Type.retired, clock().instant().minus(retiredExpiry))) {
log.warning("Node " + node + " has been retired longer than " + retiredExpiry + ": Allowing removal. This may cause data loss");
return true;
}
try {
orchestrator.acquirePermissionToRemove(new HostName(node.hostname()));
log.info("Node " + node + " has been granted permission to be removed");
return true;
} catch (UncheckedTimeoutException e) {
log.warning("Timed out trying to acquire permission to remove " + node.hostname() + ": " + Exceptions.toMessageString(e));
return false;
} catch (OrchestrationException e) {
log.info("Did not get permission to remove retired " + node + ": " + Exceptions.toMessageString(e));
return false;
}
} | class RetiredExpirer extends NodeRepositoryMaintainer {
private static final int NUM_CONFIG_SERVERS = 3;
private final Deployer deployer;
private final Metric metric;
private final Orchestrator orchestrator;
private final Duration retiredExpiry;
/**
 * @param nodeRepository the node repository this maintainer operates on
 * @param orchestrator asked for permission before a retired node is removed
 * @param deployer used to redeploy applications so removable nodes are deactivated
 * @param metric metric receiver, also passed to maintenance deployments
 * @param maintenanceInterval how often this maintainer runs
 * @param retiredExpiry how long a node may stay retired before removal is allowed regardless
 */
public RetiredExpirer(NodeRepository nodeRepository,
Orchestrator orchestrator,
Deployer deployer,
Metric metric,
Duration maintenanceInterval,
Duration retiredExpiry) {
super(nodeRepository, maintenanceInterval, metric);
this.deployer = deployer;
this.metric = metric;
this.orchestrator = orchestrator;
this.retiredExpiry = retiredExpiry;
}
/**
 * Finds retired nodes in active applications and deactivates those that may be removed,
 * by marking them removable and redeploying the owning application.
 *
 * @return true unless a redeployment failed to activate
 */
@Override
protected boolean maintain() {
    NodeList activeNodes = nodeRepository().nodes().list(Node.State.active);
    // Group the retired active nodes by the application owning them
    Map<ApplicationId, List<Node>> retiredByOwner =
            activeNodes.stream()
                       .filter(node -> node.allocation().isPresent())
                       .filter(node -> node.allocation().get().membership().retired())
                       .collect(Collectors.groupingBy(node -> node.allocation().get().owner()));
    for (Map.Entry<ApplicationId, List<Node>> ownerAndNodes : retiredByOwner.entrySet()) {
        ApplicationId owner = ownerAndNodes.getKey();
        List<Node> removable = ownerAndNodes.getValue().stream()
                                            .filter(node -> canRemove(node, activeNodes))
                                            .collect(Collectors.toList());
        if (removable.isEmpty()) continue;
        try (MaintenanceDeployment deployment = new MaintenanceDeployment(owner, deployer, metric, nodeRepository())) {
            if ( ! deployment.isValid()) continue;
            nodeRepository().nodes().setRemovable(owner, removable);
            if ( ! deployment.activate().isPresent()) return false;
            String hostnames = removable.stream().map(Node::hostname).collect(Collectors.joining(", "));
            log.info("Redeployed " + owner + " to deactivate retired nodes: " + hostnames);
        }
    }
    return true;
}
/**
* Checks if the node can be removed:
* if the node is a host, it will only be removed if it has no children,
* or all its children are parked or failed.
* Otherwise, a removal is allowed if either of these is true:
* - the node has been retired ({@link History.Event.Type#retired}) longer than the configured expiry
* - the Orchestrator grants permission to remove it
*/
} | class RetiredExpirer extends NodeRepositoryMaintainer {
private static final int NUM_CONFIG_SERVERS = 3;
private final Deployer deployer;
private final Metric metric;
private final Orchestrator orchestrator;
private final Duration retiredExpiry;
public RetiredExpirer(NodeRepository nodeRepository,
Orchestrator orchestrator,
Deployer deployer,
Metric metric,
Duration maintenanceInterval,
Duration retiredExpiry) {
super(nodeRepository, maintenanceInterval, metric);
this.deployer = deployer;
this.metric = metric;
this.orchestrator = orchestrator;
this.retiredExpiry = retiredExpiry;
}
@Override
protected boolean maintain() {
NodeList activeNodes = nodeRepository().nodes().list(Node.State.active);
Map<ApplicationId, List<Node>> retiredNodesByApplication = activeNodes.stream()
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().membership().retired())
.collect(Collectors.groupingBy(node -> node.allocation().get().owner()));
for (Map.Entry<ApplicationId, List<Node>> entry : retiredNodesByApplication.entrySet()) {
ApplicationId application = entry.getKey();
List<Node> retiredNodes = entry.getValue();
List<Node> nodesToRemove = retiredNodes.stream().filter(n -> canRemove(n, activeNodes)).collect(Collectors.toList());
if (nodesToRemove.isEmpty()) continue;
try (MaintenanceDeployment deployment = new MaintenanceDeployment(application, deployer, metric, nodeRepository())) {
if ( ! deployment.isValid()) continue;
nodeRepository().nodes().setRemovable(application, nodesToRemove);
boolean success = deployment.activate().isPresent();
if ( ! success) return success;
String nodeList = nodesToRemove.stream().map(Node::hostname).collect(Collectors.joining(", "));
log.info("Redeployed " + application + " to deactivate retired nodes: " + nodeList);
}
}
return true;
}
/**
* Checks if the node can be removed:
* if the node is a host, it will only be removed if it has no children,
* or all its children are parked or failed.
* Otherwise, a removal is allowed if either of these are true:
* - The node has been in state {@link History.Event.Type
* - Orchestrator allows it
*/
} | |
Done | private boolean canRemove(Node node, NodeList activeNodes) {
if (node.type().isHost()) {
if (nodeRepository().nodes().list().childrenOf(node).asList().stream()
.allMatch(child -> child.state() == Node.State.parked ||
child.state() == Node.State.failed)) {
log.info("Host " + node + " has no non-parked/failed children");
return true;
}
return false;
}
if (node.type().isConfigServerLike()) {
if (activeNodes.nodeType(node.type()).asSet().size() < NUM_CONFIG_SERVERS) {
return false;
}
} else if (node.history().hasEventBefore(History.Event.Type.retired, clock().instant().minus(retiredExpiry))) {
log.warning("Node " + node + " has been retired longer than " + retiredExpiry + ": Allowing removal. This may cause data loss");
return true;
}
try {
orchestrator.acquirePermissionToRemove(new HostName(node.hostname()));
log.info("Node " + node + " has been granted permission to be removed");
return true;
} catch (UncheckedTimeoutException e) {
log.warning("Timed out trying to acquire permission to remove " + node.hostname() + ": " + Exceptions.toMessageString(e));
return false;
} catch (OrchestrationException e) {
log.info("Did not get permission to remove retired " + node + ": " + Exceptions.toMessageString(e));
return false;
}
} | private boolean canRemove(Node node, NodeList activeNodes) {
if (node.type().isHost()) {
if (nodeRepository().nodes().list().childrenOf(node).asList().stream()
.allMatch(child -> child.state() == Node.State.parked ||
child.state() == Node.State.failed)) {
log.info("Host " + node + " has no non-parked/failed children");
return true;
}
return false;
}
if (node.type().isConfigServerLike()) {
if (activeNodes.nodeType(node.type()).size() < NUM_CONFIG_SERVERS) {
return false;
}
} else if (node.history().hasEventBefore(History.Event.Type.retired, clock().instant().minus(retiredExpiry))) {
log.warning("Node " + node + " has been retired longer than " + retiredExpiry + ": Allowing removal. This may cause data loss");
return true;
}
try {
orchestrator.acquirePermissionToRemove(new HostName(node.hostname()));
log.info("Node " + node + " has been granted permission to be removed");
return true;
} catch (UncheckedTimeoutException e) {
log.warning("Timed out trying to acquire permission to remove " + node.hostname() + ": " + Exceptions.toMessageString(e));
return false;
} catch (OrchestrationException e) {
log.info("Did not get permission to remove retired " + node + ": " + Exceptions.toMessageString(e));
return false;
}
} | class RetiredExpirer extends NodeRepositoryMaintainer {
private static final int NUM_CONFIG_SERVERS = 3;
private final Deployer deployer;
private final Metric metric;
private final Orchestrator orchestrator;
private final Duration retiredExpiry;
public RetiredExpirer(NodeRepository nodeRepository,
Orchestrator orchestrator,
Deployer deployer,
Metric metric,
Duration maintenanceInterval,
Duration retiredExpiry) {
super(nodeRepository, maintenanceInterval, metric);
this.deployer = deployer;
this.metric = metric;
this.orchestrator = orchestrator;
this.retiredExpiry = retiredExpiry;
}
@Override
protected boolean maintain() {
NodeList activeNodes = nodeRepository().nodes().list(Node.State.active);
Map<ApplicationId, List<Node>> retiredNodesByApplication = activeNodes.stream()
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().membership().retired())
.collect(Collectors.groupingBy(node -> node.allocation().get().owner()));
for (Map.Entry<ApplicationId, List<Node>> entry : retiredNodesByApplication.entrySet()) {
ApplicationId application = entry.getKey();
List<Node> retiredNodes = entry.getValue();
List<Node> nodesToRemove = retiredNodes.stream().filter(n -> canRemove(n, activeNodes)).collect(Collectors.toList());
if (nodesToRemove.isEmpty()) continue;
try (MaintenanceDeployment deployment = new MaintenanceDeployment(application, deployer, metric, nodeRepository())) {
if ( ! deployment.isValid()) continue;
nodeRepository().nodes().setRemovable(application, nodesToRemove);
boolean success = deployment.activate().isPresent();
if ( ! success) return success;
String nodeList = nodesToRemove.stream().map(Node::hostname).collect(Collectors.joining(", "));
log.info("Redeployed " + application + " to deactivate retired nodes: " + nodeList);
}
}
return true;
}
/**
* Checks if the node can be removed:
* if the node is a host, it will only be removed if it has no children,
* or all its children are parked or failed.
* Otherwise, a removal is allowed if either of these are true:
* - The node has been in state {@link History.Event.Type
* - Orchestrator allows it
*/
} | class RetiredExpirer extends NodeRepositoryMaintainer {
private static final int NUM_CONFIG_SERVERS = 3;
private final Deployer deployer;
private final Metric metric;
private final Orchestrator orchestrator;
private final Duration retiredExpiry;
public RetiredExpirer(NodeRepository nodeRepository,
Orchestrator orchestrator,
Deployer deployer,
Metric metric,
Duration maintenanceInterval,
Duration retiredExpiry) {
super(nodeRepository, maintenanceInterval, metric);
this.deployer = deployer;
this.metric = metric;
this.orchestrator = orchestrator;
this.retiredExpiry = retiredExpiry;
}
@Override
protected boolean maintain() {
NodeList activeNodes = nodeRepository().nodes().list(Node.State.active);
Map<ApplicationId, List<Node>> retiredNodesByApplication = activeNodes.stream()
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().membership().retired())
.collect(Collectors.groupingBy(node -> node.allocation().get().owner()));
for (Map.Entry<ApplicationId, List<Node>> entry : retiredNodesByApplication.entrySet()) {
ApplicationId application = entry.getKey();
List<Node> retiredNodes = entry.getValue();
List<Node> nodesToRemove = retiredNodes.stream().filter(n -> canRemove(n, activeNodes)).collect(Collectors.toList());
if (nodesToRemove.isEmpty()) continue;
try (MaintenanceDeployment deployment = new MaintenanceDeployment(application, deployer, metric, nodeRepository())) {
if ( ! deployment.isValid()) continue;
nodeRepository().nodes().setRemovable(application, nodesToRemove);
boolean success = deployment.activate().isPresent();
if ( ! success) return success;
String nodeList = nodesToRemove.stream().map(Node::hostname).collect(Collectors.joining(", "));
log.info("Redeployed " + application + " to deactivate retired nodes: " + nodeList);
}
}
return true;
}
/**
* Checks if the node can be removed:
* if the node is a host, it will only be removed if it has no children,
* or all its children are parked or failed.
* Otherwise, a removal is allowed if either of these are true:
* - The node has been in state {@link History.Event.Type
* - Orchestrator allows it
*/
} | |
Nit: `NodeList` has `size` so no need to copy into a set. | private boolean canRemove(Node node, NodeList activeNodes) {
if (node.type().isHost()) {
if (nodeRepository().nodes().list().childrenOf(node).asList().stream()
.allMatch(child -> child.state() == Node.State.parked ||
child.state() == Node.State.failed)) {
log.info("Host " + node + " has no non-parked/failed children");
return true;
}
return false;
}
if (node.type().isConfigServerLike()) {
if (activeNodes.nodeType(node.type()).asSet().size() < NUM_CONFIG_SERVERS) {
return false;
}
} else if (node.history().hasEventBefore(History.Event.Type.retired, clock().instant().minus(retiredExpiry))) {
log.warning("Node " + node + " has been retired longer than " + retiredExpiry + ": Allowing removal. This may cause data loss");
return true;
}
try {
orchestrator.acquirePermissionToRemove(new HostName(node.hostname()));
log.info("Node " + node + " has been granted permission to be removed");
return true;
} catch (UncheckedTimeoutException e) {
log.warning("Timed out trying to acquire permission to remove " + node.hostname() + ": " + Exceptions.toMessageString(e));
return false;
} catch (OrchestrationException e) {
log.info("Did not get permission to remove retired " + node + ": " + Exceptions.toMessageString(e));
return false;
}
} | if (activeNodes.nodeType(node.type()).asSet().size() < NUM_CONFIG_SERVERS) { | private boolean canRemove(Node node, NodeList activeNodes) {
if (node.type().isHost()) {
if (nodeRepository().nodes().list().childrenOf(node).asList().stream()
.allMatch(child -> child.state() == Node.State.parked ||
child.state() == Node.State.failed)) {
log.info("Host " + node + " has no non-parked/failed children");
return true;
}
return false;
}
if (node.type().isConfigServerLike()) {
if (activeNodes.nodeType(node.type()).size() < NUM_CONFIG_SERVERS) {
return false;
}
} else if (node.history().hasEventBefore(History.Event.Type.retired, clock().instant().minus(retiredExpiry))) {
log.warning("Node " + node + " has been retired longer than " + retiredExpiry + ": Allowing removal. This may cause data loss");
return true;
}
try {
orchestrator.acquirePermissionToRemove(new HostName(node.hostname()));
log.info("Node " + node + " has been granted permission to be removed");
return true;
} catch (UncheckedTimeoutException e) {
log.warning("Timed out trying to acquire permission to remove " + node.hostname() + ": " + Exceptions.toMessageString(e));
return false;
} catch (OrchestrationException e) {
log.info("Did not get permission to remove retired " + node + ": " + Exceptions.toMessageString(e));
return false;
}
} | class RetiredExpirer extends NodeRepositoryMaintainer {
private static final int NUM_CONFIG_SERVERS = 3;
private final Deployer deployer;
private final Metric metric;
private final Orchestrator orchestrator;
private final Duration retiredExpiry;
public RetiredExpirer(NodeRepository nodeRepository,
Orchestrator orchestrator,
Deployer deployer,
Metric metric,
Duration maintenanceInterval,
Duration retiredExpiry) {
super(nodeRepository, maintenanceInterval, metric);
this.deployer = deployer;
this.metric = metric;
this.orchestrator = orchestrator;
this.retiredExpiry = retiredExpiry;
}
@Override
protected boolean maintain() {
NodeList activeNodes = nodeRepository().nodes().list(Node.State.active);
Map<ApplicationId, List<Node>> retiredNodesByApplication = activeNodes.stream()
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().membership().retired())
.collect(Collectors.groupingBy(node -> node.allocation().get().owner()));
for (Map.Entry<ApplicationId, List<Node>> entry : retiredNodesByApplication.entrySet()) {
ApplicationId application = entry.getKey();
List<Node> retiredNodes = entry.getValue();
List<Node> nodesToRemove = retiredNodes.stream().filter(n -> canRemove(n, activeNodes)).collect(Collectors.toList());
if (nodesToRemove.isEmpty()) continue;
try (MaintenanceDeployment deployment = new MaintenanceDeployment(application, deployer, metric, nodeRepository())) {
if ( ! deployment.isValid()) continue;
nodeRepository().nodes().setRemovable(application, nodesToRemove);
boolean success = deployment.activate().isPresent();
if ( ! success) return success;
String nodeList = nodesToRemove.stream().map(Node::hostname).collect(Collectors.joining(", "));
log.info("Redeployed " + application + " to deactivate retired nodes: " + nodeList);
}
}
return true;
}
/**
* Checks if the node can be removed:
* if the node is a host, it will only be removed if it has no children,
* or all its children are parked or failed.
* Otherwise, a removal is allowed if either of these are true:
* - The node has been in state {@link History.Event.Type
* - Orchestrator allows it
*/
} | class RetiredExpirer extends NodeRepositoryMaintainer {
private static final int NUM_CONFIG_SERVERS = 3;
private final Deployer deployer;
private final Metric metric;
private final Orchestrator orchestrator;
private final Duration retiredExpiry;
public RetiredExpirer(NodeRepository nodeRepository,
Orchestrator orchestrator,
Deployer deployer,
Metric metric,
Duration maintenanceInterval,
Duration retiredExpiry) {
super(nodeRepository, maintenanceInterval, metric);
this.deployer = deployer;
this.metric = metric;
this.orchestrator = orchestrator;
this.retiredExpiry = retiredExpiry;
}
@Override
protected boolean maintain() {
NodeList activeNodes = nodeRepository().nodes().list(Node.State.active);
Map<ApplicationId, List<Node>> retiredNodesByApplication = activeNodes.stream()
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().membership().retired())
.collect(Collectors.groupingBy(node -> node.allocation().get().owner()));
for (Map.Entry<ApplicationId, List<Node>> entry : retiredNodesByApplication.entrySet()) {
ApplicationId application = entry.getKey();
List<Node> retiredNodes = entry.getValue();
List<Node> nodesToRemove = retiredNodes.stream().filter(n -> canRemove(n, activeNodes)).collect(Collectors.toList());
if (nodesToRemove.isEmpty()) continue;
try (MaintenanceDeployment deployment = new MaintenanceDeployment(application, deployer, metric, nodeRepository())) {
if ( ! deployment.isValid()) continue;
nodeRepository().nodes().setRemovable(application, nodesToRemove);
boolean success = deployment.activate().isPresent();
if ( ! success) return success;
String nodeList = nodesToRemove.stream().map(Node::hostname).collect(Collectors.joining(", "));
log.info("Redeployed " + application + " to deactivate retired nodes: " + nodeList);
}
}
return true;
}
/**
* Checks if the node can be removed:
* if the node is a host, it will only be removed if it has no children,
* or all its children are parked or failed.
* Otherwise, a removal is allowed if either of these are true:
* - The node has been in state {@link History.Event.Type
* - Orchestrator allows it
*/
} |
Fixed | private boolean canRemove(Node node, NodeList activeNodes) {
if (node.type().isHost()) {
if (nodeRepository().nodes().list().childrenOf(node).asList().stream()
.allMatch(child -> child.state() == Node.State.parked ||
child.state() == Node.State.failed)) {
log.info("Host " + node + " has no non-parked/failed children");
return true;
}
return false;
}
if (node.type().isConfigServerLike()) {
if (activeNodes.nodeType(node.type()).asSet().size() < NUM_CONFIG_SERVERS) {
return false;
}
} else if (node.history().hasEventBefore(History.Event.Type.retired, clock().instant().minus(retiredExpiry))) {
log.warning("Node " + node + " has been retired longer than " + retiredExpiry + ": Allowing removal. This may cause data loss");
return true;
}
try {
orchestrator.acquirePermissionToRemove(new HostName(node.hostname()));
log.info("Node " + node + " has been granted permission to be removed");
return true;
} catch (UncheckedTimeoutException e) {
log.warning("Timed out trying to acquire permission to remove " + node.hostname() + ": " + Exceptions.toMessageString(e));
return false;
} catch (OrchestrationException e) {
log.info("Did not get permission to remove retired " + node + ": " + Exceptions.toMessageString(e));
return false;
}
} | if (activeNodes.nodeType(node.type()).asSet().size() < NUM_CONFIG_SERVERS) { | private boolean canRemove(Node node, NodeList activeNodes) {
if (node.type().isHost()) {
if (nodeRepository().nodes().list().childrenOf(node).asList().stream()
.allMatch(child -> child.state() == Node.State.parked ||
child.state() == Node.State.failed)) {
log.info("Host " + node + " has no non-parked/failed children");
return true;
}
return false;
}
if (node.type().isConfigServerLike()) {
if (activeNodes.nodeType(node.type()).size() < NUM_CONFIG_SERVERS) {
return false;
}
} else if (node.history().hasEventBefore(History.Event.Type.retired, clock().instant().minus(retiredExpiry))) {
log.warning("Node " + node + " has been retired longer than " + retiredExpiry + ": Allowing removal. This may cause data loss");
return true;
}
try {
orchestrator.acquirePermissionToRemove(new HostName(node.hostname()));
log.info("Node " + node + " has been granted permission to be removed");
return true;
} catch (UncheckedTimeoutException e) {
log.warning("Timed out trying to acquire permission to remove " + node.hostname() + ": " + Exceptions.toMessageString(e));
return false;
} catch (OrchestrationException e) {
log.info("Did not get permission to remove retired " + node + ": " + Exceptions.toMessageString(e));
return false;
}
} | class RetiredExpirer extends NodeRepositoryMaintainer {
private static final int NUM_CONFIG_SERVERS = 3;
private final Deployer deployer;
private final Metric metric;
private final Orchestrator orchestrator;
private final Duration retiredExpiry;
public RetiredExpirer(NodeRepository nodeRepository,
Orchestrator orchestrator,
Deployer deployer,
Metric metric,
Duration maintenanceInterval,
Duration retiredExpiry) {
super(nodeRepository, maintenanceInterval, metric);
this.deployer = deployer;
this.metric = metric;
this.orchestrator = orchestrator;
this.retiredExpiry = retiredExpiry;
}
@Override
protected boolean maintain() {
NodeList activeNodes = nodeRepository().nodes().list(Node.State.active);
Map<ApplicationId, List<Node>> retiredNodesByApplication = activeNodes.stream()
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().membership().retired())
.collect(Collectors.groupingBy(node -> node.allocation().get().owner()));
for (Map.Entry<ApplicationId, List<Node>> entry : retiredNodesByApplication.entrySet()) {
ApplicationId application = entry.getKey();
List<Node> retiredNodes = entry.getValue();
List<Node> nodesToRemove = retiredNodes.stream().filter(n -> canRemove(n, activeNodes)).collect(Collectors.toList());
if (nodesToRemove.isEmpty()) continue;
try (MaintenanceDeployment deployment = new MaintenanceDeployment(application, deployer, metric, nodeRepository())) {
if ( ! deployment.isValid()) continue;
nodeRepository().nodes().setRemovable(application, nodesToRemove);
boolean success = deployment.activate().isPresent();
if ( ! success) return success;
String nodeList = nodesToRemove.stream().map(Node::hostname).collect(Collectors.joining(", "));
log.info("Redeployed " + application + " to deactivate retired nodes: " + nodeList);
}
}
return true;
}
/**
* Checks if the node can be removed:
* if the node is a host, it will only be removed if it has no children,
* or all its children are parked or failed.
* Otherwise, a removal is allowed if either of these are true:
* - The node has been in state {@link History.Event.Type
* - Orchestrator allows it
*/
} | class RetiredExpirer extends NodeRepositoryMaintainer {
private static final int NUM_CONFIG_SERVERS = 3;
private final Deployer deployer;
private final Metric metric;
private final Orchestrator orchestrator;
private final Duration retiredExpiry;
public RetiredExpirer(NodeRepository nodeRepository,
Orchestrator orchestrator,
Deployer deployer,
Metric metric,
Duration maintenanceInterval,
Duration retiredExpiry) {
super(nodeRepository, maintenanceInterval, metric);
this.deployer = deployer;
this.metric = metric;
this.orchestrator = orchestrator;
this.retiredExpiry = retiredExpiry;
}
@Override
protected boolean maintain() {
NodeList activeNodes = nodeRepository().nodes().list(Node.State.active);
Map<ApplicationId, List<Node>> retiredNodesByApplication = activeNodes.stream()
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().membership().retired())
.collect(Collectors.groupingBy(node -> node.allocation().get().owner()));
for (Map.Entry<ApplicationId, List<Node>> entry : retiredNodesByApplication.entrySet()) {
ApplicationId application = entry.getKey();
List<Node> retiredNodes = entry.getValue();
List<Node> nodesToRemove = retiredNodes.stream().filter(n -> canRemove(n, activeNodes)).collect(Collectors.toList());
if (nodesToRemove.isEmpty()) continue;
try (MaintenanceDeployment deployment = new MaintenanceDeployment(application, deployer, metric, nodeRepository())) {
if ( ! deployment.isValid()) continue;
nodeRepository().nodes().setRemovable(application, nodesToRemove);
boolean success = deployment.activate().isPresent();
if ( ! success) return success;
String nodeList = nodesToRemove.stream().map(Node::hostname).collect(Collectors.joining(", "));
log.info("Redeployed " + application + " to deactivate retired nodes: " + nodeList);
}
}
return true;
}
/**
* Checks if the node can be removed:
* if the node is a host, it will only be removed if it has no children,
* or all its children are parked or failed.
* Otherwise, a removal is allowed if either of these are true:
* - The node has been in state {@link History.Event.Type
* - Orchestrator allows it
*/
} |
Should these be added to `AutoscalingMetrics.java`? | public List<String> metricResponseNames() {
return List.of("feed.http-requests.rate",
"vds.filestor.alldisks.allthreads.put.sum.count.rate",
"vds.filestor.alldisks.allthreads.remove.sum.count.rate",
"vds.filestor.alldisks.allthreads.update.sum.count.rate"); } | "vds.filestor.alldisks.allthreads.put.sum.count.rate", | public List<String> metricResponseNames() { return List.of("cpu.util"); } | class MetricsResponse {
/** Node level metrics */
private final Collection<Pair<String, NodeMetricSnapshot>> nodeMetrics;
/**
* Cluster level metrics.
* Must be aggregated at fetch time to avoid issues with nodes and nodes joining/leaving the cluster over time.
*/
private final Map<ClusterSpec.Id, ClusterMetricSnapshot> clusterMetrics = new HashMap<>();
/** Creates this from a metrics/V2 response */
public MetricsResponse(String response, NodeList applicationNodes, NodeRepository nodeRepository) {
this(SlimeUtils.jsonToSlime(response), applicationNodes, nodeRepository);
}
public MetricsResponse(Collection<Pair<String, NodeMetricSnapshot>> metrics) {
this.nodeMetrics = metrics;
}
private MetricsResponse(Slime response, NodeList applicationNodes, NodeRepository nodeRepository) {
nodeMetrics = new ArrayList<>();
Inspector root = response.get();
Inspector nodes = root.field("nodes");
nodes.traverse((ArrayTraverser)(__, node) -> consumeNode(node, applicationNodes, nodeRepository));
}
public Collection<Pair<String, NodeMetricSnapshot>> nodeMetrics() { return nodeMetrics; }
public Map<ClusterSpec.Id, ClusterMetricSnapshot> clusterMetrics() { return clusterMetrics; }
private void consumeNode(Inspector nodeObject, NodeList applicationNodes, NodeRepository nodeRepository) {
String hostname = nodeObject.field("hostname").asString();
Optional<Node> node = applicationNodes.stream().filter(n -> n.hostname().equals(hostname)).findAny();
if (node.isEmpty()) return;
ListMap<String, Double> nodeValues = new ListMap<>();
Instant at = consumeNodeMetrics(nodeObject.field("node"), nodeValues);
consumeServiceMetrics(nodeObject.field("services"), nodeValues);
nodeMetrics.add(new Pair<>(hostname, new NodeMetricSnapshot(at,
Metric.cpu.from(nodeValues),
Metric.memory.from(nodeValues),
Metric.disk.from(nodeValues),
(long)Metric.generation.from(nodeValues),
Metric.inService.from(nodeValues) > 0,
clusterIsStable(node.get(), applicationNodes, nodeRepository),
Metric.queryRate.from(nodeValues))));
var cluster = node.get().allocation().get().membership().cluster().id();
var metrics = clusterMetrics.getOrDefault(cluster, ClusterMetricSnapshot.empty(at));
metrics = metrics.withQueryRate(metrics.queryRate() + Metric.queryRate.from(nodeValues));
metrics = metrics.withWriteRate(metrics.queryRate() + Metric.writeRate.from(nodeValues));
clusterMetrics.put(cluster, metrics);
}
private Instant consumeNodeMetrics(Inspector nodeObject, ListMap<String, Double> nodeValues) {
long timestampSecond = nodeObject.field("timestamp").asLong();
Instant at = Instant.ofEpochMilli(timestampSecond * 1000);
nodeObject.field("metrics").traverse((ArrayTraverser) (__, item) -> consumeMetricsItem(item, nodeValues));
return at;
}
private void consumeServiceMetrics(Inspector servicesObject, ListMap<String, Double> nodeValues) {
servicesObject.traverse((ArrayTraverser) (__, item) -> consumeServiceItem(item, nodeValues));
}
private void consumeServiceItem(Inspector serviceObject, ListMap<String, Double> nodeValues) {
serviceObject.field("metrics").traverse((ArrayTraverser) (__, item) -> consumeMetricsItem(item, nodeValues));
}
private void consumeMetricsItem(Inspector item, ListMap<String, Double> values) {
item.field("values").traverse((ObjectTraverser)(name, value) -> values.put(name, value.asDouble()));
}
private boolean clusterIsStable(Node node, NodeList applicationNodes, NodeRepository nodeRepository) {
ClusterSpec cluster = node.allocation().get().membership().cluster();
return Autoscaler.stable(applicationNodes.cluster(cluster.id()), nodeRepository);
}
public static MetricsResponse empty() { return new MetricsResponse(List.of()); }
/** The metrics this can read */
private enum Metric {
cpu {
double computeFinal(List<Double> values) {
return values.stream().mapToDouble(v -> v).average().orElse(0) / 100;
}
},
memory {
public List<String> metricResponseNames() { return List.of("mem.util"); }
double computeFinal(List<Double> values) {
return values.stream().mapToDouble(v -> v).average().orElse(0) / 100;
}
},
disk {
public List<String> metricResponseNames() { return List.of("disk.util"); }
double computeFinal(List<Double> values) {
return values.stream().mapToDouble(v -> v).average().orElse(0) / 100;
}
},
generation {
public List<String> metricResponseNames() { return List.of("application_generation"); }
double computeFinal(List<Double> values) {
return values.stream().mapToDouble(v -> v).min().orElse(-1);
}
},
inService {
public List<String> metricResponseNames() { return List.of("in_service"); }
double computeFinal(List<Double> values) {
return values.stream().anyMatch(v -> v == 0) ? 0 : 1;
}
},
queryRate {
public List<String> metricResponseNames() {
return List.of("queries.rate",
"content.proton.documentdb.matching.queries.rate");
}
},
writeRate {
public List<String> metricResponseNames() {
return List.of("feed.http-requests.rate",
"vds.filestor.alldisks.allthreads.put.sum.count.rate",
"vds.filestor.alldisks.allthreads.remove.sum.count.rate",
"vds.filestor.alldisks.allthreads.update.sum.count.rate"); }
};
/** The name of this metric as emitted from its source */
public abstract List<String> metricResponseNames();
double computeFinal(List<Double> values) { return values.stream().mapToDouble(v -> v).sum(); }
public double from(ListMap<String, Double> metricValues) {
List<Double> values = new ArrayList<>(1);
for (String metricName : metricResponseNames()) {
List<Double> valuesForName = metricValues.get(metricName);
if (valuesForName == null) continue;
values.addAll(valuesForName);
}
return computeFinal(values);
}
}
} | class MetricsResponse {
/** Node level metrics */
private final Collection<Pair<String, NodeMetricSnapshot>> nodeMetrics;
/**
* Cluster level metrics.
* Must be aggregated at fetch time to avoid issues with nodes and nodes joining/leaving the cluster over time.
*/
private final Map<ClusterSpec.Id, ClusterMetricSnapshot> clusterMetrics = new HashMap<>();
/** Creates this from a metrics/V2 response */
public MetricsResponse(String response, NodeList applicationNodes, NodeRepository nodeRepository) {
this(SlimeUtils.jsonToSlime(response), applicationNodes, nodeRepository);
}
public MetricsResponse(Collection<Pair<String, NodeMetricSnapshot>> metrics) {
this.nodeMetrics = metrics;
}
private MetricsResponse(Slime response, NodeList applicationNodes, NodeRepository nodeRepository) {
nodeMetrics = new ArrayList<>();
Inspector root = response.get();
Inspector nodes = root.field("nodes");
nodes.traverse((ArrayTraverser)(__, node) -> consumeNode(node, applicationNodes, nodeRepository));
}
public Collection<Pair<String, NodeMetricSnapshot>> nodeMetrics() { return nodeMetrics; }
public Map<ClusterSpec.Id, ClusterMetricSnapshot> clusterMetrics() { return clusterMetrics; }
private void consumeNode(Inspector nodeObject, NodeList applicationNodes, NodeRepository nodeRepository) {
String hostname = nodeObject.field("hostname").asString();
Optional<Node> node = applicationNodes.stream().filter(n -> n.hostname().equals(hostname)).findAny();
if (node.isEmpty()) return;
ListMap<String, Double> nodeValues = new ListMap<>();
Instant at = consumeNodeMetrics(nodeObject.field("node"), nodeValues);
consumeServiceMetrics(nodeObject.field("services"), nodeValues);
nodeMetrics.add(new Pair<>(hostname, new NodeMetricSnapshot(at,
Metric.cpu.from(nodeValues),
Metric.memory.from(nodeValues),
Metric.disk.from(nodeValues),
(long)Metric.generation.from(nodeValues),
Metric.inService.from(nodeValues) > 0,
clusterIsStable(node.get(), applicationNodes, nodeRepository),
Metric.queryRate.from(nodeValues))));
var cluster = node.get().allocation().get().membership().cluster().id();
var metrics = clusterMetrics.getOrDefault(cluster, ClusterMetricSnapshot.empty(at));
metrics = metrics.withQueryRate(metrics.queryRate() + Metric.queryRate.from(nodeValues));
metrics = metrics.withWriteRate(metrics.queryRate() + Metric.writeRate.from(nodeValues));
clusterMetrics.put(cluster, metrics);
}
private Instant consumeNodeMetrics(Inspector nodeObject, ListMap<String, Double> nodeValues) {
long timestampSecond = nodeObject.field("timestamp").asLong();
Instant at = Instant.ofEpochMilli(timestampSecond * 1000);
nodeObject.field("metrics").traverse((ArrayTraverser) (__, item) -> consumeMetricsItem(item, nodeValues));
return at;
}
private void consumeServiceMetrics(Inspector servicesObject, ListMap<String, Double> nodeValues) {
servicesObject.traverse((ArrayTraverser) (__, item) -> consumeServiceItem(item, nodeValues));
}
private void consumeServiceItem(Inspector serviceObject, ListMap<String, Double> nodeValues) {
serviceObject.field("metrics").traverse((ArrayTraverser) (__, item) -> consumeMetricsItem(item, nodeValues));
}
private void consumeMetricsItem(Inspector item, ListMap<String, Double> values) {
item.field("values").traverse((ObjectTraverser)(name, value) -> values.put(name, value.asDouble()));
}
private boolean clusterIsStable(Node node, NodeList applicationNodes, NodeRepository nodeRepository) {
ClusterSpec cluster = node.allocation().get().membership().cluster();
return Autoscaler.stable(applicationNodes.cluster(cluster.id()), nodeRepository);
}
public static MetricsResponse empty() { return new MetricsResponse(List.of()); }
/** The metrics this can read */
private enum Metric {
cpu {
double computeFinal(List<Double> values) {
return values.stream().mapToDouble(v -> v).average().orElse(0) / 100;
}
},
memory {
public List<String> metricResponseNames() { return List.of("mem.util"); }
double computeFinal(List<Double> values) {
return values.stream().mapToDouble(v -> v).average().orElse(0) / 100;
}
},
disk {
public List<String> metricResponseNames() { return List.of("disk.util"); }
double computeFinal(List<Double> values) {
return values.stream().mapToDouble(v -> v).average().orElse(0) / 100;
}
},
generation {
public List<String> metricResponseNames() { return List.of("application_generation"); }
double computeFinal(List<Double> values) {
return values.stream().mapToDouble(v -> v).min().orElse(-1);
}
},
inService {
public List<String> metricResponseNames() { return List.of("in_service"); }
double computeFinal(List<Double> values) {
return values.stream().anyMatch(v -> v == 0) ? 0 : 1;
}
},
queryRate {
public List<String> metricResponseNames() {
return List.of("queries.rate",
"content.proton.documentdb.matching.queries.rate");
}
},
writeRate {
public List<String> metricResponseNames() {
return List.of("feed.http-requests.rate",
"vds.filestor.alldisks.allthreads.put.sum.count.rate",
"vds.filestor.alldisks.allthreads.remove.sum.count.rate",
"vds.filestor.alldisks.allthreads.update.sum.count.rate"); }
};
/** The name of this metric as emitted from its source */
public abstract List<String> metricResponseNames();
double computeFinal(List<Double> values) { return values.stream().mapToDouble(v -> v).sum(); }
public double from(ListMap<String, Double> metricValues) {
List<Double> values = new ArrayList<>(1);
for (String metricName : metricResponseNames()) {
List<Double> valuesForName = metricValues.get(metricName);
if (valuesForName == null) continue;
values.addAll(valuesForName);
}
return computeFinal(values);
}
}
} |
Should we return 0 if `values` is empty? | double computeFinal(List<Double> values) {
return values.stream().anyMatch(v -> v == 0) ? 0 : 1;
} | return values.stream().anyMatch(v -> v == 0) ? 0 : 1; | double computeFinal(List<Double> values) {
return values.stream().mapToDouble(v -> v).average().orElse(0) / 100;
} | class MetricsResponse {
/** Node level metrics */
private final Collection<Pair<String, NodeMetricSnapshot>> nodeMetrics;
/**
* Cluster level metrics.
* Must be aggregated at fetch time to avoid issues with nodes and nodes joining/leaving the cluster over time.
*/
private final Map<ClusterSpec.Id, ClusterMetricSnapshot> clusterMetrics = new HashMap<>();
/** Creates this from a metrics/V2 response */
public MetricsResponse(String response, NodeList applicationNodes, NodeRepository nodeRepository) {
this(SlimeUtils.jsonToSlime(response), applicationNodes, nodeRepository);
}
public MetricsResponse(Collection<Pair<String, NodeMetricSnapshot>> metrics) {
this.nodeMetrics = metrics;
}
private MetricsResponse(Slime response, NodeList applicationNodes, NodeRepository nodeRepository) {
nodeMetrics = new ArrayList<>();
Inspector root = response.get();
Inspector nodes = root.field("nodes");
nodes.traverse((ArrayTraverser)(__, node) -> consumeNode(node, applicationNodes, nodeRepository));
}
public Collection<Pair<String, NodeMetricSnapshot>> nodeMetrics() { return nodeMetrics; }
public Map<ClusterSpec.Id, ClusterMetricSnapshot> clusterMetrics() { return clusterMetrics; }
private void consumeNode(Inspector nodeObject, NodeList applicationNodes, NodeRepository nodeRepository) {
String hostname = nodeObject.field("hostname").asString();
Optional<Node> node = applicationNodes.stream().filter(n -> n.hostname().equals(hostname)).findAny();
if (node.isEmpty()) return;
ListMap<String, Double> nodeValues = new ListMap<>();
Instant at = consumeNodeMetrics(nodeObject.field("node"), nodeValues);
consumeServiceMetrics(nodeObject.field("services"), nodeValues);
nodeMetrics.add(new Pair<>(hostname, new NodeMetricSnapshot(at,
Metric.cpu.from(nodeValues),
Metric.memory.from(nodeValues),
Metric.disk.from(nodeValues),
(long)Metric.generation.from(nodeValues),
Metric.inService.from(nodeValues) > 0,
clusterIsStable(node.get(), applicationNodes, nodeRepository),
Metric.queryRate.from(nodeValues))));
var cluster = node.get().allocation().get().membership().cluster().id();
var metrics = clusterMetrics.getOrDefault(cluster, ClusterMetricSnapshot.empty(at));
metrics = metrics.withQueryRate(metrics.queryRate() + Metric.queryRate.from(nodeValues));
metrics = metrics.withWriteRate(metrics.queryRate() + Metric.writeRate.from(nodeValues));
clusterMetrics.put(cluster, metrics);
}
private Instant consumeNodeMetrics(Inspector nodeObject, ListMap<String, Double> nodeValues) {
long timestampSecond = nodeObject.field("timestamp").asLong();
Instant at = Instant.ofEpochMilli(timestampSecond * 1000);
nodeObject.field("metrics").traverse((ArrayTraverser) (__, item) -> consumeMetricsItem(item, nodeValues));
return at;
}
private void consumeServiceMetrics(Inspector servicesObject, ListMap<String, Double> nodeValues) {
servicesObject.traverse((ArrayTraverser) (__, item) -> consumeServiceItem(item, nodeValues));
}
private void consumeServiceItem(Inspector serviceObject, ListMap<String, Double> nodeValues) {
serviceObject.field("metrics").traverse((ArrayTraverser) (__, item) -> consumeMetricsItem(item, nodeValues));
}
private void consumeMetricsItem(Inspector item, ListMap<String, Double> values) {
item.field("values").traverse((ObjectTraverser)(name, value) -> values.put(name, value.asDouble()));
}
private boolean clusterIsStable(Node node, NodeList applicationNodes, NodeRepository nodeRepository) {
ClusterSpec cluster = node.allocation().get().membership().cluster();
return Autoscaler.stable(applicationNodes.cluster(cluster.id()), nodeRepository);
}
public static MetricsResponse empty() { return new MetricsResponse(List.of()); }
/** The metrics this can read */
private enum Metric {
cpu {
public List<String> metricResponseNames() { return List.of("cpu.util"); }
},
memory {
public List<String> metricResponseNames() { return List.of("mem.util"); }
double computeFinal(List<Double> values) {
return values.stream().mapToDouble(v -> v).average().orElse(0) / 100;
}
},
disk {
public List<String> metricResponseNames() { return List.of("disk.util"); }
double computeFinal(List<Double> values) {
return values.stream().mapToDouble(v -> v).average().orElse(0) / 100;
}
},
generation {
public List<String> metricResponseNames() { return List.of("application_generation"); }
double computeFinal(List<Double> values) {
return values.stream().mapToDouble(v -> v).min().orElse(-1);
}
},
inService {
public List<String> metricResponseNames() { return List.of("in_service"); }
double computeFinal(List<Double> values) {
return values.stream().anyMatch(v -> v == 0) ? 0 : 1;
}
},
queryRate {
public List<String> metricResponseNames() {
return List.of("queries.rate",
"content.proton.documentdb.matching.queries.rate");
}
},
writeRate {
public List<String> metricResponseNames() {
return List.of("feed.http-requests.rate",
"vds.filestor.alldisks.allthreads.put.sum.count.rate",
"vds.filestor.alldisks.allthreads.remove.sum.count.rate",
"vds.filestor.alldisks.allthreads.update.sum.count.rate"); }
};
/** The name of this metric as emitted from its source */
public abstract List<String> metricResponseNames();
double computeFinal(List<Double> values) { return values.stream().mapToDouble(v -> v).sum(); }
public double from(ListMap<String, Double> metricValues) {
List<Double> values = new ArrayList<>(1);
for (String metricName : metricResponseNames()) {
List<Double> valuesForName = metricValues.get(metricName);
if (valuesForName == null) continue;
values.addAll(valuesForName);
}
return computeFinal(values);
}
}
} | class MetricsResponse {
/** Node level metrics */
private final Collection<Pair<String, NodeMetricSnapshot>> nodeMetrics;
/**
* Cluster level metrics.
* Must be aggregated at fetch time to avoid issues with nodes and nodes joining/leaving the cluster over time.
*/
private final Map<ClusterSpec.Id, ClusterMetricSnapshot> clusterMetrics = new HashMap<>();
/** Creates this from a metrics/V2 response */
public MetricsResponse(String response, NodeList applicationNodes, NodeRepository nodeRepository) {
this(SlimeUtils.jsonToSlime(response), applicationNodes, nodeRepository);
}
public MetricsResponse(Collection<Pair<String, NodeMetricSnapshot>> metrics) {
this.nodeMetrics = metrics;
}
private MetricsResponse(Slime response, NodeList applicationNodes, NodeRepository nodeRepository) {
nodeMetrics = new ArrayList<>();
Inspector root = response.get();
Inspector nodes = root.field("nodes");
nodes.traverse((ArrayTraverser)(__, node) -> consumeNode(node, applicationNodes, nodeRepository));
}
public Collection<Pair<String, NodeMetricSnapshot>> nodeMetrics() { return nodeMetrics; }
public Map<ClusterSpec.Id, ClusterMetricSnapshot> clusterMetrics() { return clusterMetrics; }
private void consumeNode(Inspector nodeObject, NodeList applicationNodes, NodeRepository nodeRepository) {
String hostname = nodeObject.field("hostname").asString();
Optional<Node> node = applicationNodes.stream().filter(n -> n.hostname().equals(hostname)).findAny();
if (node.isEmpty()) return;
ListMap<String, Double> nodeValues = new ListMap<>();
Instant at = consumeNodeMetrics(nodeObject.field("node"), nodeValues);
consumeServiceMetrics(nodeObject.field("services"), nodeValues);
nodeMetrics.add(new Pair<>(hostname, new NodeMetricSnapshot(at,
Metric.cpu.from(nodeValues),
Metric.memory.from(nodeValues),
Metric.disk.from(nodeValues),
(long)Metric.generation.from(nodeValues),
Metric.inService.from(nodeValues) > 0,
clusterIsStable(node.get(), applicationNodes, nodeRepository),
Metric.queryRate.from(nodeValues))));
var cluster = node.get().allocation().get().membership().cluster().id();
var metrics = clusterMetrics.getOrDefault(cluster, ClusterMetricSnapshot.empty(at));
metrics = metrics.withQueryRate(metrics.queryRate() + Metric.queryRate.from(nodeValues));
metrics = metrics.withWriteRate(metrics.queryRate() + Metric.writeRate.from(nodeValues));
clusterMetrics.put(cluster, metrics);
}
private Instant consumeNodeMetrics(Inspector nodeObject, ListMap<String, Double> nodeValues) {
long timestampSecond = nodeObject.field("timestamp").asLong();
Instant at = Instant.ofEpochMilli(timestampSecond * 1000);
nodeObject.field("metrics").traverse((ArrayTraverser) (__, item) -> consumeMetricsItem(item, nodeValues));
return at;
}
private void consumeServiceMetrics(Inspector servicesObject, ListMap<String, Double> nodeValues) {
servicesObject.traverse((ArrayTraverser) (__, item) -> consumeServiceItem(item, nodeValues));
}
private void consumeServiceItem(Inspector serviceObject, ListMap<String, Double> nodeValues) {
serviceObject.field("metrics").traverse((ArrayTraverser) (__, item) -> consumeMetricsItem(item, nodeValues));
}
private void consumeMetricsItem(Inspector item, ListMap<String, Double> values) {
item.field("values").traverse((ObjectTraverser)(name, value) -> values.put(name, value.asDouble()));
}
private boolean clusterIsStable(Node node, NodeList applicationNodes, NodeRepository nodeRepository) {
ClusterSpec cluster = node.allocation().get().membership().cluster();
return Autoscaler.stable(applicationNodes.cluster(cluster.id()), nodeRepository);
}
public static MetricsResponse empty() { return new MetricsResponse(List.of()); }
/** The metrics this can read */
private enum Metric {

    cpu { // percent utilization: averaged across sources, scaled to a [0, 1] fraction (consistent with memory/disk)
        public List<String> metricResponseNames() { return List.of("cpu.util"); }
        double computeFinal(List<Double> values) {
            return values.stream().mapToDouble(v -> v).average().orElse(0) / 100;
        }
    },
    memory {
        public List<String> metricResponseNames() { return List.of("mem.util"); }
        double computeFinal(List<Double> values) {
            return values.stream().mapToDouble(v -> v).average().orElse(0) / 100;
        }
    },
    disk {
        public List<String> metricResponseNames() { return List.of("disk.util"); }
        double computeFinal(List<Double> values) {
            return values.stream().mapToDouble(v -> v).average().orElse(0) / 100;
        }
    },
    generation { // the lowest (most lagging) config generation seen, or -1 when absent
        public List<String> metricResponseNames() { return List.of("application_generation"); }
        double computeFinal(List<Double> values) {
            return values.stream().mapToDouble(v -> v).min().orElse(-1);
        }
    },
    inService { // 0 if any source reports out of service; defaults to 1 when no source reports it
        public List<String> metricResponseNames() { return List.of("in_service"); }
        double computeFinal(List<Double> values) {
            return values.stream().anyMatch(v -> v == 0) ? 0 : 1;
        }
    },
    queryRate { // rates use the default computeFinal: summed across sources
        public List<String> metricResponseNames() {
            return List.of("queries.rate",
                           "content.proton.documentdb.matching.queries.rate");
        }
    },
    writeRate {
        public List<String> metricResponseNames() {
            return List.of("feed.http-requests.rate",
                           "vds.filestor.alldisks.allthreads.put.sum.count.rate",
                           "vds.filestor.alldisks.allthreads.remove.sum.count.rate",
                           "vds.filestor.alldisks.allthreads.update.sum.count.rate");
        }
    };

    /** The names of this metric as emitted from its source(s) */
    public abstract List<String> metricResponseNames();

    /** Combines the collected raw values into this metric's final value; sums by default */
    double computeFinal(List<Double> values) { return values.stream().mapToDouble(v -> v).sum(); }

    /** Collects this metric's values from the given name -> values map and combines them */
    public double from(ListMap<String, Double> metricValues) {
        List<Double> values = new ArrayList<>(1);
        for (String metricName : metricResponseNames()) {
            List<Double> valuesForName = metricValues.get(metricName);
            if (valuesForName == null) continue;
            values.addAll(valuesForName);
        }
        return computeFinal(values);
    }

}
} |
Yes, I forgot - thanks! | public List<String> metricResponseNames() {
return List.of("feed.http-requests.rate",
"vds.filestor.alldisks.allthreads.put.sum.count.rate",
"vds.filestor.alldisks.allthreads.remove.sum.count.rate",
"vds.filestor.alldisks.allthreads.update.sum.count.rate"); } | "vds.filestor.alldisks.allthreads.put.sum.count.rate", | public List<String> metricResponseNames() { return List.of("cpu.util"); } | class MetricsResponse {
/** Node level metrics */
private final Collection<Pair<String, NodeMetricSnapshot>> nodeMetrics;
/**
* Cluster level metrics.
* Must be aggregated at fetch time to avoid issues with nodes and nodes joining/leaving the cluster over time.
*/
private final Map<ClusterSpec.Id, ClusterMetricSnapshot> clusterMetrics = new HashMap<>();
/** Creates this from a metrics/V2 response */
public MetricsResponse(String response, NodeList applicationNodes, NodeRepository nodeRepository) {
this(SlimeUtils.jsonToSlime(response), applicationNodes, nodeRepository);
}
public MetricsResponse(Collection<Pair<String, NodeMetricSnapshot>> metrics) {
this.nodeMetrics = metrics;
}
private MetricsResponse(Slime response, NodeList applicationNodes, NodeRepository nodeRepository) {
nodeMetrics = new ArrayList<>();
Inspector root = response.get();
Inspector nodes = root.field("nodes");
nodes.traverse((ArrayTraverser)(__, node) -> consumeNode(node, applicationNodes, nodeRepository));
}
public Collection<Pair<String, NodeMetricSnapshot>> nodeMetrics() { return nodeMetrics; }
public Map<ClusterSpec.Id, ClusterMetricSnapshot> clusterMetrics() { return clusterMetrics; }
/**
 * Collects the metrics of one node and folds them into both the per-node
 * snapshot list and the cluster-level rate aggregates.
 */
private void consumeNode(Inspector nodeObject, NodeList applicationNodes, NodeRepository nodeRepository) {
    String hostname = nodeObject.field("hostname").asString();
    Optional<Node> node = applicationNodes.stream().filter(n -> n.hostname().equals(hostname)).findAny();
    if (node.isEmpty()) return; // not (or no longer) a node of this application: ignore its metrics
    ListMap<String, Double> nodeValues = new ListMap<>();
    Instant at = consumeNodeMetrics(nodeObject.field("node"), nodeValues);
    consumeServiceMetrics(nodeObject.field("services"), nodeValues);
    nodeMetrics.add(new Pair<>(hostname, new NodeMetricSnapshot(at,
                                                                Metric.cpu.from(nodeValues),
                                                                Metric.memory.from(nodeValues),
                                                                Metric.disk.from(nodeValues),
                                                                (long)Metric.generation.from(nodeValues),
                                                                Metric.inService.from(nodeValues) > 0,
                                                                clusterIsStable(node.get(), applicationNodes, nodeRepository),
                                                                Metric.queryRate.from(nodeValues))));
    // Aggregate rates over all nodes of this node's cluster
    var cluster = node.get().allocation().get().membership().cluster().id();
    var metrics = clusterMetrics.getOrDefault(cluster, ClusterMetricSnapshot.empty(at));
    metrics = metrics.withQueryRate(metrics.queryRate() + Metric.queryRate.from(nodeValues));
    metrics = metrics.withWriteRate(metrics.writeRate() + Metric.writeRate.from(nodeValues)); // bug fix: accumulated onto queryRate()
    clusterMetrics.put(cluster, metrics);
}
private Instant consumeNodeMetrics(Inspector nodeObject, ListMap<String, Double> nodeValues) {
long timestampSecond = nodeObject.field("timestamp").asLong();
Instant at = Instant.ofEpochMilli(timestampSecond * 1000);
nodeObject.field("metrics").traverse((ArrayTraverser) (__, item) -> consumeMetricsItem(item, nodeValues));
return at;
}
private void consumeServiceMetrics(Inspector servicesObject, ListMap<String, Double> nodeValues) {
servicesObject.traverse((ArrayTraverser) (__, item) -> consumeServiceItem(item, nodeValues));
}
private void consumeServiceItem(Inspector serviceObject, ListMap<String, Double> nodeValues) {
serviceObject.field("metrics").traverse((ArrayTraverser) (__, item) -> consumeMetricsItem(item, nodeValues));
}
private void consumeMetricsItem(Inspector item, ListMap<String, Double> values) {
item.field("values").traverse((ObjectTraverser)(name, value) -> values.put(name, value.asDouble()));
}
private boolean clusterIsStable(Node node, NodeList applicationNodes, NodeRepository nodeRepository) {
ClusterSpec cluster = node.allocation().get().membership().cluster();
return Autoscaler.stable(applicationNodes.cluster(cluster.id()), nodeRepository);
}
public static MetricsResponse empty() { return new MetricsResponse(List.of()); }
/** The metrics this can read */
private enum Metric {

    cpu { // percent utilization: averaged across sources, scaled to a [0, 1] fraction
        // bug fix: the abstract metricResponseNames() was not implemented for this constant (compile error)
        public List<String> metricResponseNames() { return List.of("cpu.util"); }
        double computeFinal(List<Double> values) {
            return values.stream().mapToDouble(v -> v).average().orElse(0) / 100;
        }
    },
    memory {
        public List<String> metricResponseNames() { return List.of("mem.util"); }
        double computeFinal(List<Double> values) {
            return values.stream().mapToDouble(v -> v).average().orElse(0) / 100;
        }
    },
    disk {
        public List<String> metricResponseNames() { return List.of("disk.util"); }
        double computeFinal(List<Double> values) {
            return values.stream().mapToDouble(v -> v).average().orElse(0) / 100;
        }
    },
    generation { // the lowest (most lagging) config generation seen, or -1 when absent
        public List<String> metricResponseNames() { return List.of("application_generation"); }
        double computeFinal(List<Double> values) {
            return values.stream().mapToDouble(v -> v).min().orElse(-1);
        }
    },
    inService { // 0 if any source reports out of service; defaults to 1 when no source reports it
        public List<String> metricResponseNames() { return List.of("in_service"); }
        double computeFinal(List<Double> values) {
            return values.stream().anyMatch(v -> v == 0) ? 0 : 1;
        }
    },
    queryRate { // rates use the default computeFinal: summed across sources
        public List<String> metricResponseNames() {
            return List.of("queries.rate",
                           "content.proton.documentdb.matching.queries.rate");
        }
    },
    writeRate {
        public List<String> metricResponseNames() {
            return List.of("feed.http-requests.rate",
                           "vds.filestor.alldisks.allthreads.put.sum.count.rate",
                           "vds.filestor.alldisks.allthreads.remove.sum.count.rate",
                           "vds.filestor.alldisks.allthreads.update.sum.count.rate");
        }
    };

    /** The names of this metric as emitted from its source(s) */
    public abstract List<String> metricResponseNames();

    /** Combines the collected raw values into this metric's final value; sums by default */
    double computeFinal(List<Double> values) { return values.stream().mapToDouble(v -> v).sum(); }

    /** Collects this metric's values from the given name -> values map and combines them */
    public double from(ListMap<String, Double> metricValues) {
        List<Double> values = new ArrayList<>(1);
        for (String metricName : metricResponseNames()) {
            List<Double> valuesForName = metricValues.get(metricName);
            if (valuesForName == null) continue;
            values.addAll(valuesForName);
        }
        return computeFinal(values);
    }

}
} | class MetricsResponse {
/** Node level metrics */
private final Collection<Pair<String, NodeMetricSnapshot>> nodeMetrics;
/**
* Cluster level metrics.
* Must be aggregated at fetch time to avoid issues with nodes and nodes joining/leaving the cluster over time.
*/
private final Map<ClusterSpec.Id, ClusterMetricSnapshot> clusterMetrics = new HashMap<>();
/** Creates this from a metrics/V2 response */
public MetricsResponse(String response, NodeList applicationNodes, NodeRepository nodeRepository) {
this(SlimeUtils.jsonToSlime(response), applicationNodes, nodeRepository);
}
public MetricsResponse(Collection<Pair<String, NodeMetricSnapshot>> metrics) {
this.nodeMetrics = metrics;
}
private MetricsResponse(Slime response, NodeList applicationNodes, NodeRepository nodeRepository) {
nodeMetrics = new ArrayList<>();
Inspector root = response.get();
Inspector nodes = root.field("nodes");
nodes.traverse((ArrayTraverser)(__, node) -> consumeNode(node, applicationNodes, nodeRepository));
}
public Collection<Pair<String, NodeMetricSnapshot>> nodeMetrics() { return nodeMetrics; }
public Map<ClusterSpec.Id, ClusterMetricSnapshot> clusterMetrics() { return clusterMetrics; }
/**
 * Collects the metrics of one node and folds them into both the per-node
 * snapshot list and the cluster-level rate aggregates.
 */
private void consumeNode(Inspector nodeObject, NodeList applicationNodes, NodeRepository nodeRepository) {
    String hostname = nodeObject.field("hostname").asString();
    Optional<Node> node = applicationNodes.stream().filter(n -> n.hostname().equals(hostname)).findAny();
    if (node.isEmpty()) return; // not (or no longer) a node of this application: ignore its metrics
    ListMap<String, Double> nodeValues = new ListMap<>();
    Instant at = consumeNodeMetrics(nodeObject.field("node"), nodeValues);
    consumeServiceMetrics(nodeObject.field("services"), nodeValues);
    nodeMetrics.add(new Pair<>(hostname, new NodeMetricSnapshot(at,
                                                                Metric.cpu.from(nodeValues),
                                                                Metric.memory.from(nodeValues),
                                                                Metric.disk.from(nodeValues),
                                                                (long)Metric.generation.from(nodeValues),
                                                                Metric.inService.from(nodeValues) > 0,
                                                                clusterIsStable(node.get(), applicationNodes, nodeRepository),
                                                                Metric.queryRate.from(nodeValues))));
    // Aggregate rates over all nodes of this node's cluster
    var cluster = node.get().allocation().get().membership().cluster().id();
    var metrics = clusterMetrics.getOrDefault(cluster, ClusterMetricSnapshot.empty(at));
    metrics = metrics.withQueryRate(metrics.queryRate() + Metric.queryRate.from(nodeValues));
    metrics = metrics.withWriteRate(metrics.writeRate() + Metric.writeRate.from(nodeValues)); // bug fix: accumulated onto queryRate()
    clusterMetrics.put(cluster, metrics);
}
private Instant consumeNodeMetrics(Inspector nodeObject, ListMap<String, Double> nodeValues) {
long timestampSecond = nodeObject.field("timestamp").asLong();
Instant at = Instant.ofEpochMilli(timestampSecond * 1000);
nodeObject.field("metrics").traverse((ArrayTraverser) (__, item) -> consumeMetricsItem(item, nodeValues));
return at;
}
private void consumeServiceMetrics(Inspector servicesObject, ListMap<String, Double> nodeValues) {
servicesObject.traverse((ArrayTraverser) (__, item) -> consumeServiceItem(item, nodeValues));
}
private void consumeServiceItem(Inspector serviceObject, ListMap<String, Double> nodeValues) {
serviceObject.field("metrics").traverse((ArrayTraverser) (__, item) -> consumeMetricsItem(item, nodeValues));
}
private void consumeMetricsItem(Inspector item, ListMap<String, Double> values) {
item.field("values").traverse((ObjectTraverser)(name, value) -> values.put(name, value.asDouble()));
}
private boolean clusterIsStable(Node node, NodeList applicationNodes, NodeRepository nodeRepository) {
ClusterSpec cluster = node.allocation().get().membership().cluster();
return Autoscaler.stable(applicationNodes.cluster(cluster.id()), nodeRepository);
}
public static MetricsResponse empty() { return new MetricsResponse(List.of()); }
/** The metrics this can read */
private enum Metric {

    cpu { // percent utilization: averaged across sources, scaled to a [0, 1] fraction
        // bug fix: the abstract metricResponseNames() was not implemented for this constant (compile error)
        public List<String> metricResponseNames() { return List.of("cpu.util"); }
        double computeFinal(List<Double> values) {
            return values.stream().mapToDouble(v -> v).average().orElse(0) / 100;
        }
    },
    memory {
        public List<String> metricResponseNames() { return List.of("mem.util"); }
        double computeFinal(List<Double> values) {
            return values.stream().mapToDouble(v -> v).average().orElse(0) / 100;
        }
    },
    disk {
        public List<String> metricResponseNames() { return List.of("disk.util"); }
        double computeFinal(List<Double> values) {
            return values.stream().mapToDouble(v -> v).average().orElse(0) / 100;
        }
    },
    generation { // the lowest (most lagging) config generation seen, or -1 when absent
        public List<String> metricResponseNames() { return List.of("application_generation"); }
        double computeFinal(List<Double> values) {
            return values.stream().mapToDouble(v -> v).min().orElse(-1);
        }
    },
    inService { // 0 if any source reports out of service; defaults to 1 when no source reports it
        public List<String> metricResponseNames() { return List.of("in_service"); }
        double computeFinal(List<Double> values) {
            return values.stream().anyMatch(v -> v == 0) ? 0 : 1;
        }
    },
    queryRate { // rates use the default computeFinal: summed across sources
        public List<String> metricResponseNames() {
            return List.of("queries.rate",
                           "content.proton.documentdb.matching.queries.rate");
        }
    },
    writeRate {
        public List<String> metricResponseNames() {
            return List.of("feed.http-requests.rate",
                           "vds.filestor.alldisks.allthreads.put.sum.count.rate",
                           "vds.filestor.alldisks.allthreads.remove.sum.count.rate",
                           "vds.filestor.alldisks.allthreads.update.sum.count.rate");
        }
    };

    /** The names of this metric as emitted from its source(s) */
    public abstract List<String> metricResponseNames();

    /** Combines the collected raw values into this metric's final value; sums by default */
    double computeFinal(List<Double> values) { return values.stream().mapToDouble(v -> v).sum(); }

    /** Collects this metric's values from the given name -> values map and combines them */
    public double from(ListMap<String, Double> metricValues) {
        List<Double> values = new ArrayList<>(1);
        for (String metricName : metricResponseNames()) {
            List<Double> valuesForName = metricValues.get(metricName);
            if (valuesForName == null) continue;
            values.addAll(valuesForName);
        }
        return computeFinal(values);
    }

}
} |
Not all services send this (yet) so it needs to default to 1. | double computeFinal(List<Double> values) {
return values.stream().anyMatch(v -> v == 0) ? 0 : 1;
} | return values.stream().anyMatch(v -> v == 0) ? 0 : 1; | double computeFinal(List<Double> values) {
return values.stream().mapToDouble(v -> v).average().orElse(0) / 100;
} | class MetricsResponse {
/** Node level metrics */
private final Collection<Pair<String, NodeMetricSnapshot>> nodeMetrics;
/**
* Cluster level metrics.
* Must be aggregated at fetch time to avoid issues with nodes and nodes joining/leaving the cluster over time.
*/
private final Map<ClusterSpec.Id, ClusterMetricSnapshot> clusterMetrics = new HashMap<>();
/** Creates this from a metrics/V2 response */
public MetricsResponse(String response, NodeList applicationNodes, NodeRepository nodeRepository) {
this(SlimeUtils.jsonToSlime(response), applicationNodes, nodeRepository);
}
public MetricsResponse(Collection<Pair<String, NodeMetricSnapshot>> metrics) {
this.nodeMetrics = metrics;
}
private MetricsResponse(Slime response, NodeList applicationNodes, NodeRepository nodeRepository) {
nodeMetrics = new ArrayList<>();
Inspector root = response.get();
Inspector nodes = root.field("nodes");
nodes.traverse((ArrayTraverser)(__, node) -> consumeNode(node, applicationNodes, nodeRepository));
}
public Collection<Pair<String, NodeMetricSnapshot>> nodeMetrics() { return nodeMetrics; }
public Map<ClusterSpec.Id, ClusterMetricSnapshot> clusterMetrics() { return clusterMetrics; }
/**
 * Collects the metrics of one node and folds them into both the per-node
 * snapshot list and the cluster-level rate aggregates.
 */
private void consumeNode(Inspector nodeObject, NodeList applicationNodes, NodeRepository nodeRepository) {
    String hostname = nodeObject.field("hostname").asString();
    Optional<Node> node = applicationNodes.stream().filter(n -> n.hostname().equals(hostname)).findAny();
    if (node.isEmpty()) return; // not (or no longer) a node of this application: ignore its metrics
    ListMap<String, Double> nodeValues = new ListMap<>();
    Instant at = consumeNodeMetrics(nodeObject.field("node"), nodeValues);
    consumeServiceMetrics(nodeObject.field("services"), nodeValues);
    nodeMetrics.add(new Pair<>(hostname, new NodeMetricSnapshot(at,
                                                                Metric.cpu.from(nodeValues),
                                                                Metric.memory.from(nodeValues),
                                                                Metric.disk.from(nodeValues),
                                                                (long)Metric.generation.from(nodeValues),
                                                                Metric.inService.from(nodeValues) > 0,
                                                                clusterIsStable(node.get(), applicationNodes, nodeRepository),
                                                                Metric.queryRate.from(nodeValues))));
    // Aggregate rates over all nodes of this node's cluster
    var cluster = node.get().allocation().get().membership().cluster().id();
    var metrics = clusterMetrics.getOrDefault(cluster, ClusterMetricSnapshot.empty(at));
    metrics = metrics.withQueryRate(metrics.queryRate() + Metric.queryRate.from(nodeValues));
    metrics = metrics.withWriteRate(metrics.writeRate() + Metric.writeRate.from(nodeValues)); // bug fix: accumulated onto queryRate()
    clusterMetrics.put(cluster, metrics);
}
private Instant consumeNodeMetrics(Inspector nodeObject, ListMap<String, Double> nodeValues) {
long timestampSecond = nodeObject.field("timestamp").asLong();
Instant at = Instant.ofEpochMilli(timestampSecond * 1000);
nodeObject.field("metrics").traverse((ArrayTraverser) (__, item) -> consumeMetricsItem(item, nodeValues));
return at;
}
private void consumeServiceMetrics(Inspector servicesObject, ListMap<String, Double> nodeValues) {
servicesObject.traverse((ArrayTraverser) (__, item) -> consumeServiceItem(item, nodeValues));
}
private void consumeServiceItem(Inspector serviceObject, ListMap<String, Double> nodeValues) {
serviceObject.field("metrics").traverse((ArrayTraverser) (__, item) -> consumeMetricsItem(item, nodeValues));
}
private void consumeMetricsItem(Inspector item, ListMap<String, Double> values) {
item.field("values").traverse((ObjectTraverser)(name, value) -> values.put(name, value.asDouble()));
}
private boolean clusterIsStable(Node node, NodeList applicationNodes, NodeRepository nodeRepository) {
ClusterSpec cluster = node.allocation().get().membership().cluster();
return Autoscaler.stable(applicationNodes.cluster(cluster.id()), nodeRepository);
}
public static MetricsResponse empty() { return new MetricsResponse(List.of()); }
/** The metrics this can read */
private enum Metric {

    cpu { // percent utilization: averaged across sources, scaled to a [0, 1] fraction (consistent with memory/disk)
        public List<String> metricResponseNames() { return List.of("cpu.util"); }
        double computeFinal(List<Double> values) {
            return values.stream().mapToDouble(v -> v).average().orElse(0) / 100;
        }
    },
    memory {
        public List<String> metricResponseNames() { return List.of("mem.util"); }
        double computeFinal(List<Double> values) {
            return values.stream().mapToDouble(v -> v).average().orElse(0) / 100;
        }
    },
    disk {
        public List<String> metricResponseNames() { return List.of("disk.util"); }
        double computeFinal(List<Double> values) {
            return values.stream().mapToDouble(v -> v).average().orElse(0) / 100;
        }
    },
    generation { // the lowest (most lagging) config generation seen, or -1 when absent
        public List<String> metricResponseNames() { return List.of("application_generation"); }
        double computeFinal(List<Double> values) {
            return values.stream().mapToDouble(v -> v).min().orElse(-1);
        }
    },
    inService { // 0 if any source reports out of service; defaults to 1 when no source reports it
        public List<String> metricResponseNames() { return List.of("in_service"); }
        double computeFinal(List<Double> values) {
            return values.stream().anyMatch(v -> v == 0) ? 0 : 1;
        }
    },
    queryRate { // rates use the default computeFinal: summed across sources
        public List<String> metricResponseNames() {
            return List.of("queries.rate",
                           "content.proton.documentdb.matching.queries.rate");
        }
    },
    writeRate {
        public List<String> metricResponseNames() {
            return List.of("feed.http-requests.rate",
                           "vds.filestor.alldisks.allthreads.put.sum.count.rate",
                           "vds.filestor.alldisks.allthreads.remove.sum.count.rate",
                           "vds.filestor.alldisks.allthreads.update.sum.count.rate");
        }
    };

    /** The names of this metric as emitted from its source(s) */
    public abstract List<String> metricResponseNames();

    /** Combines the collected raw values into this metric's final value; sums by default */
    double computeFinal(List<Double> values) { return values.stream().mapToDouble(v -> v).sum(); }

    /** Collects this metric's values from the given name -> values map and combines them */
    public double from(ListMap<String, Double> metricValues) {
        List<Double> values = new ArrayList<>(1);
        for (String metricName : metricResponseNames()) {
            List<Double> valuesForName = metricValues.get(metricName);
            if (valuesForName == null) continue;
            values.addAll(valuesForName);
        }
        return computeFinal(values);
    }

}
} | class MetricsResponse {
/** Node level metrics */
private final Collection<Pair<String, NodeMetricSnapshot>> nodeMetrics;
/**
* Cluster level metrics.
* Must be aggregated at fetch time to avoid issues with nodes and nodes joining/leaving the cluster over time.
*/
private final Map<ClusterSpec.Id, ClusterMetricSnapshot> clusterMetrics = new HashMap<>();
/** Creates this from a metrics/V2 response */
public MetricsResponse(String response, NodeList applicationNodes, NodeRepository nodeRepository) {
this(SlimeUtils.jsonToSlime(response), applicationNodes, nodeRepository);
}
public MetricsResponse(Collection<Pair<String, NodeMetricSnapshot>> metrics) {
this.nodeMetrics = metrics;
}
private MetricsResponse(Slime response, NodeList applicationNodes, NodeRepository nodeRepository) {
nodeMetrics = new ArrayList<>();
Inspector root = response.get();
Inspector nodes = root.field("nodes");
nodes.traverse((ArrayTraverser)(__, node) -> consumeNode(node, applicationNodes, nodeRepository));
}
public Collection<Pair<String, NodeMetricSnapshot>> nodeMetrics() { return nodeMetrics; }
public Map<ClusterSpec.Id, ClusterMetricSnapshot> clusterMetrics() { return clusterMetrics; }
/**
 * Collects the metrics of one node and folds them into both the per-node
 * snapshot list and the cluster-level rate aggregates.
 */
private void consumeNode(Inspector nodeObject, NodeList applicationNodes, NodeRepository nodeRepository) {
    String hostname = nodeObject.field("hostname").asString();
    Optional<Node> node = applicationNodes.stream().filter(n -> n.hostname().equals(hostname)).findAny();
    if (node.isEmpty()) return; // not (or no longer) a node of this application: ignore its metrics
    ListMap<String, Double> nodeValues = new ListMap<>();
    Instant at = consumeNodeMetrics(nodeObject.field("node"), nodeValues);
    consumeServiceMetrics(nodeObject.field("services"), nodeValues);
    nodeMetrics.add(new Pair<>(hostname, new NodeMetricSnapshot(at,
                                                                Metric.cpu.from(nodeValues),
                                                                Metric.memory.from(nodeValues),
                                                                Metric.disk.from(nodeValues),
                                                                (long)Metric.generation.from(nodeValues),
                                                                Metric.inService.from(nodeValues) > 0,
                                                                clusterIsStable(node.get(), applicationNodes, nodeRepository),
                                                                Metric.queryRate.from(nodeValues))));
    // Aggregate rates over all nodes of this node's cluster
    var cluster = node.get().allocation().get().membership().cluster().id();
    var metrics = clusterMetrics.getOrDefault(cluster, ClusterMetricSnapshot.empty(at));
    metrics = metrics.withQueryRate(metrics.queryRate() + Metric.queryRate.from(nodeValues));
    metrics = metrics.withWriteRate(metrics.writeRate() + Metric.writeRate.from(nodeValues)); // bug fix: accumulated onto queryRate()
    clusterMetrics.put(cluster, metrics);
}
private Instant consumeNodeMetrics(Inspector nodeObject, ListMap<String, Double> nodeValues) {
long timestampSecond = nodeObject.field("timestamp").asLong();
Instant at = Instant.ofEpochMilli(timestampSecond * 1000);
nodeObject.field("metrics").traverse((ArrayTraverser) (__, item) -> consumeMetricsItem(item, nodeValues));
return at;
}
private void consumeServiceMetrics(Inspector servicesObject, ListMap<String, Double> nodeValues) {
servicesObject.traverse((ArrayTraverser) (__, item) -> consumeServiceItem(item, nodeValues));
}
private void consumeServiceItem(Inspector serviceObject, ListMap<String, Double> nodeValues) {
serviceObject.field("metrics").traverse((ArrayTraverser) (__, item) -> consumeMetricsItem(item, nodeValues));
}
private void consumeMetricsItem(Inspector item, ListMap<String, Double> values) {
item.field("values").traverse((ObjectTraverser)(name, value) -> values.put(name, value.asDouble()));
}
private boolean clusterIsStable(Node node, NodeList applicationNodes, NodeRepository nodeRepository) {
ClusterSpec cluster = node.allocation().get().membership().cluster();
return Autoscaler.stable(applicationNodes.cluster(cluster.id()), nodeRepository);
}
public static MetricsResponse empty() { return new MetricsResponse(List.of()); }
/** The metrics this can read */
private enum Metric {

    cpu { // percent utilization: averaged across sources, scaled to a [0, 1] fraction (consistent with memory/disk)
        public List<String> metricResponseNames() { return List.of("cpu.util"); }
        double computeFinal(List<Double> values) {
            return values.stream().mapToDouble(v -> v).average().orElse(0) / 100;
        }
    },
    memory {
        public List<String> metricResponseNames() { return List.of("mem.util"); }
        double computeFinal(List<Double> values) {
            return values.stream().mapToDouble(v -> v).average().orElse(0) / 100;
        }
    },
    disk {
        public List<String> metricResponseNames() { return List.of("disk.util"); }
        double computeFinal(List<Double> values) {
            return values.stream().mapToDouble(v -> v).average().orElse(0) / 100;
        }
    },
    generation { // the lowest (most lagging) config generation seen, or -1 when absent
        public List<String> metricResponseNames() { return List.of("application_generation"); }
        double computeFinal(List<Double> values) {
            return values.stream().mapToDouble(v -> v).min().orElse(-1);
        }
    },
    inService { // 0 if any source reports out of service; defaults to 1 when no source reports it
        public List<String> metricResponseNames() { return List.of("in_service"); }
        double computeFinal(List<Double> values) {
            return values.stream().anyMatch(v -> v == 0) ? 0 : 1;
        }
    },
    queryRate { // rates use the default computeFinal: summed across sources
        public List<String> metricResponseNames() {
            return List.of("queries.rate",
                           "content.proton.documentdb.matching.queries.rate");
        }
    },
    writeRate {
        public List<String> metricResponseNames() {
            return List.of("feed.http-requests.rate",
                           "vds.filestor.alldisks.allthreads.put.sum.count.rate",
                           "vds.filestor.alldisks.allthreads.remove.sum.count.rate",
                           "vds.filestor.alldisks.allthreads.update.sum.count.rate");
        }
    };

    /** The names of this metric as emitted from its source(s) */
    public abstract List<String> metricResponseNames();

    /** Combines the collected raw values into this metric's final value; sums by default */
    double computeFinal(List<Double> values) { return values.stream().mapToDouble(v -> v).sum(); }

    /** Collects this metric's values from the given name -> values map and combines them */
    public double from(ListMap<String, Double> metricValues) {
        List<Double> values = new ArrayList<>(1);
        for (String metricName : metricResponseNames()) {
            List<Double> valuesForName = metricValues.get(metricName);
            if (valuesForName == null) continue;
            values.addAll(valuesForName);
        }
        return computeFinal(values);
    }

}
} |
FYI: There is a `NodeList::retired` method that does all this | public void config_server_reprovisioning() throws OrchestrationException {
NodeList configServers = tester.makeConfigServers(3, "default", Version.emptyVersion);
var cfg1 = new HostName("cfg1");
assertEquals(Set.of(cfg1.s(), "cfg2", "cfg3"), configServers.stream().map(Node::hostname).collect(Collectors.toSet()));
var configServerApplication = new ConfigServerApplication();
var duperModel = new MockDuperModel().support(configServerApplication);
InfraDeployerImpl infraDeployer = new InfraDeployerImpl(tester.nodeRepository(), tester.provisioner(), duperModel);
var deployer = mock(Deployer.class);
when(deployer.deployFromLocalActive(eq(configServerApplication.getApplicationId())))
.thenAnswer(invocation -> infraDeployer.getDeployment(configServerApplication.getApplicationId()));
List<Node> wantToRetireNodes = tester.nodeRepository().nodes()
.retire(NodeTypeFilter.from(NodeType.config, null), Agent.operator, Instant.now());
assertEquals(3, wantToRetireNodes.size());
infraDeployer.activateAllSupportedInfraApplications(true);
// Use the NodeList.retired() filter instead of hand-filtering on allocation membership
List<Node> retiredNodes = tester.nodeRepository().nodes().list().retired().asList();
assertEquals(3, retiredNodes.size());
Node retiredNode = tester.nodeRepository().nodes().node(cfg1.s()).orElseThrow();
doThrow(new OrchestrationException("denied")).when(orchestrator).acquirePermissionToRemove(any());
doNothing().when(orchestrator).acquirePermissionToRemove(eq(new HostName(retiredNode.hostname())));
RetiredExpirer retiredExpirer = createRetiredExpirer(deployer);
retiredExpirer.run();
var activeConfigServerHostnames = new HashSet<>(Set.of("cfg1", "cfg2", "cfg3"));
assertTrue(activeConfigServerHostnames.contains(retiredNode.hostname()));
activeConfigServerHostnames.remove(retiredNode.hostname());
assertEquals(activeConfigServerHostnames, configServerHostnames(duperModel));
assertEquals(1, tester.nodeRepository().nodes().list(Node.State.inactive).nodeType(NodeType.config).size());
assertEquals(2, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.config).size());
retiredExpirer.run();
assertEquals(activeConfigServerHostnames, configServerHostnames(duperModel));
assertEquals(1, tester.nodeRepository().nodes().list(Node.State.inactive).nodeType(NodeType.config).size());
assertEquals(2, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.config).size());
nodeRepository.nodes().deallocate(retiredNode, Agent.InactiveExpirer, "expired");
retiredNode = tester.nodeRepository().nodes().list(Node.State.parked).nodeType(NodeType.config).asList().get(0);
nodeRepository.nodes().removeRecursively(retiredNode, true);
infraDeployer.activateAllSupportedInfraApplications(true);
retiredExpirer.run();
assertEquals(activeConfigServerHostnames, configServerHostnames(duperModel));
assertEquals(2, tester.nodeRepository().nodes().list().nodeType(NodeType.config).size());
assertEquals(2, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.config).size());
MockNameResolver nameResolver = (MockNameResolver)tester.nodeRepository().nameResolver();
String ipv4 = "127.0.1.4";
nameResolver.addRecord(retiredNode.hostname(), ipv4);
Node node = Node.create(retiredNode.hostname(), new IP.Config(Set.of(ipv4), Set.of()), retiredNode.hostname(),
tester.asFlavor("default", NodeType.config), NodeType.config).build();
var nodes = List.of(node);
nodes = nodeRepository.nodes().addNodes(nodes, Agent.system);
nodes = nodeRepository.nodes().deallocate(nodes, Agent.system, getClass().getSimpleName());
nodeRepository.nodes().setReady(nodes, Agent.system, getClass().getSimpleName());
retiredExpirer.run();
assertEquals(activeConfigServerHostnames, configServerHostnames(duperModel));
assertEquals(1, tester.nodeRepository().nodes().list(Node.State.ready).nodeType(NodeType.config).size());
assertEquals(2, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.config).size());
infraDeployer.activateAllSupportedInfraApplications(true);
assertEquals(3, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.config).size());
retiredExpirer.run();
assertEquals(3, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.config).size());
var retiredHostnames = tester.nodeRepository()
.nodes().list(() -> {})
.stream()
.filter(n -> n.allocation().map(allocation -> allocation.membership().retired()).orElse(false))
.map(Node::hostname)
.collect(Collectors.toSet());
assertEquals(Set.of("cfg2", "cfg3"), retiredHostnames);
} | List<Node> retiredNodes = tester.nodeRepository() | public void config_server_reprovisioning() throws OrchestrationException {
NodeList configServers = tester.makeConfigServers(3, "default", Version.emptyVersion);
var cfg1 = new HostName("cfg1");
assertEquals(Set.of(cfg1.s(), "cfg2", "cfg3"), configServers.stream().map(Node::hostname).collect(Collectors.toSet()));
var configServerApplication = new ConfigServerApplication();
var duperModel = new MockDuperModel().support(configServerApplication);
InfraDeployerImpl infraDeployer = new InfraDeployerImpl(tester.nodeRepository(), tester.provisioner(), duperModel);
var deployer = mock(Deployer.class);
when(deployer.deployFromLocalActive(eq(configServerApplication.getApplicationId())))
.thenAnswer(invocation -> infraDeployer.getDeployment(configServerApplication.getApplicationId()));
List<Node> wantToRetireNodes = tester.nodeRepository().nodes()
.retire(NodeTypeFilter.from(NodeType.config, null), Agent.operator, Instant.now());
assertEquals(3, wantToRetireNodes.size());
infraDeployer.activateAllSupportedInfraApplications(true);
List<Node> retiredNodes = tester.nodeRepository().nodes().list().retired().asList();
assertEquals(3, retiredNodes.size());
Node retiredNode = tester.nodeRepository().nodes().node(cfg1.s()).orElseThrow();
doThrow(new OrchestrationException("denied")).when(orchestrator).acquirePermissionToRemove(any());
doNothing().when(orchestrator).acquirePermissionToRemove(eq(new HostName(retiredNode.hostname())));
RetiredExpirer retiredExpirer = createRetiredExpirer(deployer);
retiredExpirer.run();
var activeConfigServerHostnames = new HashSet<>(Set.of("cfg1", "cfg2", "cfg3"));
assertTrue(activeConfigServerHostnames.contains(retiredNode.hostname()));
activeConfigServerHostnames.remove(retiredNode.hostname());
assertEquals(activeConfigServerHostnames, configServerHostnames(duperModel));
assertEquals(1, tester.nodeRepository().nodes().list(Node.State.inactive).nodeType(NodeType.config).size());
assertEquals(2, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.config).size());
retiredExpirer.run();
assertEquals(activeConfigServerHostnames, configServerHostnames(duperModel));
assertEquals(1, tester.nodeRepository().nodes().list(Node.State.inactive).nodeType(NodeType.config).size());
assertEquals(2, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.config).size());
nodeRepository.nodes().deallocate(retiredNode, Agent.InactiveExpirer, "expired");
retiredNode = tester.nodeRepository().nodes().list(Node.State.parked).nodeType(NodeType.config).asList().get(0);
nodeRepository.nodes().removeRecursively(retiredNode, true);
infraDeployer.activateAllSupportedInfraApplications(true);
retiredExpirer.run();
assertEquals(activeConfigServerHostnames, configServerHostnames(duperModel));
assertEquals(2, tester.nodeRepository().nodes().list().nodeType(NodeType.config).size());
assertEquals(2, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.config).size());
MockNameResolver nameResolver = (MockNameResolver)tester.nodeRepository().nameResolver();
String ipv4 = "127.0.1.4";
nameResolver.addRecord(retiredNode.hostname(), ipv4);
Node node = Node.create(retiredNode.hostname(), new IP.Config(Set.of(ipv4), Set.of()), retiredNode.hostname(),
tester.asFlavor("default", NodeType.config), NodeType.config).build();
var nodes = List.of(node);
nodes = nodeRepository.nodes().addNodes(nodes, Agent.system);
nodes = nodeRepository.nodes().deallocate(nodes, Agent.system, getClass().getSimpleName());
nodeRepository.nodes().setReady(nodes, Agent.system, getClass().getSimpleName());
retiredExpirer.run();
assertEquals(activeConfigServerHostnames, configServerHostnames(duperModel));
assertEquals(1, tester.nodeRepository().nodes().list(Node.State.ready).nodeType(NodeType.config).size());
assertEquals(2, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.config).size());
infraDeployer.activateAllSupportedInfraApplications(true);
assertEquals(3, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.config).size());
retiredExpirer.run();
assertEquals(3, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.config).size());
var retiredHostnames = tester.nodeRepository()
.nodes().list(() -> {})
.stream()
.filter(n -> n.allocation().map(allocation -> allocation.membership().retired()).orElse(false))
.map(Node::hostname)
.collect(Collectors.toSet());
assertEquals(Set.of("cfg2", "cfg3"), retiredHostnames);
} | class RetiredExpirerTest {
private final NodeResources hostResources = new NodeResources(64, 128, 2000, 10);
private final NodeResources nodeResources = new NodeResources(2, 8, 50, 1);
private final ProvisioningTester tester = new ProvisioningTester.Builder().build();
private final ManualClock clock = tester.clock();
private final NodeRepository nodeRepository = tester.nodeRepository();
private final NodeRepositoryProvisioner provisioner = tester.provisioner();
private final Orchestrator orchestrator = mock(Orchestrator.class);
private static final Duration RETIRED_EXPIRATION = Duration.ofHours(12);
@Before
public void setup() throws OrchestrationException {
doThrow(new RuntimeException()).when(orchestrator).acquirePermissionToRemove(any());
}
@Test
public void ensure_retired_nodes_time_out() {
tester.makeReadyNodes(7, nodeResources);
tester.makeReadyHosts(4, hostResources);
ApplicationId applicationId = ApplicationId.from(TenantName.from("foo"), ApplicationName.from("bar"), InstanceName.from("fuz"));
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test")).vespaVersion("6.42").build();
int wantedNodes;
activate(applicationId, cluster, wantedNodes=7, 1);
activate(applicationId, cluster, wantedNodes=2, 1);
activate(applicationId, cluster, wantedNodes=3, 1);
assertEquals(7, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
assertEquals(0, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
clock.advance(Duration.ofHours(30));
MockDeployer deployer =
new MockDeployer(provisioner,
clock,
Collections.singletonMap(applicationId, new MockDeployer.ApplicationContext(applicationId,
cluster,
Capacity.from(new ClusterResources(wantedNodes, 1, nodeResources)))));
createRetiredExpirer(deployer).run();
assertEquals(3, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
assertEquals(4, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
assertEquals(1, deployer.redeployments);
for (Node node : nodeRepository.nodes().list(Node.State.inactive).owner(applicationId))
assertFalse(node.allocation().get().membership().retired());
}
@Test
public void ensure_early_inactivation() throws OrchestrationException {
tester.makeReadyNodes(7, nodeResources);
tester.makeReadyHosts(4, hostResources);
ApplicationId applicationId = ApplicationId.from(TenantName.from("foo"), ApplicationName.from("bar"), InstanceName.from("fuz"));
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test")).vespaVersion("6.42").build();
int wantedNodes;
activate(applicationId, cluster, wantedNodes=7, 1);
activate(applicationId, cluster, wantedNodes=2, 1);
activate(applicationId, cluster, wantedNodes=3, 1);
assertEquals(7, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
assertEquals(0, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
MockDeployer deployer =
new MockDeployer(provisioner,
clock,
Collections.singletonMap(
applicationId,
new MockDeployer.ApplicationContext(applicationId,
cluster,
Capacity.from(new ClusterResources(wantedNodes, 1, nodeResources)))));
doNothing()
.doThrow(new OrchestrationException("Permission not granted 1"))
.doNothing()
.doThrow(new OrchestrationException("Permission not granted 2"))
.when(orchestrator).acquirePermissionToRemove(any());
RetiredExpirer retiredExpirer = createRetiredExpirer(deployer);
retiredExpirer.run();
assertEquals(5, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
assertEquals(2, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
assertEquals(1, deployer.redeployments);
verify(orchestrator, times(4)).acquirePermissionToRemove(any());
retiredExpirer.run();
assertEquals(5, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
assertEquals(2, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
assertEquals(1, deployer.redeployments);
verify(orchestrator, times(6)).acquirePermissionToRemove(any());
clock.advance(RETIRED_EXPIRATION.plusMinutes(1));
retiredExpirer.run();
assertEquals(3, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
assertEquals(4, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
assertEquals(2, deployer.redeployments);
verify(orchestrator, times(6)).acquirePermissionToRemove(any());
for (Node node : nodeRepository.nodes().list(Node.State.inactive).owner(applicationId))
assertFalse(node.allocation().get().membership().retired());
}
@Test
private Set<String> configServerHostnames(MockDuperModel duperModel) {
return duperModel.hostnamesOf(new ConfigServerApplication().getApplicationId()).stream()
.map(com.yahoo.config.provision.HostName::value)
.collect(Collectors.toSet());
}
private void activate(ApplicationId applicationId, ClusterSpec cluster, int nodes, int groups) {
Capacity capacity = Capacity.from(new ClusterResources(nodes, groups, nodeResources));
tester.activate(applicationId, tester.prepare(applicationId, cluster, capacity));
}
private RetiredExpirer createRetiredExpirer(Deployer deployer) {
return new RetiredExpirer(nodeRepository,
orchestrator,
deployer,
new TestMetric(),
Duration.ofDays(30), /* Maintenance interval, use large value so it never runs by itself */
RETIRED_EXPIRATION);
}
} | class RetiredExpirerTest {
private final NodeResources hostResources = new NodeResources(64, 128, 2000, 10);
private final NodeResources nodeResources = new NodeResources(2, 8, 50, 1);
private final ProvisioningTester tester = new ProvisioningTester.Builder().build();
private final ManualClock clock = tester.clock();
private final NodeRepository nodeRepository = tester.nodeRepository();
private final NodeRepositoryProvisioner provisioner = tester.provisioner();
private final Orchestrator orchestrator = mock(Orchestrator.class);
private static final Duration RETIRED_EXPIRATION = Duration.ofHours(12);
@Before
public void setup() throws OrchestrationException {
doThrow(new RuntimeException()).when(orchestrator).acquirePermissionToRemove(any());
}
@Test
public void ensure_retired_nodes_time_out() {
tester.makeReadyNodes(7, nodeResources);
tester.makeReadyHosts(4, hostResources);
ApplicationId applicationId = ApplicationId.from(TenantName.from("foo"), ApplicationName.from("bar"), InstanceName.from("fuz"));
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test")).vespaVersion("6.42").build();
int wantedNodes;
activate(applicationId, cluster, wantedNodes=7, 1);
activate(applicationId, cluster, wantedNodes=2, 1);
activate(applicationId, cluster, wantedNodes=3, 1);
assertEquals(7, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
assertEquals(0, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
clock.advance(Duration.ofHours(30));
MockDeployer deployer =
new MockDeployer(provisioner,
clock,
Collections.singletonMap(applicationId, new MockDeployer.ApplicationContext(applicationId,
cluster,
Capacity.from(new ClusterResources(wantedNodes, 1, nodeResources)))));
createRetiredExpirer(deployer).run();
assertEquals(3, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
assertEquals(4, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
assertEquals(1, deployer.redeployments);
for (Node node : nodeRepository.nodes().list(Node.State.inactive).owner(applicationId))
assertFalse(node.allocation().get().membership().retired());
}
@Test
public void ensure_early_inactivation() throws OrchestrationException {
tester.makeReadyNodes(7, nodeResources);
tester.makeReadyHosts(4, hostResources);
ApplicationId applicationId = ApplicationId.from(TenantName.from("foo"), ApplicationName.from("bar"), InstanceName.from("fuz"));
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test")).vespaVersion("6.42").build();
int wantedNodes;
activate(applicationId, cluster, wantedNodes=7, 1);
activate(applicationId, cluster, wantedNodes=2, 1);
activate(applicationId, cluster, wantedNodes=3, 1);
assertEquals(7, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
assertEquals(0, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
MockDeployer deployer =
new MockDeployer(provisioner,
clock,
Collections.singletonMap(
applicationId,
new MockDeployer.ApplicationContext(applicationId,
cluster,
Capacity.from(new ClusterResources(wantedNodes, 1, nodeResources)))));
doNothing()
.doThrow(new OrchestrationException("Permission not granted 1"))
.doNothing()
.doThrow(new OrchestrationException("Permission not granted 2"))
.when(orchestrator).acquirePermissionToRemove(any());
RetiredExpirer retiredExpirer = createRetiredExpirer(deployer);
retiredExpirer.run();
assertEquals(5, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
assertEquals(2, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
assertEquals(1, deployer.redeployments);
verify(orchestrator, times(4)).acquirePermissionToRemove(any());
retiredExpirer.run();
assertEquals(5, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
assertEquals(2, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
assertEquals(1, deployer.redeployments);
verify(orchestrator, times(6)).acquirePermissionToRemove(any());
clock.advance(RETIRED_EXPIRATION.plusMinutes(1));
retiredExpirer.run();
assertEquals(3, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
assertEquals(4, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
assertEquals(2, deployer.redeployments);
verify(orchestrator, times(6)).acquirePermissionToRemove(any());
for (Node node : nodeRepository.nodes().list(Node.State.inactive).owner(applicationId))
assertFalse(node.allocation().get().membership().retired());
}
@Test
private Set<String> configServerHostnames(MockDuperModel duperModel) {
return duperModel.hostnamesOf(new ConfigServerApplication().getApplicationId()).stream()
.map(com.yahoo.config.provision.HostName::value)
.collect(Collectors.toSet());
}
private void activate(ApplicationId applicationId, ClusterSpec cluster, int nodes, int groups) {
Capacity capacity = Capacity.from(new ClusterResources(nodes, groups, nodeResources));
tester.activate(applicationId, tester.prepare(applicationId, cluster, capacity));
}
private RetiredExpirer createRetiredExpirer(Deployer deployer) {
return new RetiredExpirer(nodeRepository,
orchestrator,
deployer,
new TestMetric(),
Duration.ofDays(30), /* Maintenance interval, use large value so it never runs by itself */
RETIRED_EXPIRATION);
}
} |
Changed accordingly | public void config_server_reprovisioning() throws OrchestrationException {
NodeList configServers = tester.makeConfigServers(3, "default", Version.emptyVersion);
var cfg1 = new HostName("cfg1");
assertEquals(Set.of(cfg1.s(), "cfg2", "cfg3"), configServers.stream().map(Node::hostname).collect(Collectors.toSet()));
var configServerApplication = new ConfigServerApplication();
var duperModel = new MockDuperModel().support(configServerApplication);
InfraDeployerImpl infraDeployer = new InfraDeployerImpl(tester.nodeRepository(), tester.provisioner(), duperModel);
var deployer = mock(Deployer.class);
when(deployer.deployFromLocalActive(eq(configServerApplication.getApplicationId())))
.thenAnswer(invocation -> infraDeployer.getDeployment(configServerApplication.getApplicationId()));
List<Node> wantToRetireNodes = tester.nodeRepository().nodes()
.retire(NodeTypeFilter.from(NodeType.config, null), Agent.operator, Instant.now());
assertEquals(3, wantToRetireNodes.size());
infraDeployer.activateAllSupportedInfraApplications(true);
List<Node> retiredNodes = tester.nodeRepository()
.nodes().list(() -> {})
.stream()
.filter(node -> node.allocation().map(allocation -> allocation.membership().retired()).orElse(false))
.collect(Collectors.toList());
assertEquals(3, retiredNodes.size());
Node retiredNode = tester.nodeRepository().nodes().node(cfg1.s()).orElseThrow();
doThrow(new OrchestrationException("denied")).when(orchestrator).acquirePermissionToRemove(any());
doNothing().when(orchestrator).acquirePermissionToRemove(eq(new HostName(retiredNode.hostname())));
RetiredExpirer retiredExpirer = createRetiredExpirer(deployer);
retiredExpirer.run();
var activeConfigServerHostnames = new HashSet<>(Set.of("cfg1", "cfg2", "cfg3"));
assertTrue(activeConfigServerHostnames.contains(retiredNode.hostname()));
activeConfigServerHostnames.remove(retiredNode.hostname());
assertEquals(activeConfigServerHostnames, configServerHostnames(duperModel));
assertEquals(1, tester.nodeRepository().nodes().list(Node.State.inactive).nodeType(NodeType.config).size());
assertEquals(2, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.config).size());
retiredExpirer.run();
assertEquals(activeConfigServerHostnames, configServerHostnames(duperModel));
assertEquals(1, tester.nodeRepository().nodes().list(Node.State.inactive).nodeType(NodeType.config).size());
assertEquals(2, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.config).size());
nodeRepository.nodes().deallocate(retiredNode, Agent.InactiveExpirer, "expired");
retiredNode = tester.nodeRepository().nodes().list(Node.State.parked).nodeType(NodeType.config).asList().get(0);
nodeRepository.nodes().removeRecursively(retiredNode, true);
infraDeployer.activateAllSupportedInfraApplications(true);
retiredExpirer.run();
assertEquals(activeConfigServerHostnames, configServerHostnames(duperModel));
assertEquals(2, tester.nodeRepository().nodes().list().nodeType(NodeType.config).size());
assertEquals(2, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.config).size());
MockNameResolver nameResolver = (MockNameResolver)tester.nodeRepository().nameResolver();
String ipv4 = "127.0.1.4";
nameResolver.addRecord(retiredNode.hostname(), ipv4);
Node node = Node.create(retiredNode.hostname(), new IP.Config(Set.of(ipv4), Set.of()), retiredNode.hostname(),
tester.asFlavor("default", NodeType.config), NodeType.config).build();
var nodes = List.of(node);
nodes = nodeRepository.nodes().addNodes(nodes, Agent.system);
nodes = nodeRepository.nodes().deallocate(nodes, Agent.system, getClass().getSimpleName());
nodeRepository.nodes().setReady(nodes, Agent.system, getClass().getSimpleName());
retiredExpirer.run();
assertEquals(activeConfigServerHostnames, configServerHostnames(duperModel));
assertEquals(1, tester.nodeRepository().nodes().list(Node.State.ready).nodeType(NodeType.config).size());
assertEquals(2, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.config).size());
infraDeployer.activateAllSupportedInfraApplications(true);
assertEquals(3, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.config).size());
retiredExpirer.run();
assertEquals(3, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.config).size());
var retiredHostnames = tester.nodeRepository()
.nodes().list(() -> {})
.stream()
.filter(n -> n.allocation().map(allocation -> allocation.membership().retired()).orElse(false))
.map(Node::hostname)
.collect(Collectors.toSet());
assertEquals(Set.of("cfg2", "cfg3"), retiredHostnames);
} | List<Node> retiredNodes = tester.nodeRepository() | public void config_server_reprovisioning() throws OrchestrationException {
NodeList configServers = tester.makeConfigServers(3, "default", Version.emptyVersion);
var cfg1 = new HostName("cfg1");
assertEquals(Set.of(cfg1.s(), "cfg2", "cfg3"), configServers.stream().map(Node::hostname).collect(Collectors.toSet()));
var configServerApplication = new ConfigServerApplication();
var duperModel = new MockDuperModel().support(configServerApplication);
InfraDeployerImpl infraDeployer = new InfraDeployerImpl(tester.nodeRepository(), tester.provisioner(), duperModel);
var deployer = mock(Deployer.class);
when(deployer.deployFromLocalActive(eq(configServerApplication.getApplicationId())))
.thenAnswer(invocation -> infraDeployer.getDeployment(configServerApplication.getApplicationId()));
List<Node> wantToRetireNodes = tester.nodeRepository().nodes()
.retire(NodeTypeFilter.from(NodeType.config, null), Agent.operator, Instant.now());
assertEquals(3, wantToRetireNodes.size());
infraDeployer.activateAllSupportedInfraApplications(true);
List<Node> retiredNodes = tester.nodeRepository().nodes().list().retired().asList();
assertEquals(3, retiredNodes.size());
Node retiredNode = tester.nodeRepository().nodes().node(cfg1.s()).orElseThrow();
doThrow(new OrchestrationException("denied")).when(orchestrator).acquirePermissionToRemove(any());
doNothing().when(orchestrator).acquirePermissionToRemove(eq(new HostName(retiredNode.hostname())));
RetiredExpirer retiredExpirer = createRetiredExpirer(deployer);
retiredExpirer.run();
var activeConfigServerHostnames = new HashSet<>(Set.of("cfg1", "cfg2", "cfg3"));
assertTrue(activeConfigServerHostnames.contains(retiredNode.hostname()));
activeConfigServerHostnames.remove(retiredNode.hostname());
assertEquals(activeConfigServerHostnames, configServerHostnames(duperModel));
assertEquals(1, tester.nodeRepository().nodes().list(Node.State.inactive).nodeType(NodeType.config).size());
assertEquals(2, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.config).size());
retiredExpirer.run();
assertEquals(activeConfigServerHostnames, configServerHostnames(duperModel));
assertEquals(1, tester.nodeRepository().nodes().list(Node.State.inactive).nodeType(NodeType.config).size());
assertEquals(2, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.config).size());
nodeRepository.nodes().deallocate(retiredNode, Agent.InactiveExpirer, "expired");
retiredNode = tester.nodeRepository().nodes().list(Node.State.parked).nodeType(NodeType.config).asList().get(0);
nodeRepository.nodes().removeRecursively(retiredNode, true);
infraDeployer.activateAllSupportedInfraApplications(true);
retiredExpirer.run();
assertEquals(activeConfigServerHostnames, configServerHostnames(duperModel));
assertEquals(2, tester.nodeRepository().nodes().list().nodeType(NodeType.config).size());
assertEquals(2, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.config).size());
MockNameResolver nameResolver = (MockNameResolver)tester.nodeRepository().nameResolver();
String ipv4 = "127.0.1.4";
nameResolver.addRecord(retiredNode.hostname(), ipv4);
Node node = Node.create(retiredNode.hostname(), new IP.Config(Set.of(ipv4), Set.of()), retiredNode.hostname(),
tester.asFlavor("default", NodeType.config), NodeType.config).build();
var nodes = List.of(node);
nodes = nodeRepository.nodes().addNodes(nodes, Agent.system);
nodes = nodeRepository.nodes().deallocate(nodes, Agent.system, getClass().getSimpleName());
nodeRepository.nodes().setReady(nodes, Agent.system, getClass().getSimpleName());
retiredExpirer.run();
assertEquals(activeConfigServerHostnames, configServerHostnames(duperModel));
assertEquals(1, tester.nodeRepository().nodes().list(Node.State.ready).nodeType(NodeType.config).size());
assertEquals(2, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.config).size());
infraDeployer.activateAllSupportedInfraApplications(true);
assertEquals(3, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.config).size());
retiredExpirer.run();
assertEquals(3, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.config).size());
var retiredHostnames = tester.nodeRepository()
.nodes().list(() -> {})
.stream()
.filter(n -> n.allocation().map(allocation -> allocation.membership().retired()).orElse(false))
.map(Node::hostname)
.collect(Collectors.toSet());
assertEquals(Set.of("cfg2", "cfg3"), retiredHostnames);
} | class RetiredExpirerTest {
private final NodeResources hostResources = new NodeResources(64, 128, 2000, 10);
private final NodeResources nodeResources = new NodeResources(2, 8, 50, 1);
private final ProvisioningTester tester = new ProvisioningTester.Builder().build();
private final ManualClock clock = tester.clock();
private final NodeRepository nodeRepository = tester.nodeRepository();
private final NodeRepositoryProvisioner provisioner = tester.provisioner();
private final Orchestrator orchestrator = mock(Orchestrator.class);
private static final Duration RETIRED_EXPIRATION = Duration.ofHours(12);
@Before
public void setup() throws OrchestrationException {
doThrow(new RuntimeException()).when(orchestrator).acquirePermissionToRemove(any());
}
@Test
public void ensure_retired_nodes_time_out() {
tester.makeReadyNodes(7, nodeResources);
tester.makeReadyHosts(4, hostResources);
ApplicationId applicationId = ApplicationId.from(TenantName.from("foo"), ApplicationName.from("bar"), InstanceName.from("fuz"));
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test")).vespaVersion("6.42").build();
int wantedNodes;
activate(applicationId, cluster, wantedNodes=7, 1);
activate(applicationId, cluster, wantedNodes=2, 1);
activate(applicationId, cluster, wantedNodes=3, 1);
assertEquals(7, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
assertEquals(0, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
clock.advance(Duration.ofHours(30));
MockDeployer deployer =
new MockDeployer(provisioner,
clock,
Collections.singletonMap(applicationId, new MockDeployer.ApplicationContext(applicationId,
cluster,
Capacity.from(new ClusterResources(wantedNodes, 1, nodeResources)))));
createRetiredExpirer(deployer).run();
assertEquals(3, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
assertEquals(4, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
assertEquals(1, deployer.redeployments);
for (Node node : nodeRepository.nodes().list(Node.State.inactive).owner(applicationId))
assertFalse(node.allocation().get().membership().retired());
}
@Test
public void ensure_early_inactivation() throws OrchestrationException {
tester.makeReadyNodes(7, nodeResources);
tester.makeReadyHosts(4, hostResources);
ApplicationId applicationId = ApplicationId.from(TenantName.from("foo"), ApplicationName.from("bar"), InstanceName.from("fuz"));
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test")).vespaVersion("6.42").build();
int wantedNodes;
activate(applicationId, cluster, wantedNodes=7, 1);
activate(applicationId, cluster, wantedNodes=2, 1);
activate(applicationId, cluster, wantedNodes=3, 1);
assertEquals(7, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
assertEquals(0, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
MockDeployer deployer =
new MockDeployer(provisioner,
clock,
Collections.singletonMap(
applicationId,
new MockDeployer.ApplicationContext(applicationId,
cluster,
Capacity.from(new ClusterResources(wantedNodes, 1, nodeResources)))));
doNothing()
.doThrow(new OrchestrationException("Permission not granted 1"))
.doNothing()
.doThrow(new OrchestrationException("Permission not granted 2"))
.when(orchestrator).acquirePermissionToRemove(any());
RetiredExpirer retiredExpirer = createRetiredExpirer(deployer);
retiredExpirer.run();
assertEquals(5, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
assertEquals(2, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
assertEquals(1, deployer.redeployments);
verify(orchestrator, times(4)).acquirePermissionToRemove(any());
retiredExpirer.run();
assertEquals(5, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
assertEquals(2, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
assertEquals(1, deployer.redeployments);
verify(orchestrator, times(6)).acquirePermissionToRemove(any());
clock.advance(RETIRED_EXPIRATION.plusMinutes(1));
retiredExpirer.run();
assertEquals(3, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
assertEquals(4, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
assertEquals(2, deployer.redeployments);
verify(orchestrator, times(6)).acquirePermissionToRemove(any());
for (Node node : nodeRepository.nodes().list(Node.State.inactive).owner(applicationId))
assertFalse(node.allocation().get().membership().retired());
}
@Test
private Set<String> configServerHostnames(MockDuperModel duperModel) {
return duperModel.hostnamesOf(new ConfigServerApplication().getApplicationId()).stream()
.map(com.yahoo.config.provision.HostName::value)
.collect(Collectors.toSet());
}
private void activate(ApplicationId applicationId, ClusterSpec cluster, int nodes, int groups) {
Capacity capacity = Capacity.from(new ClusterResources(nodes, groups, nodeResources));
tester.activate(applicationId, tester.prepare(applicationId, cluster, capacity));
}
private RetiredExpirer createRetiredExpirer(Deployer deployer) {
return new RetiredExpirer(nodeRepository,
orchestrator,
deployer,
new TestMetric(),
Duration.ofDays(30), /* Maintenance interval, use large value so it never runs by itself */
RETIRED_EXPIRATION);
}
} | class RetiredExpirerTest {
private final NodeResources hostResources = new NodeResources(64, 128, 2000, 10);
private final NodeResources nodeResources = new NodeResources(2, 8, 50, 1);
private final ProvisioningTester tester = new ProvisioningTester.Builder().build();
private final ManualClock clock = tester.clock();
private final NodeRepository nodeRepository = tester.nodeRepository();
private final NodeRepositoryProvisioner provisioner = tester.provisioner();
private final Orchestrator orchestrator = mock(Orchestrator.class);
private static final Duration RETIRED_EXPIRATION = Duration.ofHours(12);
@Before
public void setup() throws OrchestrationException {
doThrow(new RuntimeException()).when(orchestrator).acquirePermissionToRemove(any());
}
@Test
public void ensure_retired_nodes_time_out() {
tester.makeReadyNodes(7, nodeResources);
tester.makeReadyHosts(4, hostResources);
ApplicationId applicationId = ApplicationId.from(TenantName.from("foo"), ApplicationName.from("bar"), InstanceName.from("fuz"));
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test")).vespaVersion("6.42").build();
int wantedNodes;
activate(applicationId, cluster, wantedNodes=7, 1);
activate(applicationId, cluster, wantedNodes=2, 1);
activate(applicationId, cluster, wantedNodes=3, 1);
assertEquals(7, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
assertEquals(0, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
clock.advance(Duration.ofHours(30));
MockDeployer deployer =
new MockDeployer(provisioner,
clock,
Collections.singletonMap(applicationId, new MockDeployer.ApplicationContext(applicationId,
cluster,
Capacity.from(new ClusterResources(wantedNodes, 1, nodeResources)))));
createRetiredExpirer(deployer).run();
assertEquals(3, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
assertEquals(4, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
assertEquals(1, deployer.redeployments);
for (Node node : nodeRepository.nodes().list(Node.State.inactive).owner(applicationId))
assertFalse(node.allocation().get().membership().retired());
}
@Test
public void ensure_early_inactivation() throws OrchestrationException {
tester.makeReadyNodes(7, nodeResources);
tester.makeReadyHosts(4, hostResources);
ApplicationId applicationId = ApplicationId.from(TenantName.from("foo"), ApplicationName.from("bar"), InstanceName.from("fuz"));
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test")).vespaVersion("6.42").build();
int wantedNodes;
activate(applicationId, cluster, wantedNodes=7, 1);
activate(applicationId, cluster, wantedNodes=2, 1);
activate(applicationId, cluster, wantedNodes=3, 1);
assertEquals(7, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
assertEquals(0, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
MockDeployer deployer =
new MockDeployer(provisioner,
clock,
Collections.singletonMap(
applicationId,
new MockDeployer.ApplicationContext(applicationId,
cluster,
Capacity.from(new ClusterResources(wantedNodes, 1, nodeResources)))));
doNothing()
.doThrow(new OrchestrationException("Permission not granted 1"))
.doNothing()
.doThrow(new OrchestrationException("Permission not granted 2"))
.when(orchestrator).acquirePermissionToRemove(any());
RetiredExpirer retiredExpirer = createRetiredExpirer(deployer);
retiredExpirer.run();
assertEquals(5, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
assertEquals(2, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
assertEquals(1, deployer.redeployments);
verify(orchestrator, times(4)).acquirePermissionToRemove(any());
retiredExpirer.run();
assertEquals(5, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
assertEquals(2, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
assertEquals(1, deployer.redeployments);
verify(orchestrator, times(6)).acquirePermissionToRemove(any());
clock.advance(RETIRED_EXPIRATION.plusMinutes(1));
retiredExpirer.run();
assertEquals(3, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
assertEquals(4, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
assertEquals(2, deployer.redeployments);
verify(orchestrator, times(6)).acquirePermissionToRemove(any());
for (Node node : nodeRepository.nodes().list(Node.State.inactive).owner(applicationId))
assertFalse(node.allocation().get().membership().retired());
}
@Test
private Set<String> configServerHostnames(MockDuperModel duperModel) {
return duperModel.hostnamesOf(new ConfigServerApplication().getApplicationId()).stream()
.map(com.yahoo.config.provision.HostName::value)
.collect(Collectors.toSet());
}
private void activate(ApplicationId applicationId, ClusterSpec cluster, int nodes, int groups) {
Capacity capacity = Capacity.from(new ClusterResources(nodes, groups, nodeResources));
tester.activate(applicationId, tester.prepare(applicationId, cluster, capacity));
}
private RetiredExpirer createRetiredExpirer(Deployer deployer) {
return new RetiredExpirer(nodeRepository,
orchestrator,
deployer,
new TestMetric(),
Duration.ofDays(30), /* Maintenance interval, use large value so it never runs by itself */
RETIRED_EXPIRATION);
}
} |
Looks like we are just using this to get the `TenantName`, but we already have it? | private HttpResponse allowArchiveAccess(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
var data = toSlime(request.getData()).get();
var role = mandatory("role", data).asString();
var tenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
if (role.isBlank()) {
return ErrorResponse.badRequest("Archive access role can't be whitespace only");
}
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withArchiveAccessRole(Optional.of(role));
controller.tenants().store(lockedTenant);
});
tenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
var slime = new Slime();
toSlime(slime.setObject(), tenant.tenantSecretStores());
return new SlimeJsonResponse(slime);
} | var tenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName)); | private HttpResponse allowArchiveAccess(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
var data = toSlime(request.getData()).get();
var role = mandatory("role", data).asString();
if (role.isBlank()) {
return ErrorResponse.badRequest("Archive access role can't be whitespace only");
}
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withArchiveAccessRole(Optional.of(role));
controller.tenants().store(lockedTenant);
});
return new MessageResponse("Archive access role set to '" + role + "' for tenant " + tenantName + ".");
} | class ApplicationApiHandler extends LoggingRequestHandler {
private static final ObjectMapper jsonMapper = new ObjectMapper();
private static final String OPTIONAL_PREFIX = "/api";
private final Controller controller;
private final AccessControlRequests accessControlRequests;
private final TestConfigSerializer testConfigSerializer;
    /**
     * Creates the handler.
     *
     * @param parentCtx             context forwarded to the parent LoggingRequestHandler
     * @param controller            the controller backing all API operations
     * @param accessControlRequests translates request payloads into access-control operations
     */
    @Inject
    public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx,
                                 Controller controller,
                                 AccessControlRequests accessControlRequests) {
        super(parentCtx);
        this.controller = controller;
        this.accessControlRequests = accessControlRequests;
        // Derived from the controller's system rather than injected separately.
        this.testConfigSerializer = new TestConfigSerializer(controller.system());
    }
@Override
public Duration getTimeout() {
return Duration.ofMinutes(20);
}
    /**
     * Dispatches the request to the handler for its HTTP method and maps thrown exceptions to
     * HTTP error responses. Catch order matters: the specific exception types are handled first,
     * with RuntimeException as the final catch-all boundary.
     */
    @Override
    public HttpResponse handle(HttpRequest request) {
        try {
            // OPTIONAL_PREFIX lets the API be mounted both with and without a leading "/api".
            Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
            switch (request.getMethod()) {
                case GET: return handleGET(path, request);
                case PUT: return handlePUT(path, request);
                case POST: return handlePOST(path, request);
                case PATCH: return handlePATCH(path, request);
                case DELETE: return handleDELETE(path, request);
                case OPTIONS: return handleOPTIONS();
                default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
            }
        }
        catch (ForbiddenException e) {
            return ErrorResponse.forbidden(Exceptions.toMessageString(e));
        }
        catch (NotAuthorizedException e) {
            return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
        }
        catch (NotExistsException e) {
            return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
        }
        catch (IllegalArgumentException e) {
            return ErrorResponse.badRequest(Exceptions.toMessageString(e));
        }
        catch (ConfigServerException e) {
            // Translate config server error codes to the closest HTTP status; anything unknown is a 400.
            switch (e.getErrorCode()) {
                case NOT_FOUND:
                    return new ErrorResponse(NOT_FOUND, e.getErrorCode().name(), Exceptions.toMessageString(e));
                case ACTIVATION_CONFLICT:
                    return new ErrorResponse(CONFLICT, e.getErrorCode().name(), Exceptions.toMessageString(e));
                case INTERNAL_SERVER_ERROR:
                    return new ErrorResponse(INTERNAL_SERVER_ERROR, e.getErrorCode().name(), Exceptions.toMessageString(e));
                default:
                    return new ErrorResponse(BAD_REQUEST, e.getErrorCode().name(), Exceptions.toMessageString(e));
            }
        }
        catch (RuntimeException e) {
            // Unexpected: log with the stack trace, but return only the message to the client.
            log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
            return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
        }
    }
private HttpResponse handleGET(Path path, HttpRequest request) {
if (path.matches("/application/v4/")) return root(request);
if (path.matches("/application/v4/tenant")) return tenants(request);
if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request);
if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/metering")) return metering(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return metrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
return ErrorResponse.notFoundError("Nothing at " + path);
}
    /**
     * Routes PUT requests: tenant and tenant-info updates, archive access, secret stores,
     * and global rotation overrides. Matching is first-hit-wins.
     */
    private HttpResponse handlePUT(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return allowArchiveAccess(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }
    /**
     * Routes POST requests: creation of tenants/applications/instances, keys, submissions,
     * deployments and zone operations (restart, suspend, reindex). Matching is first-hit-wins.
     */
    private HttpResponse handlePOST(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
        // Application-level deploying routes operate on the "default" instance.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
        // Legacy path ordering (environment/region before instance), kept for backwards compatibility.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }
    /** Routes PATCH requests, which partially update an application. */
    private HttpResponse handlePATCH(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
        // NOTE(review): the instance-level route delegates to the same application-level patch and
        // ignores the {instance} segment — confirm this is intended.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }
    /**
     * Routes DELETE requests: removal of tenants, applications, instances, keys, secret stores,
     * deployments, and cancellation of pending changes. Matching is first-hit-wins.
     */
    private HttpResponse handleDELETE(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return removeArchiveAccess(path.get("tenant"));
        if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
        // Application-level deploying routes operate on the "default" instance.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        // Legacy path ordering (environment/region before instance), kept for backwards compatibility.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }
private HttpResponse handleOPTIONS() {
EmptyResponse response = new EmptyResponse();
response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
return response;
}
private HttpResponse recursiveRoot(HttpRequest request) {
Slime slime = new Slime();
Cursor tenantArray = slime.setArray();
for (Tenant tenant : controller.tenants().asList())
toSlime(tenantArray.addObject(), tenant, request);
return new SlimeJsonResponse(slime);
}
private HttpResponse root(HttpRequest request) {
return recurseOverTenants(request)
? recursiveRoot(request)
: new ResourceResponse(request, "tenant");
}
private HttpResponse tenants(HttpRequest request) {
Slime slime = new Slime();
Cursor response = slime.setArray();
for (Tenant tenant : controller.tenants().asList())
tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
return new SlimeJsonResponse(slime);
}
private HttpResponse tenant(String tenantName, HttpRequest request) {
return controller.tenants().get(TenantName.from(tenantName))
.map(tenant -> tenant(tenant, request))
.orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
}
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
Slime slime = new Slime();
toSlime(slime.setObject(), tenant, request);
return new SlimeJsonResponse(slime);
}
private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
return controller.tenants().get(TenantName.from(tenantName))
.filter(tenant -> tenant.type() == Tenant.Type.cloud)
.map(tenant -> tenantInfo(((CloudTenant)tenant).info(), request))
.orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
}
private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) {
Slime slime = new Slime();
Cursor infoCursor = slime.setObject();
if (!info.isEmpty()) {
infoCursor.setString("name", info.name());
infoCursor.setString("email", info.email());
infoCursor.setString("website", info.website());
infoCursor.setString("invoiceEmail", info.invoiceEmail());
infoCursor.setString("contactName", info.contactName());
infoCursor.setString("contactEmail", info.contactEmail());
toSlime(info.address(), infoCursor);
toSlime(info.billingContact(), infoCursor);
}
return new SlimeJsonResponse(slime);
}
private void toSlime(TenantInfoAddress address, Cursor parentCursor) {
if (address.isEmpty()) return;
Cursor addressCursor = parentCursor.setObject("address");
addressCursor.setString("addressLines", address.addressLines());
addressCursor.setString("postalCodeOrZip", address.postalCodeOrZip());
addressCursor.setString("city", address.city());
addressCursor.setString("stateRegionProvince", address.stateRegionProvince());
addressCursor.setString("country", address.country());
}
private void toSlime(TenantInfoBillingContact billingContact, Cursor parentCursor) {
if (billingContact.isEmpty()) return;
Cursor addressCursor = parentCursor.setObject("billingContact");
addressCursor.setString("name", billingContact.name());
addressCursor.setString("email", billingContact.email());
addressCursor.setString("phone", billingContact.phone());
toSlime(billingContact.address(), addressCursor);
}
private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) {
return controller.tenants().get(TenantName.from(tenantName))
.filter(tenant -> tenant.type() == Tenant.Type.cloud)
.map(tenant -> updateTenantInfo(((CloudTenant)tenant), request))
.orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
}
private String getString(Inspector field, String defaultVale) {
return field.valid() ? field.asString() : defaultVale;
}
private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) {
TenantInfo oldInfo = tenant.info();
Inspector insp = toSlime(request.getData()).get();
TenantInfo mergedInfo = TenantInfo.EMPTY
.withName(getString(insp.field("name"), oldInfo.name()))
.withEmail(getString(insp.field("email"), oldInfo.email()))
.withWebsite(getString(insp.field("website"), oldInfo.email()))
.withInvoiceEmail(getString(insp.field("invoiceEmail"), oldInfo.invoiceEmail()))
.withContactName(getString(insp.field("contactName"), oldInfo.contactName()))
.withContactEmail(getString(insp.field("contactEmail"), oldInfo.contactName()))
.withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address()))
.withBillingContact(updateTenantInfoBillingContact(insp.field("billingContact"), oldInfo.billingContact()));
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withInfo(mergedInfo);
controller.tenants().store(lockedTenant);
});
return new MessageResponse("Tenant info updated");
}
private TenantInfoAddress updateTenantInfoAddress(Inspector insp, TenantInfoAddress oldAddress) {
if (!insp.valid()) return oldAddress;
return TenantInfoAddress.EMPTY
.withCountry(getString(insp.field("country"), oldAddress.country()))
.withStateRegionProvince(getString(insp.field("stateRegionProvince"), oldAddress.stateRegionProvince()))
.withCity(getString(insp.field("city"), oldAddress.city()))
.withPostalCodeOrZip(getString(insp.field("postalCodeOrZip"), oldAddress.postalCodeOrZip()))
.withAddressLines(getString(insp.field("addressLines"), oldAddress.addressLines()));
}
private TenantInfoBillingContact updateTenantInfoBillingContact(Inspector insp, TenantInfoBillingContact oldContact) {
if (!insp.valid()) return oldContact;
return TenantInfoBillingContact.EMPTY
.withName(getString(insp.field("name"), oldContact.name()))
.withEmail(getString(insp.field("email"), oldContact.email()))
.withPhone(getString(insp.field("phone"), oldContact.phone()))
.withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address()));
}
// Lists the tenant's applications as a JSON array, optionally filtered to one application
// name. Each entry carries its API url and the urls of its instances (production-only
// instances when the request asks for that).
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
TenantName tenant = TenantName.from(tenantName);
if (controller.tenants().get(tenantName).isEmpty())
return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
Slime slime = new Slime();
Cursor applicationArray = slime.setArray();
for (com.yahoo.vespa.hosted.controller.Application application : controller.applications().asList(tenant)) {
// An empty filter means "include every application".
if (applicationName.map(application.id().application().value()::equals).orElse(true)) {
Cursor applicationObject = applicationArray.addObject();
applicationObject.setString("tenant", application.id().tenant().value());
applicationObject.setString("application", application.id().application().value());
applicationObject.setString("url", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value(),
request.getUri()).toString());
Cursor instanceArray = applicationObject.setArray("instances");
for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
: application.instances().keySet()) {
Cursor instanceObject = instanceArray.addObject();
instanceObject.setString("instance", instance.value());
instanceObject.setString("url", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value() +
"/instance/" + instance.value(),
request.getUri()).toString());
}
}
}
return new SlimeJsonResponse(slime);
}
// Returns the application package last deployed to the given manually deployed (dev/perf)
// zone, as a zip named "<full-app-id>.<zone>.zip".
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
if ( ! type.environment().isManuallyDeployed())
throw new IllegalArgumentException("Only manually deployed zones have dev packages");
ZoneId zone = type.zone(controller.system());
byte[] applicationPackage = controller.applications().applicationStore().getDev(id, zone);
return new ZipResponse(id.toFullString() + "." + zone.value() + ".zip", applicationPackage);
}
// Returns a submitted application package as a zip. The "build" query property selects a
// specific build number; when absent, the latest submitted build is used.
// Throws IllegalArgumentException for a non-numeric build, NotExistsException when no
// package exists for the resolved build.
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
var tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
var applicationId = ApplicationId.from(tenantName, applicationName, InstanceName.defaultName().value());
long buildNumber;
var requestedBuild = Optional.ofNullable(request.getProperty("build")).map(build -> {
try {
return Long.parseLong(build);
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Invalid build number", e);
}
});
if (requestedBuild.isEmpty()) {
// No build requested: fall back to the application's latest submitted version.
var application = controller.applications().requireApplication(tenantAndApplication);
var latestBuild = application.latestVersion().map(ApplicationVersion::buildNumber).orElse(OptionalLong.empty());
if (latestBuild.isEmpty()) {
throw new NotExistsException("No application package has been submitted for '" + tenantAndApplication + "'");
}
buildNumber = latestBuild.getAsLong();
} else {
buildNumber = requestedBuild.get();
}
var applicationPackage = controller.applications().applicationStore().find(tenantAndApplication.tenant(), tenantAndApplication.application(), buildNumber);
var filename = tenantAndApplication + "-build" + buildNumber + ".zip";
if (applicationPackage.isEmpty()) {
throw new NotExistsException("No application package found for '" +
tenantAndApplication +
"' with build number " + buildNumber);
}
return new ZipResponse(filename, applicationPackage.get());
}
// Serializes the named application (404 via getApplication when absent) to JSON.
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
    Slime response = new Slime();
    Cursor root = response.setObject();
    toSlime(root, getApplication(tenantName, applicationName), request);
    return new SlimeJsonResponse(response);
}
// Reports the Vespa version this application should currently compile against,
// as { "compileVersion": "<full version string>" }.
private HttpResponse compileVersion(String tenantName, String applicationName) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Slime response = new Slime();
    response.setObject().setString("compileVersion", compileVersion(id).toFullString());
    return new SlimeJsonResponse(response);
}
// Serializes a single instance, including its deployment status, to JSON.
// Both the instance and its owning application must exist (404 otherwise).
private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
Slime slime = new Slime();
toSlime(slime.setObject(), getInstance(tenantName, applicationName, instanceName),
controller.jobController().deploymentStatus(getApplication(tenantName, applicationName)), request);
return new SlimeJsonResponse(slime);
}
// Registers the authenticated user's developer key (PEM in request field "key") on a cloud
// tenant, and returns the resulting full list of developer keys.
// Throws IllegalArgumentException for non-cloud tenants.
private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
Principal user = request.getJDiscRequest().getUserPrincipal();
String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
Slime root = new Slime();
// Serialize the key list inside the lock so the response reflects the stored state.
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
tenant = tenant.withDeveloperKey(developerKey, user);
toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
controller.tenants().store(tenant);
});
return new SlimeJsonResponse(root);
}
// Validates that the named secret store is reachable from the given deployment, by asking
// the config server to read the given parameter in the given AWS region.
// Responds with { "target": <deployment>, "result": <config server response> }, or the raw
// config server response as a 500 when it is not valid JSON.
private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) {
    var awsRegion = request.getProperty("aws-region");
    var parameterName = request.getProperty("parameter-name");
    var applicationId = ApplicationId.fromFullString(request.getProperty("application-id"));
    var zoneId = ZoneId.from(request.getProperty("zone"));
    var deploymentId = new DeploymentId(applicationId, zoneId);
    // Check the tenant type BEFORE casting: the previous cast-then-check order threw
    // ClassCastException for non-cloud tenants instead of returning the intended 400.
    var requiredTenant = controller.tenants().require(applicationId.tenant());
    if (requiredTenant.type() != Tenant.Type.cloud) {
        return ErrorResponse.badRequest("Tenant '" + applicationId.tenant() + "' is not a cloud tenant");
    }
    var tenant = (CloudTenant) requiredTenant;
    var tenantSecretStore = tenant.tenantSecretStores()
            .stream()
            .filter(secretStore -> secretStore.getName().equals(secretStoreName))
            .findFirst();
    if (tenantSecretStore.isEmpty())
        return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'");
    var response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName);
    try {
        var responseRoot = new Slime();
        var responseCursor = responseRoot.setObject();
        responseCursor.setString("target", deploymentId.toString());
        var responseResultCursor = responseCursor.setObject("result");
        var responseSlime = SlimeUtils.jsonToSlime(response);
        SlimeUtils.copyObject(responseSlime.get(), responseResultCursor);
        return new SlimeJsonResponse(responseRoot);
    } catch (JsonParseException e) {
        // Config server answered with something that is not JSON: pass it through verbatim.
        return ErrorResponse.internalServerError(response);
    }
}
// Removes a developer key (PEM in request field "key") from a cloud tenant, and returns
// the remaining developer keys. Throws IllegalArgumentException for non-cloud tenants.
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    // Note: a lookup of the key's owning Principal previously happened here, but its
    // result was never used, so it has been removed.
    Slime root = new Slime();
    // Serialize the key list inside the lock so the response reflects the stored state.
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withoutDeveloperKey(developerKey);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}
// Serializes developer keys as one { "key": <PEM>, "user": <principal name> } object per
// entry, in the map's iteration order.
private void toSlime(Cursor keysArray, Map<PublicKey, Principal> keys) {
    for (Map.Entry<PublicKey, Principal> entry : keys.entrySet()) {
        Cursor keyObject = keysArray.addObject();
        keyObject.setString("key", KeyUtils.toPem(entry.getKey()));
        keyObject.setString("user", entry.getValue().getName());
    }
}
// Adds a deploy key (PEM in request field "key") to the application, and returns the
// resulting full list of deploy keys as { "keys": [ <PEM>, ... ] }.
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
Slime root = new Slime();
// Serialize the key list inside the lock so the response reflects the stored state.
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
application = application.withDeployKey(deployKey);
application.get().deployKeys().stream()
.map(KeyUtils::toPem)
.forEach(root.setObject().setArray("keys")::addString);
controller.applications().store(application);
});
return new SlimeJsonResponse(root);
}
// Removes a deploy key (PEM in request field "key") from the application, and returns the
// remaining deploy keys as { "keys": [ <PEM>, ... ] }.
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
Slime root = new Slime();
// Serialize the key list inside the lock so the response reflects the stored state.
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
application = application.withoutDeployKey(deployKey);
application.get().deployKeys().stream()
.map(KeyUtils::toPem)
.forEach(root.setObject().setArray("keys")::addString);
controller.applications().store(application);
});
return new SlimeJsonResponse(root);
}
// Registers a new secret store on a cloud tenant: creates the tenant IAM policy, registers
// the store with the secret service, persists it on the tenant, and returns the tenant's
// full list of secret stores. Request body must carry "awsId", "externalId" and "role".
private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    var data = toSlime(request.getData()).get();
    var awsId = mandatory("awsId", data).asString();
    var externalId = mandatory("externalId", data).asString();
    var role = mandatory("role", data).asString();
    var tenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
    var tenantSecretStore = new TenantSecretStore(name, awsId, role);
    // Plain concatenation here: the previous code wrapped an already-concatenated string
    // in String.format with no format specifiers, which would throw if the store's
    // string form ever contained a '%'.
    if (!tenantSecretStore.isValid()) {
        return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid");
    }
    if (tenant.tenantSecretStores().contains(tenantSecretStore)) {
        return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured");
    }
    controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role);
    controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId);
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withSecretStore(tenantSecretStore);
        controller.tenants().store(lockedTenant);
    });
    // Re-read the tenant so the response reflects the stored state.
    tenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
    var slime = new Slime();
    toSlime(slime.setObject(), tenant.tenantSecretStores());
    return new SlimeJsonResponse(slime);
}
// Deletes the named secret store from the tenant: removes it from the secret service and
// the tenant IAM policy, then from the stored tenant, and returns the remaining stores.
// NOTE(review): the request parameter is unused; external deletions happen before the
// tenant record is updated, so a failure mid-way leaves the store configured but inert.
private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) {
var tenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
var optionalSecretStore = tenant.tenantSecretStores().stream()
.filter(secretStore -> secretStore.getName().equals(name))
.findFirst();
if (optionalSecretStore.isEmpty())
return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found");
var tenantSecretStore = optionalSecretStore.get();
controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore);
controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole());
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withoutSecretStore(tenantSecretStore);
controller.tenants().store(lockedTenant);
});
// Re-read the tenant so the response reflects the stored state.
tenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
var slime = new Slime();
toSlime(slime.setObject(), tenant.tenantSecretStores());
return new SlimeJsonResponse(slime);
}
// Clears the archive access role on a cloud tenant.
// NOTE(review): the response serializes the tenant's secret stores, not anything
// archive-related — presumably to mirror the other tenant mutation endpoints; verify.
private HttpResponse removeArchiveAccess(String tenantName) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
var tenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withArchiveAccessRole(Optional.empty());
controller.tenants().store(lockedTenant);
});
// Re-read the tenant so the response reflects the stored state.
tenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
var slime = new Slime();
toSlime(slime.setObject(), tenant.tenantSecretStores());
return new SlimeJsonResponse(slime);
}
// Applies a partial update to the application. Supported fields: "majorVersion"
// (0 clears the pinned major) and "pemDeployKey" (adds a deploy key).
// Returns a message listing the changes actually applied.
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
Inspector requestObject = toSlime(request.getData()).get();
StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
Inspector majorVersionField = requestObject.field("majorVersion");
if (majorVersionField.valid()) {
// 0 is the sentinel for "unpin the major version".
Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
application = application.withMajorVersion(majorVersion);
messageBuilder.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
}
Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
if (pemDeployKeyField.valid()) {
String pemDeployKey = pemDeployKeyField.asString();
PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
application = application.withDeployKey(deployKey);
messageBuilder.add("Added deploy key " + pemDeployKey);
}
controller.applications().store(application);
});
return new MessageResponse(messageBuilder.toString());
}
// Looks up the application, throwing NotExistsException (404 semantics) when absent.
private com.yahoo.vespa.hosted.controller.Application getApplication(String tenantName, String applicationName) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Optional<com.yahoo.vespa.hosted.controller.Application> application = controller.applications().getApplication(id);
    if (application.isEmpty()) throw new NotExistsException(id + " not found");
    return application.get();
}
// Looks up the instance, throwing NotExistsException (404 semantics) when absent.
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Optional<Instance> instance = controller.applications().getInstance(id);
    if (instance.isEmpty()) throw new NotExistsException(id + " not found");
    return instance.get();
}
// Lists the nodes allocated to the given deployment, as reported by the node repository,
// with state, orchestration status, version, flavor, resources and cluster membership.
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, id);
Slime slime = new Slime();
Cursor nodesArray = slime.setObject().setArray("nodes");
for (Node node : nodes) {
Cursor nodeObject = nodesArray.addObject();
nodeObject.setString("hostname", node.hostname().value());
nodeObject.setString("state", valueOf(node.state()));
node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
nodeObject.setString("orchestration", valueOf(node.serviceState()));
nodeObject.setString("version", node.currentVersion().toString());
nodeObject.setString("flavor", node.flavor());
toSlime(node.resources(), nodeObject);
nodeObject.setBool("fastDisk", node.resources().diskSpeed() == NodeResources.DiskSpeed.fast);
nodeObject.setString("clusterId", node.clusterId());
nodeObject.setString("clusterType", valueOf(node.clusterType()));
}
return new SlimeJsonResponse(slime);
}
// Serializes the autoscaling view of each cluster in the given deployment: configured
// min/max resources, current allocation, any pending target, suggestions, utilization,
// scaling history and status.
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);
Slime slime = new Slime();
Cursor clustersObject = slime.setObject().setObject("clusters");
for (Cluster cluster : application.clusters().values()) {
Cursor clusterObject = clustersObject.setObject(cluster.id().value());
clusterObject.setString("type", cluster.type().name());
toSlime(cluster.min(), clusterObject.setObject("min"));
toSlime(cluster.max(), clusterObject.setObject("max"));
toSlime(cluster.current(), clusterObject.setObject("current"));
// Only emit a target when it differs from the current allocation (numbers-wise).
if (cluster.target().isPresent()
&& ! cluster.target().get().justNumbers().equals(cluster.current().justNumbers()))
toSlime(cluster.target().get(), clusterObject.setObject("target"));
cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested")));
utilizationToSlime(cluster.utilization(), clusterObject.setObject("utilization"));
scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus());
clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis());
clusterObject.setDouble("maxQueryGrowthRate", cluster.maxQueryGrowthRate());
clusterObject.setDouble("currentQueryFractionOfMax", cluster.currentQueryFractionOfMax());
}
return new SlimeJsonResponse(slime);
}
// Maps a node state to its wire-format string. The strings happen to equal the enum
// constant names, but are spelled out so the API contract is explicit and decoupled
// from enum renames; unknown states fail fast.
private static String valueOf(Node.State state) {
switch (state) {
case failed: return "failed";
case parked: return "parked";
case dirty: return "dirty";
case ready: return "ready";
case active: return "active";
case inactive: return "inactive";
case reserved: return "reserved";
case provisioned: return "provisioned";
default: throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
}
}
// Maps an orchestration service state to its wire-format string. Unlike the other
// valueOf overloads this one never throws: unknown falls through to "unknown".
static String valueOf(Node.ServiceState state) {
switch (state) {
case expectedUp: return "expectedUp";
case allowedDown: return "allowedDown";
case permanentlyDown: return "permanentlyDown";
case unorchestrated: return "unorchestrated";
case unknown: break;
}
return "unknown";
}
// Maps a node cluster type to its wire-format string; unknown types fail fast.
private static String valueOf(Node.ClusterType type) {
switch (type) {
case admin: return "admin";
case content: return "content";
case container: return "container";
case combined: return "combined";
default: throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
}
}
// Maps a disk speed to its wire-format string; unknown values fail fast.
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
switch (diskSpeed) {
case fast : return "fast";
case slow : return "slow";
case any : return "any";
default: throw new IllegalArgumentException("Unknown disk speed '" + diskSpeed.name() + "'");
}
}
// Maps a storage type to its wire-format string; unknown values fail fast.
private static String valueOf(NodeResources.StorageType storageType) {
switch (storageType) {
case remote : return "remote";
case local : return "local";
case any : return "any";
default: throw new IllegalArgumentException("Unknown storage type '" + storageType.name() + "'");
}
}
// Streams Vespa logs for the given deployment directly to the client; queryParameters
// (e.g. time range filters) are passed through to the config server unchanged.
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    DeploymentId deployment = new DeploymentId(application, zone);
    InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
    return new HttpResponse(200) {
        @Override
        public void render(OutputStream outputStream) throws IOException {
            // Close the upstream stream when rendering completes; the previous code
            // leaked the connection to the config server.
            try (InputStream logs = logStream) {
                logs.transferTo(outputStream);
            }
        }
    };
}
// Fetches proton (content node search) metrics for the given deployment from the config
// server and returns them as a JSON document.
private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
DeploymentId deployment = new DeploymentId(application, zone);
List<ProtonMetrics> protonMetrics = controller.serviceRegistry().configServer().getProtonMetrics(deployment);
return buildResponseFromProtonMetrics(protonMetrics);
}
// Wraps the per-node proton metrics in a { "metrics": [ ... ] } JSON document.
// Serialization failures are logged and surfaced as an empty 500 response.
private JsonResponse buildResponseFromProtonMetrics(List<ProtonMetrics> protonMetrics) {
try {
var jsonObject = jsonMapper.createObjectNode();
var jsonArray = jsonMapper.createArrayNode();
for (ProtonMetrics metrics : protonMetrics) {
jsonArray.add(metrics.toJson());
}
jsonObject.set("metrics", jsonArray);
return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonObject));
} catch (JsonProcessingException e) {
log.log(Level.SEVERE, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
return new JsonResponse(500, "");
}
}
// Manually triggers the given job. Body flags: "skipTests" skips the test jobs,
// "reTrigger" re-runs the job with its existing change instead of forcing a new trigger.
// Responds with the names of the jobs actually triggered (possibly none).
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
Inspector requestObject = toSlime(request.getData()).get();
boolean requireTests = ! requestObject.field("skipTests").asBool();
boolean reTrigger = requestObject.field("reTrigger").asBool();
String triggered = reTrigger
? controller.applications().deploymentTrigger()
.reTrigger(id, type).type().jobName()
: controller.applications().deploymentTrigger()
.forceTrigger(id, type, request.getJDiscRequest().getUserPrincipal().getName(), requireTests)
.stream().map(job -> job.type().jobName()).collect(joining(", "));
return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
: "Triggered " + triggered + " for " + id);
}
// Pauses triggering of the given job for the maximum allowed pause duration.
private HttpResponse pause(ApplicationId id, JobType type) {
    Instant pausedUntil = controller.clock().instant().plus(DeploymentTrigger.maxPause);
    controller.applications().deploymentTrigger().pauseJob(id, type, pausedUntil);
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
// Lifts any pause on the given job so it may be triggered again.
private HttpResponse resume(ApplicationId id, JobType type) {
controller.applications().deploymentTrigger().resumeJob(id, type);
return new MessageResponse(type.jobName() + " for " + id + " resumed");
}
// Serializes an application overview: identity, deployment url, latest submitted version,
// pending/outstanding changes, its instances (production-only when requested), deploy
// keys, metrics and activity, and issue/owner bookkeeping.
private void toSlime(Cursor object, com.yahoo.vespa.hosted.controller.Application application, HttpRequest request) {
object.setString("tenant", application.id().tenant().value());
object.setString("application", application.id().application().value());
object.setString("deployments", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value() +
"/job/",
request.getUri()).toString());
DeploymentStatus status = controller.jobController().deploymentStatus(application);
application.latestVersion().ifPresent(version -> toSlime(version, object.setObject("latestVersion")));
application.projectId().ifPresent(id -> object.setLong("projectId", id));
// Top-level deploying/outstandingChange reflect the first instance only.
application.instances().values().stream().findFirst().ifPresent(instance -> {
if ( ! instance.change().isEmpty())
toSlime(object.setObject("deploying"), instance.change());
if ( ! status.outstandingChange(instance.name()).isEmpty())
toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
});
application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
Cursor instancesArray = object.setArray("instances");
for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
: application.instances().values())
toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);
application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
Cursor metricsObject = object.setObject("metrics");
metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
Cursor activity = object.setObject("activity");
application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
// Serializes one instance within an application overview: pending/outstanding changes,
// configured change blockers, global endpoints, and its deployments (fully expanded only
// when the request asks to recurse).
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
object.setString("instance", instance.name().value());
if (deploymentSpec.instance(instance.name()).isPresent()) {
// NOTE(review): jobStatus is computed but never used here — looks like leftover
// from an earlier serialization; candidate for removal once confirmed.
List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
.steps(deploymentSpec.requireInstance(instance.name()))
.sortedJobs(status.instanceJobs(instance.name()).values());
if ( ! instance.change().isEmpty())
toSlime(object.setObject("deploying"), instance.change());
if ( ! status.outstandingChange(instance.name()).isEmpty())
toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
// One entry per configured change blocker, with its blocked kinds and time window.
Cursor changeBlockers = object.setArray("changeBlockers");
deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
Cursor changeBlockerObject = changeBlockers.addObject();
changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
Cursor days = changeBlockerObject.setArray("days");
changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
Cursor hours = changeBlockerObject.setArray("hours");
changeBlocker.window().hours().forEach(hours::addLong);
}));
}
globalEndpointsToSlime(object, instance);
// Order deployments by the deployment spec when the instance is declared in it.
List<Deployment> deployments = deploymentSpec.instance(instance.name())
.map(spec -> new DeploymentSteps(spec, controller::system))
.map(steps -> steps.sortedDeployments(instance.deployments().values()))
.orElse(List.copyOf(instance.deployments().values()));
Cursor deploymentsArray = object.setArray("deployments");
for (Deployment deployment : deployments) {
Cursor deploymentObject = deploymentsArray.addObject();
if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
if (recurseOverDeployments(request))
toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
else {
// Shallow mode: just identify the deployment and link to its resource.
deploymentObject.setString("environment", deployment.zone().environment().value());
deploymentObject.setString("region", deployment.zone().region().value());
deploymentObject.setString("url", withPath(request.getUri().getPath() +
"/instance/" + instance.name().value() +
"/environment/" + deployment.zone().environment().value() +
"/region/" + deployment.zone().region().value(),
request.getUri()).toString());
}
}
}
// Adds "globalRotations" (urls of the instance's non-legacy, rotation-backed endpoints,
// deduplicated in first-seen order) and, when a rotation is assigned, "rotationId".
private void globalEndpointsToSlime(Cursor object, Instance instance) {
    var globalEndpointUrls = new LinkedHashSet<String>();
    for (Endpoint endpoint : controller.routing().endpointsOf(instance.id())
                                       .requiresRotation()
                                       .not().legacy()
                                       .asList())
        globalEndpointUrls.add(endpoint.url().toString());
    var globalRotationsArray = object.setArray("globalRotations");
    for (String url : globalEndpointUrls)
        globalRotationsArray.addString(url);
    if ( ! instance.rotations().isEmpty())
        object.setString("rotationId", instance.rotations().get(0).rotationId().asString());
}
// Serializes a single instance in full: identity, source/version info, pending and
// outstanding changes, change blockers, global endpoints, its deployments (expanded when
// the request recurses), placeholder entries for declared-but-undeployed production
// zones, deploy keys, metrics, activity and issue/owner bookkeeping.
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
com.yahoo.vespa.hosted.controller.Application application = status.application();
object.setString("tenant", instance.id().tenant().value());
object.setString("application", instance.id().application().value());
object.setString("instance", instance.id().instance().value());
object.setString("deployments", withPath("/application/v4" +
"/tenant/" + instance.id().tenant().value() +
"/application/" + instance.id().application().value() +
"/instance/" + instance.id().instance().value() + "/job/",
request.getUri()).toString());
application.latestVersion().ifPresent(version -> {
sourceRevisionToSlime(version.source(), object.setObject("source"));
version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
version.commit().ifPresent(commit -> object.setString("commit", commit));
});
application.projectId().ifPresent(id -> object.setLong("projectId", id));
if (application.deploymentSpec().instance(instance.name()).isPresent()) {
// NOTE(review): jobStatus is computed but never used here — same leftover as in the
// sibling toSlime overload; candidate for removal once confirmed.
List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
.steps(application.deploymentSpec().requireInstance(instance.name()))
.sortedJobs(status.instanceJobs(instance.name()).values());
if ( ! instance.change().isEmpty())
toSlime(object.setObject("deploying"), instance.change());
if ( ! status.outstandingChange(instance.name()).isEmpty())
toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
// One entry per configured change blocker, with its blocked kinds and time window.
Cursor changeBlockers = object.setArray("changeBlockers");
application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
Cursor changeBlockerObject = changeBlockers.addObject();
changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
Cursor days = changeBlockerObject.setArray("days");
changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
Cursor hours = changeBlockerObject.setArray("hours");
changeBlocker.window().hours().forEach(hours::addLong);
}));
}
application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
globalEndpointsToSlime(object, instance);
// Order deployments by the deployment spec when the instance is declared in it.
List<Deployment> deployments =
application.deploymentSpec().instance(instance.name())
.map(spec -> new DeploymentSteps(spec, controller::system))
.map(steps -> steps.sortedDeployments(instance.deployments().values()))
.orElse(List.copyOf(instance.deployments().values()));
Cursor instancesArray = object.setArray("instances");
for (Deployment deployment : deployments) {
Cursor deploymentObject = instancesArray.addObject();
if (deployment.zone().environment() == Environment.prod) {
// With exactly one rotation, inline its status for this deployment.
if (instance.rotations().size() == 1) {
toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment),
deploymentObject);
}
if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
}
}
if (recurseOverDeployments(request))
toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
else {
// Shallow mode: just identify the deployment and link to its resource.
deploymentObject.setString("environment", deployment.zone().environment().value());
deploymentObject.setString("region", deployment.zone().region().value());
deploymentObject.setString("instance", instance.id().instance().value());
deploymentObject.setString("url", withPath(request.getUri().getPath() +
"/environment/" + deployment.zone().environment().value() +
"/region/" + deployment.zone().region().value(),
request.getUri()).toString());
}
}
// Declared production deployment zones with no actual deployment yet get a stub entry.
status.jobSteps().keySet().stream()
.filter(job -> job.application().instance().equals(instance.name()))
.filter(job -> job.type().isProduction() && job.type().isDeployment())
.map(job -> job.type().zone(controller.system()))
.filter(zone -> ! instance.deployments().containsKey(zone))
.forEach(zone -> {
Cursor deploymentObject = instancesArray.addObject();
deploymentObject.setString("environment", zone.environment().value());
deploymentObject.setString("region", zone.region().value());
});
// "pemDeployKey" (singular) is kept for backwards compatibility alongside the array.
application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
Cursor metricsObject = object.setObject("metrics");
metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
Cursor activity = object.setObject("activity");
application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/**
 * Returns information about the given instance's deployment in the given zone.
 *
 * @throws NotExistsException if the instance, or its deployment in the zone, does not exist
 */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment,
                                String region, HttpRequest request) {
    ApplicationId instanceId = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().getInstance(instanceId)
                                  .orElseThrow(() -> new NotExistsException(instanceId + " not found"));
    DeploymentId deploymentId = new DeploymentId(instance.id(), requireZone(environment, region));
    Deployment deployment = instance.deployments().get(deploymentId.zoneId());
    if (deployment == null)
        throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId());

    Slime slime = new Slime();
    toSlime(slime.setObject(), deploymentId, deployment, request);
    return new SlimeJsonResponse(slime);
}
/** Serializes the platform version and (known) application revision of the given change. */
private void toSlime(Cursor object, Change change) {
    change.platform().ifPresent(platform -> object.setString("version", platform.toString()));
    change.application().ifPresent(revision -> {
        if ( ! revision.isUnknown())
            toSlime(revision, object.setObject("revision"));
    });
}
/** Serializes the given endpoint: cluster, TLS flag, URL, scope and routing method. */
private void toSlime(Endpoint endpoint, Cursor object) {
    String scope = endpointScopeString(endpoint.scope());
    String routingMethod = routingMethodString(endpoint.routingMethod());
    object.setString("cluster", endpoint.cluster().value());
    object.setBool("tls", endpoint.tls());
    object.setString("url", endpoint.url().toString());
    object.setString("scope", scope);
    object.setString("routingMethod", routingMethod);
}
/**
 * Serializes the given deployment to the given cursor: identifiers, endpoints, drill-down
 * links, versions, rotation status, a coarse job status, recent activity and metrics.
 * Field order is part of the wire format and must be preserved.
 */
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
    response.setString("tenant", deploymentId.applicationId().tenant().value());
    response.setString("application", deploymentId.applicationId().application().value());
    response.setString("instance", deploymentId.applicationId().instance().value());
    response.setString("environment", deploymentId.zoneId().environment().value());
    response.setString("region", deploymentId.zoneId().region().value());
    var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));
    // Non-legacy zone-scoped endpoints for this deployment, followed by global endpoints targeting it.
    var endpointArray = response.setArray("endpoints");
    EndpointList zoneEndpoints = controller.routing().endpointsOf(deploymentId)
                                           .scope(Endpoint.Scope.zone)
                                           .not().legacy();
    for (var endpoint : controller.routing().directEndpoints(zoneEndpoints, deploymentId.applicationId())) {
        toSlime(endpoint, endpointArray.addObject());
    }
    EndpointList globalEndpoints = controller.routing().endpointsOf(application, deploymentId.applicationId().instance())
                                             .not().legacy()
                                             .targets(deploymentId.zoneId());
    for (var endpoint : controller.routing().directEndpoints(globalEndpoints, deploymentId.applicationId())) {
        toSlime(endpoint, endpointArray.addObject());
    }
    // Drill-down links: cluster view, node repository listing and monitoring dashboard.
    response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
    response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
    response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
    response.setString("version", deployment.version().toFullString());
    response.setString("revision", deployment.applicationVersion().id());
    response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
    // Expiry is only present for zones with a configured deployment TTL.
    controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
              .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));
    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
    sourceRevisionToSlime(deployment.applicationVersion().source(), response);
    var instance = application.instances().get(deploymentId.applicationId().instance());
    if (instance != null) {
        // Rotation status only applies to production deployments with assigned rotations.
        if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);
        // Derive a coarse status for the job deploying to this zone:
        // "complete" when no run is outstanding, "pending" when not yet ready to run, else "running".
        JobType.from(controller.system(), deployment.zone())
               .map(type -> new JobId(instance.id(), type))
               .map(status.jobSteps()::get)
               .ifPresent(stepStatus -> {
                   JobControllerApiHandlerHelper.applicationVersionToSlime(
                           response.setObject("applicationVersion"), deployment.applicationVersion());
                   if (!status.jobsToRun().containsKey(stepStatus.job().get()))
                       response.setString("status", "complete");
                   else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true))
                       response.setString("status", "pending");
                   else response.setString("status", "running");
               });
    }
    // Most recent feed/query activity observed for this deployment.
    Cursor activity = response.setObject("activity");
    deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    // Latest resource/latency metrics snapshot for this deployment.
    DeploymentMetrics metrics = deployment.metrics();
    Cursor metricsObject = response.setObject("metrics");
    metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
    metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
    metricsObject.setDouble("documentCount", metrics.documentCount());
    metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
    metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
    metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}
/** Serializes build number, id and source information of the given application version, unless it is unknown. */
private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
    if (applicationVersion.isUnknown())
        return;

    object.setLong("buildNumber", applicationVersion.buildNumber().getAsLong());
    object.setString("hash", applicationVersion.id());
    sourceRevisionToSlime(applicationVersion.source(), object.setObject("source"));
    applicationVersion.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
    applicationVersion.commit().ifPresent(commit -> object.setString("commit", commit));
}
/** Serializes repository, branch and commit of the given source revision, if present. */
private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
    revision.ifPresent(source -> {
        object.setString("gitRepository", source.repository());
        object.setString("gitBranch", source.branch());
        object.setString("gitCommit", source.commit());
    });
}
/** Serializes the given rotation state under a "bcpStatus" object. */
private void toSlime(RotationState state, Cursor object) {
    object.setObject("bcpStatus").setString("rotationStatus", rotationStateString(state));
}
/** Serializes the status of each rotation assigned to the given deployment. */
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
    Cursor array = object.setArray("endpointStatus");
    for (AssignedRotation assigned : rotations) {
        var rotationId = assigned.rotationId();
        var targets = status.of(rotationId);
        Cursor statusObject = array.addObject();
        statusObject.setString("endpointId", assigned.endpointId().id());
        statusObject.setString("rotationId", rotationId.asString());
        statusObject.setString("clusterId", assigned.clusterId().value());
        statusObject.setString("status", rotationStateString(status.of(rotationId, deployment)));
        statusObject.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
    }
}
/** Returns the monitoring-dashboard URI for the given deployment, as configured in the zone registry. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
    return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}
/**
 * Returns a non-broken, released version at least as old as the oldest platform the given application is on.
 *
 * If no known version is applicable, the newest version at least as old as the oldest platform is selected,
 * among all versions released for this system. If no such versions exists, throws an IllegalStateException.
 */
private Version compileVersion(TenantAndApplicationId id) {
    Version oldestPlatform = controller.applications().oldestInstalledPlatform(id);
    VersionStatus versionStatus = controller.readVersionStatus();
    return versionStatus.versions().stream()
                        .filter(version -> version.confidence().equalOrHigherThan(VespaVersion.Confidence.low))
                        .filter(VespaVersion::isReleased)
                        .map(VespaVersion::versionNumber)
                        .filter(version -> ! version.isAfter(oldestPlatform))
                        .max(Comparator.naturalOrder())
                        .orElseGet(() -> {
                            // Build the set of versions already known to the version status once,
                            // instead of re-collecting it for every candidate in the stream below.
                            var knownVersions = versionStatus.versions().stream()
                                                             .map(VespaVersion::versionNumber)
                                                             .collect(Collectors.toSet());
                            return controller.mavenRepository().metadata().versions().stream()
                                             .filter(version -> ! version.isAfter(oldestPlatform))
                                             .filter(version -> ! knownVersions.contains(version))
                                             .max(Comparator.naturalOrder())
                                             .orElseThrow(() -> new IllegalStateException("No available releases of " +
                                                                                          controller.mavenRepository().artifactId()));
                        });
}
/** Takes the given deployment in or out of service, for both rotation- and policy-backed global endpoints. */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().requireInstance(id);
    ZoneId zone = requireZone(environment, region);
    if ( ! instance.deployments().containsKey(zone))
        throw new NotExistsException(instance + " has no deployment in " + zone);

    DeploymentId deploymentId = new DeploymentId(instance.id(), zone);
    setGlobalRotationStatus(deploymentId, inService, request);
    setGlobalEndpointStatus(deploymentId, inService, request);
    return new MessageResponse(String.format("Successfully set %s in %s %s service",
                                             instance.id().toShortString(), zone, inService ? "in" : "out of"));
}
/** Set the global endpoint status for given deployment. This only applies to global endpoints backed by a cloud service */
private void setGlobalEndpointStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
    GlobalRouting.Agent agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant;
    controller.routing().policies().setGlobalRoutingStatus(deployment,
                                                           inService ? GlobalRouting.Status.in : GlobalRouting.Status.out,
                                                           agent);
}
/** Set the global rotation status for given deployment. This only applies to global endpoints backed by a rotation */
private void setGlobalRotationStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
    Inspector requestData = toSlime(request.getData()).get();
    String reason = mandatory("reason", requestData).asString();
    GlobalRouting.Agent agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant;
    EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
    EndpointStatus endpointStatus = new EndpointStatus(status, reason, agent.name(),
                                                       controller.clock().instant().getEpochSecond());
    controller.routing().setGlobalRotationStatus(deployment, endpointStatus);
}
/** Returns the rotation-backed global endpoint status overrides for the given deployment. */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    Cursor array = slime.setObject().setArray("globalrotationoverride");
    controller.routing().globalRotationStatus(deploymentId)
              .forEach((endpoint, status) -> {
                  // NOTE(review): each endpoint contributes TWO consecutive array entries — the
                  // upstream id as a bare string, then an object with the status details. Presumably
                  // a legacy wire format; confirm with consumers before restructuring.
                  array.addString(endpoint.upstreamIdOf(deploymentId));
                  Cursor statusObject = array.addObject();
                  statusObject.setString("status", status.getStatus().name());
                  // Reason and agent may be null; serialize as empty strings to keep the fields present.
                  statusObject.setString("reason", status.getReason() == null ? "" : status.getReason());
                  statusObject.setString("agent", status.getAgent() == null ? "" : status.getAgent());
                  statusObject.setLong("timestamp", status.getEpoch());
              });
    return new SlimeJsonResponse(slime);
}
/** Returns the BCP status of the given rotation for the given instance's deployment in the given zone. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().requireInstance(applicationId);
    ZoneId zone = requireZone(environment, region);
    RotationId rotation = findRotationId(instance, endpointId);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " has no deployment in " + zone);

    Slime slime = new Slime();
    toSlime(instance.rotationStatus().of(rotation, deployment), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/**
 * Returns resource metering data for the given application: the current usage rate,
 * aggregates for this and last month, and a per-instance time series of snapshots.
 */
private HttpResponse metering(String tenant, String application, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    MeteringData meteringData = controller.serviceRegistry()
                                          .meteringService()
                                          .getMeteringData(TenantName.from(tenant), ApplicationName.from(application));
    // Current resource usage rate.
    ResourceAllocation currentSnapshot = meteringData.getCurrentSnapshot();
    Cursor currentRate = root.setObject("currentrate");
    currentRate.setDouble("cpu", currentSnapshot.getCpuCores());
    currentRate.setDouble("mem", currentSnapshot.getMemoryGb());
    currentRate.setDouble("disk", currentSnapshot.getDiskGb());
    // Aggregated usage for the current month so far.
    ResourceAllocation thisMonth = meteringData.getThisMonth();
    Cursor thismonth = root.setObject("thismonth");
    thismonth.setDouble("cpu", thisMonth.getCpuCores());
    thismonth.setDouble("mem", thisMonth.getMemoryGb());
    thismonth.setDouble("disk", thisMonth.getDiskGb());
    // Aggregated usage for the previous month.
    ResourceAllocation lastMonth = meteringData.getLastMonth();
    Cursor lastmonth = root.setObject("lastmonth");
    lastmonth.setDouble("cpu", lastMonth.getCpuCores());
    lastmonth.setDouble("mem", lastMonth.getMemoryGb());
    lastmonth.setDouble("disk", lastMonth.getDiskGb());
    // Per-instance history, split into parallel cpu/mem/disk time series keyed by instance name.
    Map<ApplicationId, List<ResourceSnapshot>> history = meteringData.getSnapshotHistory();
    Cursor details = root.setObject("details");
    Cursor detailsCpu = details.setObject("cpu");
    Cursor detailsMem = details.setObject("mem");
    Cursor detailsDisk = details.setObject("disk");
    history.forEach((applicationId, resources) -> {
        String instanceName = applicationId.instance().value();
        Cursor detailsCpuApp = detailsCpu.setObject(instanceName);
        Cursor detailsMemApp = detailsMem.setObject(instanceName);
        Cursor detailsDiskApp = detailsDisk.setObject(instanceName);
        Cursor detailsCpuData = detailsCpuApp.setArray("data");
        Cursor detailsMemData = detailsMemApp.setArray("data");
        Cursor detailsDiskData = detailsDiskApp.setArray("data");
        // Each snapshot becomes one {unixms, value} point in each of the three series.
        resources.forEach(resourceSnapshot -> {
            Cursor cpu = detailsCpuData.addObject();
            cpu.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
            cpu.setDouble("value", resourceSnapshot.getCpuCores());
            Cursor mem = detailsMemData.addObject();
            mem.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
            mem.setDouble("value", resourceSnapshot.getMemoryGb());
            Cursor disk = detailsDiskData.addObject();
            disk.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
            disk.setDouble("value", resourceSnapshot.getDiskGb());
        });
    });
    return new SlimeJsonResponse(slime);
}
/** Returns the change currently rolling out to the given instance, if any; empty object otherwise. */
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    Change change = instance.change();
    if ( ! change.isEmpty()) {
        change.platform().ifPresent(platform -> root.setString("platform", platform.toString()));
        change.application().ifPresent(revision -> root.setString("application", revision.id()));
        root.setBool("pinned", change.isPinned());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns whether the given deployment is currently suspended by the orchestrator. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    slime.setObject().setBool("suspended", controller.applications().isSuspended(deploymentId));
    return new SlimeJsonResponse(slime);
}
/** Lists the services running in the given deployment, as seen by the config servers. */
private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
    ZoneId zone = requireZone(environment, region);
    ApplicationId id = new ApplicationId.Builder().tenant(tenantName)
                                                  .applicationName(applicationName)
                                                  .instanceName(instanceName)
                                                  .build();
    ServiceApiResponse response = new ServiceApiResponse(zone,
                                                         id,
                                                         controller.zoneRegistry().getConfigServerApiUris(zone),
                                                         request.getUri());
    response.setResponse(applicationView);
    return response;
}
/**
 * Proxies a service API request to the given service in the given deployment.
 * Cluster controller status pages are fetched and returned directly as HTML;
 * everything else is forwarded through the config server's service API.
 */
private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));

    if ("container-clustercontroller".equals(serviceName) && restPath.contains("/status/")) {
        String[] parts = restPath.split("/status/");
        // Guard against paths ending in "/status/": split then yields a single element,
        // and the previous code failed with an ArrayIndexOutOfBoundsException (HTTP 500).
        if (parts.length < 2)
            throw new IllegalArgumentException("Missing status page path after '/status/' in: " + restPath);
        String result = controller.serviceRegistry().configServer().getClusterControllerStatus(deploymentId, parts[0], parts[1]);
        return new HtmlResponse(result);
    }

    Map<?,?> result = controller.serviceRegistry().configServer().getServiceApiResponse(deploymentId, serviceName, restPath);
    ServiceApiResponse response = new ServiceApiResponse(deploymentId.zoneId(),
                                                         deploymentId.applicationId(),
                                                         controller.zoneRegistry().getConfigServerApiUris(deploymentId.zoneId()),
                                                         request.getUri());
    response.setResponse(result, serviceName, restPath);
    return response;
}
/** Returns the content at the given path inside the deployed application package. */
private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, String restPath, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deploymentId = new DeploymentId(id, requireZone(environment, region));
    return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, "/" + restPath, request.getUri());
}
/** Updates an existing tenant from the specification in the request, and returns the updated tenant. */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName); // 404 rather than implicit creation when updating a missing tenant
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    // Reuse the already parsed tenant name instead of parsing it a second time.
    return tenant(controller.tenants().require(tenant), request);
}
/** Creates a new tenant from the specification in the request, and returns the created tenant. */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().create(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    // Reuse the already parsed tenant name instead of parsing it a second time.
    return tenant(controller.tenants().require(tenant), request);
}
/** Creates a new application under the given tenant, and returns its serialized id. */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    // Called for its side effect only; the returned Application was previously stored in an unused local.
    controller.applications().createApplication(id, credentials);
    Slime slime = new Slime();
    toSlime(id, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Creates the given instance, first creating its application if that does not already exist. */
private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    if (controller.applications().getApplication(applicationId).isEmpty())
        createApplication(tenantName, applicationName, request);

    ApplicationId instanceId = applicationId.instance(instanceName);
    controller.applications().createInstance(instanceId);
    Slime slime = new Slime();
    toSlime(instanceId, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
    request = controller.auditLogger().log(request);
    String versionString = readToString(request.getData());
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Version version = Version.fromString(versionString);
        VersionStatus versionStatus = controller.readVersionStatus();
        // An empty version in the request means "the current system version".
        if (version.equals(Version.emptyVersion))
            version = controller.systemVersion(versionStatus);
        // Only versions currently active in this system may be targeted.
        if (!versionStatus.isActive(version))
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + versionStatus.versions()
                                                                                  .stream()
                                                                                  .map(VespaVersion::versionNumber)
                                                                                  .map(Version::toString)
                                                                                  .collect(joining(", ")));
        Change change = Change.of(version);
        // Pinning prevents the upgrader from moving the instance off this version.
        if (pin)
            change = change.withPin();
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    controller.auditLogger().log(request);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        // Fail with a clear 400-style message when no application package has ever been submitted,
        // instead of the opaque NoSuchElementException from an unchecked Optional.get().
        var latestVersion = application.get().latestVersion()
                                       .orElseThrow(() -> new IllegalArgumentException("No known application revision to deploy for " + id));
        Change change = Change.of(latestVersion);
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Change change = application.get().require(id.instance()).change();
        if (change.isEmpty()) {
            response.append("No deployment in progress for ").append(id).append(" at this time");
            return;
        }
        // Upper-case with a fixed locale: the default-locale toUpperCase() maps e.g. "pin" to
        // "PİN" under Turkish locales, which would make valueOf fail for valid input.
        ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase(java.util.Locale.ROOT));
        controller.applications().deploymentTrigger().cancelChange(id, cancel);
        response.append("Changed deployment from '").append(change).append("' to '")
                .append(controller.applications().requireInstance(id).change()).append("' for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */
private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    // Optional comma-separated filters; blank entries are ignored.
    List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream()
                                        .flatMap(clusters -> Stream.of(clusters.split(",")))
                                        .filter(cluster -> ! cluster.isBlank())
                                        .collect(toUnmodifiableList());
    List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream()
                                         .flatMap(types -> Stream.of(types.split(",")))
                                         .filter(type -> ! type.isBlank())
                                         .collect(toUnmodifiableList());
    controller.applications().reindex(id, zone, clusterNames, documentTypes, request.getBooleanProperty("indexedOnly"));
    // Fixed parenthesization: the "for types" clause was nested inside the cluster clause,
    // so specified document types were omitted from the message when no clusters were given.
    return new MessageResponse("Requested reindexing of " + id + " in " + zone +
                               (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames)) +
                               (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)));
}
/** Gets reindexing status of an application in a zone. */
private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setBool("enabled", reindexing.enabled());
    // One entry per content cluster, sorted by name, each with its pending and ready document types.
    Cursor clustersArray = root.setArray("clusters");
    reindexing.clusters().entrySet().stream().sorted(comparingByKey())
              .forEach(cluster -> {
                  Cursor clusterObject = clustersArray.addObject();
                  clusterObject.setString("name", cluster.getKey());
                  // Document types waiting for the cluster to reach the required config generation.
                  Cursor pendingArray = clusterObject.setArray("pending");
                  cluster.getValue().pending().entrySet().stream().sorted(comparingByKey())
                         .forEach(pending -> {
                             Cursor pendingObject = pendingArray.addObject();
                             pendingObject.setString("type", pending.getKey());
                             pendingObject.setLong("requiredGeneration", pending.getValue());
                         });
                  // Document types with reindexing scheduled or underway; details filled in by setStatus.
                  Cursor readyArray = clusterObject.setArray("ready");
                  cluster.getValue().ready().entrySet().stream().sorted(comparingByKey())
                         .forEach(ready -> {
                             Cursor readyObject = readyArray.addObject();
                             readyObject.setString("type", ready.getKey());
                             setStatus(readyObject, ready.getValue());
                         });
              });
    return new SlimeJsonResponse(slime);
}
/** Serializes a reindexing status: its timestamps, state, message and progress, whichever are present. */
void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
    status.readyAt().ifPresent(readyAt -> statusObject.setLong("readyAtMillis", readyAt.toEpochMilli()));
    status.startedAt().ifPresent(startedAt -> statusObject.setLong("startedAtMillis", startedAt.toEpochMilli()));
    status.endedAt().ifPresent(endedAt -> statusObject.setLong("endedAtMillis", endedAt.toEpochMilli()));
    status.state().map(ApplicationApiHandler::toString).ifPresent(state -> statusObject.setString("state", state));
    status.message().ifPresent(message -> statusObject.setString("message", message));
    status.progress().ifPresent(progress -> statusObject.setDouble("progress", progress));
}
/** Returns the wire name of the given reindexing state, or null for states unknown to this API. */
private static String toString(ApplicationReindexing.State state) {
    switch (state) {
        case PENDING: return "pending";
        case RUNNING: return "running";
        case FAILED: return "failed";
        case SUCCESSFUL: return "successful";
    }
    return null; // states added later are not exposed
}
/** Enables reindexing of an application in a zone. */
private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    controller.applications().enableReindexing(application, zone);
    return new MessageResponse("Enabled reindexing of " + application + " in " + zone);
}
/** Disables reindexing of an application in a zone. */
private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    controller.applications().disableReindexing(application, zone);
    return new MessageResponse("Disabled reindexing of " + application + " in " + zone);
}
/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    // Optional request properties narrow the restart to a host, cluster type and/or cluster id.
    Optional<HostName> hostName = Optional.ofNullable(request.getProperty("hostname")).map(HostName::from);
    Optional<ClusterSpec.Type> clusterType = Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from);
    Optional<ClusterSpec.Id> clusterId = Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from);
    controller.applications().restart(deploymentId, new RestartFilter().withHostName(hostName)
                                                                       .withClusterType(clusterType)
                                                                       .withClusterId(clusterId));
    return new MessageResponse("Requested restart of " + deploymentId);
}
/** Set suspension status of the given deployment. */
private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deploymentId = new DeploymentId(id, requireZone(environment, region));
    controller.applications().setSuspension(deploymentId, suspend);
    String verb = suspend ? "Suspended" : "Resumed";
    return new MessageResponse(verb + " orchestration of " + deploymentId);
}
/**
 * Deploys the application package in the given multipart request directly to the given job's zone.
 * Only manually deployed environments allow this for non-operators.
 */
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
    if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");

    Map<String, byte[]> dataParts = parseDataParts(request);
    // Use the shared constant for the form-part key in both the check and the lookup; the original
    // checked the literal "applicationZip" but read EnvironmentResource.APPLICATION_ZIP, which would
    // break silently if the constant and the literal ever diverged.
    if ( ! dataParts.containsKey(EnvironmentResource.APPLICATION_ZIP))
        throw new IllegalArgumentException("Missing required form part 'applicationZip'");

    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                     Optional.of(id.instance()),
                                                                     Optional.of(type.zone(controller.system())),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));

    // An explicit platform version may be passed in the optional "deployOptions" JSON part.
    Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions"))
                                        .map(json -> SlimeUtils.jsonToSlime(json).get())
                                        .flatMap(options -> optional("vespaVersion", options))
                                        .map(Version::fromString);

    controller.jobController().deploy(id, type, version, applicationPackage);
    RunId runId = controller.jobController().last(id, type).get().id();
    Slime slime = new Slime();
    Cursor rootObject = slime.setObject();
    rootObject.setString("message", "Deployment started in " + runId +
                                    ". This may take about 15 minutes the first time.");
    rootObject.setLong("run", runId.number());
    return new SlimeJsonResponse(slime);
}
/**
 * Deploys to, or redeploys, the given zone for the given instance. Handles three cases:
 * the system proxy application, an explicit application version (source revision + build
 * number), and a directly uploaded application package.
 */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( ! dataParts.containsKey("deployOptions"))
        return ErrorResponse.badRequest("Missing required form part 'deployOptions'");

    Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();

    /*
     * Special handling of the proxy application (the only system application with an application package)
     * Setting any other deployOptions here is not supported for now (e.g. specifying version), but
     * this might be handy later to handle emergency downgrades.
     */
    boolean isZoneApplication = SystemApplication.proxy.id().equals(applicationId);
    if (isZoneApplication) {
        // The proxy app deploys only at the current system version, and only while the system is stable.
        String versionStr = deployOptions.field("vespaVersion").asString();
        boolean versionPresent = !versionStr.isEmpty() && !versionStr.equals("null");
        if (versionPresent) {
            throw new RuntimeException("Version not supported for system applications");
        }
        VersionStatus versionStatus = controller.readVersionStatus();
        if (versionStatus.isUpgrading()) {
            throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
        }
        Optional<VespaVersion> systemVersion = versionStatus.systemVersion();
        if (systemVersion.isEmpty()) {
            throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
        }
        ActivateResult result = controller.applications()
                                          .deploySystemApplicationPackage(SystemApplication.proxy, zone, systemVersion.get().versionNumber());
        return new SlimeJsonResponse(toSlime(result));
    }

    /*
     * Normal applications from here
     */
    Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip"))
                                                              .map(ApplicationPackage::new);
    // NOTE(review): 'application' is never read in this method — candidate for removal.
    Optional<com.yahoo.vespa.hosted.controller.Application> application = controller.applications().getApplication(TenantAndApplicationId.from(applicationId));

    // Source revision and build number must be given together, and then identify a stored package.
    Inspector sourceRevision = deployOptions.field("sourceRevision");
    Inspector buildNumber = deployOptions.field("buildNumber");
    if (sourceRevision.valid() != buildNumber.valid())
        throw new IllegalArgumentException("Source revision and build number must both be provided, or not");

    Optional<ApplicationVersion> applicationVersion = Optional.empty();
    if (sourceRevision.valid()) {
        if (applicationPackage.isPresent())
            throw new IllegalArgumentException("Application version and application package can't both be provided.");
        applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision),
                                                                 buildNumber.asLong()));
        applicationPackage = Optional.of(controller.applications().getApplicationPackage(applicationId,
                                                                                         applicationVersion.get()));
    }

    boolean deployDirectly = deployOptions.field("deployDirectly").asBool();
    Optional<Version> vespaVersion = optional("vespaVersion", deployOptions).map(Version::new);

    // Direct redeploy with nothing specified: reuse the existing deployment's versions and package.
    if (deployDirectly && applicationPackage.isEmpty() && applicationVersion.isEmpty() && vespaVersion.isEmpty()) {
        Optional<Deployment> deployment = controller.applications().getInstance(applicationId)
                                                    .map(Instance::deployments)
                                                    .flatMap(deployments -> Optional.ofNullable(deployments.get(zone)));
        if(deployment.isEmpty())
            throw new IllegalArgumentException("Can't redeploy application, no deployment currently exist");

        ApplicationVersion version = deployment.get().applicationVersion();
        if(version.isUnknown())
            throw new IllegalArgumentException("Can't redeploy application, application version is unknown");

        applicationVersion = Optional.of(version);
        vespaVersion = Optional.of(deployment.get().version());
        applicationPackage = Optional.of(controller.applications().getApplicationPackage(applicationId,
                                                                                         applicationVersion.get()));
    }

    DeployOptions deployOptionsJsonClass = new DeployOptions(deployDirectly,
                                                             vespaVersion,
                                                             deployOptions.field("ignoreValidationErrors").asBool(),
                                                             deployOptions.field("deployCurrentVersion").asBool());

    // Validate the package's identity configuration before deploying it.
    applicationPackage.ifPresent(aPackage -> controller.applications().verifyApplicationIdentityConfiguration(applicationId.tenant(),
                                                                                                              Optional.of(applicationId.instance()),
                                                                                                              Optional.of(zone),
                                                                                                              aPackage,
                                                                                                              Optional.of(requireUserPrincipal(request))));

    ActivateResult result = controller.applications().deploy(applicationId,
                                                             zone,
                                                             applicationPackage,
                                                             applicationVersion,
                                                             deployOptionsJsonClass);

    return new SlimeJsonResponse(toSlime(result));
}
/** Deletes the given tenant, authorized by credentials taken from the request body. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    Optional<Tenant> existing = controller.tenants().get(tenantName);
    if (existing.isEmpty())
        return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");

    Tenant tenant = existing.get();
    Credentials credentials = accessControlRequests.credentials(tenant.name(),
                                                                toSlime(request.getData()).get(),
                                                                request.getJDiscRequest());
    controller.tenants().delete(tenant.name(), credentials);
    return tenant(tenant, request);  // Echo the deleted tenant back to the caller.
}
/** Deletes the given application, authorized by credentials taken from the request body. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    controller.applications().deleteApplication(id,
                                                accessControlRequests.credentials(id.tenant(),
                                                                                  toSlime(request.getData()).get(),
                                                                                  request.getJDiscRequest()));
    return new MessageResponse("Deleted application " + id);
}
/**
 * Deletes the given instance; if this leaves the application with no instances, the
 * application itself is also deleted, which requires credentials from the request body.
 */
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    controller.applications().deleteInstance(id.instance(instanceName));
    if (controller.applications().requireApplication(id).instances().isEmpty()) {
        // Last instance gone: remove the application as well.
        Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
        controller.applications().deleteApplication(id, credentials);
    }
    return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString());
}
/** Deactivates (removes) the deployment of the given instance in the given zone. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId id = new DeploymentId(application, requireZone(environment, region));
    controller.applications().deactivate(id.applicationId(), id.zoneId());
    return new MessageResponse("Deactivated " + id);
}
/** Returns test config for indicated job, with production deployments of the default instance. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
    // Tests see the default instance's production deployments ...
    ApplicationId defaultInstanceId = TenantAndApplicationId.from(id).defaultInstance();
    HashSet<DeploymentId> deployments = controller.applications()
                                                  .getInstance(defaultInstanceId).stream()
                                                  .flatMap(instance -> instance.productionDeployments().keySet().stream())
                                                  .map(zone -> new DeploymentId(defaultInstanceId, zone))
                                                  .collect(Collectors.toCollection(HashSet::new));
    var testedZone = type.zone(controller.system());

    // ... plus, for non-production jobs, the zone actually under test.
    if ( ! type.isProduction())
        deployments.add(new DeploymentId(id, testedZone));

    return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
                                                                  type,
                                                                  false,
                                                                  controller.routing().zoneEndpointsOf(deployments),
                                                                  controller.applications().reachableContentClustersByZone(deployments)));
}
/** Parses a source revision from the given JSON object; all three fields are required. */
private static SourceRevision toSourceRevision(Inspector object) {
    Inspector repository = object.field("repository");
    Inspector branch = object.field("branch");
    Inspector commit = object.field("commit");
    if ( ! (repository.valid() && branch.valid() && commit.valid()))
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    return new SourceRevision(repository.asString(), branch.asString(), commit.asString());
}
/** Returns the tenant with the given name, or throws NotExistsException if unknown. */
private Tenant getTenantOrThrow(String tenantName) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty())
        throw new NotExistsException(new TenantId(tenantName));
    return tenant.get();
}
/**
 * Renders the given tenant with type-specific metadata, its applications (recursively or as
 * references, optionally restricted to production instances), and activity metadata.
 */
private void toSlime(Cursor object, Tenant tenant, HttpRequest request) {
    object.setString("tenant", tenant.name().value());
    object.setString("type", tenantType(tenant));
    List<com.yahoo.vespa.hosted.controller.Application> applications = controller.applications().asList(tenant.name());
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            object.setString("athensDomain", athenzTenant.domain().getName());
            object.setString("property", athenzTenant.property().id());
            athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
            // Contact info, when present: urls plus a nested array of contact persons.
            athenzTenant.contact().ifPresent(c -> {
                object.setString("propertyUrl", c.propertyUrl().toString());
                object.setString("contactsUrl", c.url().toString());
                object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
                Cursor contactsArray = object.setArray("contacts");
                c.persons().forEach(persons -> {
                    Cursor personArray = contactsArray.addArray();
                    persons.forEach(personArray::addString);
                });
            });
            break;
        case cloud: {
            CloudTenant cloudTenant = (CloudTenant) tenant;
            cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName()));
            Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
            cloudTenant.developerKeys().forEach((key, user) -> {
                Cursor keyObject = pemDeveloperKeysArray.addObject();
                keyObject.setString("key", KeyUtils.toPem(key));
                keyObject.setString("user", user.getName());
            });
            toSlime(object, cloudTenant.tenantSecretStores());
            // Quota: the tenant's allowance, and usage summed over all its applications.
            var tenantQuota = controller.serviceRegistry().billingController().getQuota(tenant.name());
            var usedQuota = applications.stream()
                                        .map(com.yahoo.vespa.hosted.controller.Application::quotaUsage)
                                        .reduce(QuotaUsage.none, QuotaUsage::add);
            toSlime(tenantQuota, usedQuota, object.setObject("quota"));
            break;
        }
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    // Applications: full status objects when recursing, plain references otherwise.
    Cursor applicationArray = object.setArray("applications");
    for (com.yahoo.vespa.hosted.controller.Application application : applications) {
        DeploymentStatus status = controller.jobController().deploymentStatus(application);
        for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                      : application.instances().values())
            if (recurseOverApplications(request))
                toSlime(applicationArray.addObject(), instance, status, request);
            else
                toSlime(instance.id(), applicationArray.addObject(), request);
    }
    tenantMetaDataToSlime(tenant, object.setObject("metaData"));
}
/** Renders a tenant quota and its current usage; budget is nix when none is set. */
private void toSlime(Quota quota, QuotaUsage usage, Cursor object) {
    if (quota.budget().isPresent())
        object.setDouble("budget", quota.budget().get().doubleValue());
    else
        object.setNix("budget");
    object.setDouble("budgetUsed", usage.rate());
    quota.maxClusterSize().ifPresent(size -> object.setLong("clusterSize", size));
}
/** Renders the given cluster resources, with an estimated cost rounded to two decimals. */
private void toSlime(ClusterResources resources, Cursor object) {
    object.setLong("nodes", resources.nodes());
    object.setLong("groups", resources.groups());
    toSlime(resources.nodeResources(), object.setObject("nodeResources"));
    // Non-public systems divide cost by 3 — presumably an internal pricing factor; confirm.
    double costDivisor = controller.zoneRegistry().system().isPublic() ? 1.0 : 3.0;
    object.setDouble("cost", Math.round(resources.nodes() * resources.nodeResources().cost() * 100.0 / costDivisor) / 100.0);
}
/** Renders measured and ideal utilization for cpu, memory, and disk. */
private void utilizationToSlime(Cluster.Utilization utilization, Cursor utilizationObject) {
    utilizationObject.setDouble("cpu", utilization.cpu());
    utilizationObject.setDouble("idealCpu", utilization.idealCpu());
    utilizationObject.setDouble("memory", utilization.memory());
    utilizationObject.setDouble("idealMemory", utilization.idealMemory());
    utilizationObject.setDouble("disk", utilization.disk());
    utilizationObject.setDouble("idealDisk", utilization.idealDisk());
}
/** Renders each scaling event with its from/to resources and its timestamp. */
private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) {
    for (Cluster.ScalingEvent event : scalingEvents) {
        Cursor eventObject = scalingEventsArray.addObject();
        toSlime(event.from(), eventObject.setObject("from"));
        toSlime(event.to(), eventObject.setObject("to"));
        eventObject.setLong("at", event.at().toEpochMilli());
    }
}
/** Renders the given node resources. */
private void toSlime(NodeResources resources, Cursor object) {
    object.setDouble("vcpu", resources.vcpu());
    object.setDouble("memoryGb", resources.memoryGb());
    object.setDouble("diskGb", resources.diskGb());
    object.setDouble("bandwidthGbps", resources.bandwidthGbps());
    // valueOf is presumably a statically imported serializer for these enums — confirm its origin.
    object.setString("diskSpeed", valueOf(resources.diskSpeed()));
    object.setString("storageType", valueOf(resources.storageType()));
}
/** Renders a brief list entry for the given tenant: name, type metadata, and resource url. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
    object.setString("tenant", tenant.name().value());
    Cursor metaData = object.setObject("metaData");
    metaData.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            metaData.setString("athensDomain", athenzTenant.domain().getName());
            metaData.setString("property", athenzTenant.property().id());
            break;
        case cloud: break; // No additional list metadata for cloud tenants.
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/**
 * Renders activity metadata for the given tenant: creation time, latest dev deployment,
 * latest production submission, and last login times per user level, when known.
 */
private void tenantMetaDataToSlime(Tenant tenant, Cursor object) {
    List<com.yahoo.vespa.hosted.controller.Application> applications = controller.applications().asList(tenant.name());
    // Latest start of any dev-environment job run, across all the tenant's instances.
    Optional<Instant> lastDev = applications.stream()
                                            .flatMap(application -> application.instances().values().stream())
                                            .flatMap(instance -> controller.jobController().jobs(instance.id()).stream()
                                                                           .filter(jobType -> jobType.environment() == Environment.dev)
                                                                           .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream()))
                                            .map(Run::start)
                                            .max(Comparator.naturalOrder());
    // Latest build time among the applications' latest submitted versions.
    Optional<Instant> lastSubmission = applications.stream()
                                                   .flatMap(app -> app.latestVersion().flatMap(ApplicationVersion::buildTime).stream())
                                                   .max(Comparator.naturalOrder());
    object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli());
    lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli()));
    lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user)
          .ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer)
          .ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator)
          .ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli()));
}
/** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to given query*/
private URI withPathAndQuery(String newPath, String newQuery, URI uri) {
    try {
        return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null);
    }
    catch (URISyntaxException e) {
        // The source URI was already valid, and we only substitute path and query.
        throw new RuntimeException("Will not happen", e);
    }
}
/** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
private URI withPath(String newPath, URI uri) {
    return withPathAndQuery(newPath, null, uri);  // null query drops any existing query
}
/** Returns the application/v4 api path identifying the given deployment. */
private String toPath(DeploymentId id) {
    ApplicationId application = id.applicationId();
    ZoneId zone = id.zoneId();
    return path("/application", "v4",
                "tenant", application.tenant(),
                "application", application.application(),
                "instance", application.instance(),
                "environment", zone.environment(),
                "region", zone.region());
}
/**
 * Parses the given string as a long, returning the given default when it is null.
 *
 * @throws IllegalArgumentException if the value is non-null but not a valid long
 */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null) return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    }
    catch (NumberFormatException e) {
        // Preserve the parse failure as the cause instead of discarding it.
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'", e);
    }
}
/** Renders a brief summary of the given run: id, versions, and end (or start) time. */
private void toSlime(Run run, Cursor object) {
    object.setLong("id", run.id().number());
    object.setString("version", run.versions().targetPlatform().toFullString());
    if ( ! run.versions().targetApplication().isUnknown())
        toSlime(run.versions().targetApplication(), object.setObject("revision"));
    // Placeholder: the run's trigger reason is not tracked here.
    object.setString("reason", "unknown reason");
    object.setLong("at", run.end().orElse(run.start()).toEpochMilli());
}
/**
 * Reads the given stream (at most 1 MB) and parses it as JSON.
 *
 * @throws RuntimeException wrapping any IOException from reading the stream
 */
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        // Was thrown bare, which discarded the cause and all diagnostic context.
        throw new RuntimeException(e);
    }
}
/** Returns the user principal of the given request, or throws if none is set. */
private static Principal requireUserPrincipal(HttpRequest request) {
    return Optional.ofNullable(request.getJDiscRequest().getUserPrincipal())
                   .orElseThrow(() -> new InternalServerErrorException("Expected a user principal"));
}
/** Returns the given field of the object, or throws if it is missing. */
private Inspector mandatory(String key, Inspector object) {
    Inspector field = object.field(key);
    if ( ! field.valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return field;
}
/** Returns the string value of the given field, or empty when the field is absent. */
private Optional<String> optional(String key, Inspector object) {
    return SlimeUtils.optionalString(object.field(key));
}
/** Joins the string forms of the given elements into a path, separated by '/'. */
private static String path(Object... elements) {
    // Standard library instead of Guava's Joiner; Arrays and Collectors are already imported in this file.
    return Arrays.stream(elements).map(String::valueOf).collect(Collectors.joining("/"));
}
/** Renders a reference to the given application: tenant, application, and api url. */
private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    String apiPath = "/application/v4" +
                     "/tenant/" + id.tenant().value() +
                     "/application/" + id.application().value();
    object.setString("url", withPath(apiPath, request.getUri()).toString());
}
/** Renders a reference to the given instance: tenant, application, instance, and api url. */
private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    object.setString("instance", id.instance().value());
    String apiPath = "/application/v4" +
                     "/tenant/" + id.tenant().value() +
                     "/application/" + id.application().value() +
                     "/instance/" + id.instance().value();
    object.setString("url", withPath(apiPath, request.getUri()).toString());
}
/**
 * Renders the result of a deployment activation: revision, package size, prepare log
 * messages, and any required config change actions (restarts and refeeds).
 */
private Slime toSlime(ActivateResult result) {
    Slime slime = new Slime();
    Cursor object = slime.setObject();
    object.setString("revisionId", result.revisionId().id());
    object.setLong("applicationZipSize", result.applicationZipSizeBytes());

    // Log messages produced by the prepare phase, when any.
    Cursor logArray = object.setArray("prepareMessages");
    if (result.prepareResponse().log != null) {
        for (Log logMessage : result.prepareResponse().log) {
            Cursor logObject = logArray.addObject();
            logObject.setLong("time", logMessage.time);
            logObject.setString("level", logMessage.level);
            logObject.setString("message", logMessage.message);
        }
    }

    // Config change actions: services which must restart ...
    Cursor changeObject = object.setObject("configChangeActions");
    Cursor restartActionsArray = changeObject.setArray("restart");
    for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
        Cursor restartActionObject = restartActionsArray.addObject();
        restartActionObject.setString("clusterName", restartAction.clusterName);
        restartActionObject.setString("clusterType", restartAction.clusterType);
        restartActionObject.setString("serviceType", restartAction.serviceType);
        serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
        stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
    }

    // ... and document types which must be refed.
    Cursor refeedActionsArray = changeObject.setArray("refeed");
    for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
        Cursor refeedActionObject = refeedActionsArray.addObject();
        refeedActionObject.setString("name", refeedAction.name);
        refeedActionObject.setString("documentType", refeedAction.documentType);
        refeedActionObject.setString("clusterName", refeedAction.clusterName);
        serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
        stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
    }
    return slime;
}
/** Renders each service info as an object in the given array. */
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
    for (ServiceInfo service : serviceInfoList) {
        Cursor serviceObject = array.addObject();
        serviceObject.setString("serviceName", service.serviceName);
        serviceObject.setString("serviceType", service.serviceType);
        serviceObject.setString("configId", service.configId);
        serviceObject.setString("hostName", service.hostName);
    }
}
/** Adds each of the given strings to the given array. */
private void stringsToSlime(List<String> strings, Cursor array) {
    strings.forEach(array::addString);
}
/** Renders the given secret stores under a "secretStores" array. */
private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) {
    Cursor array = object.setArray("secretStores");
    for (TenantSecretStore store : tenantSecretStores) {
        Cursor storeObject = array.addObject();
        storeObject.setString("name", store.getName());
        storeObject.setString("awsId", store.getAwsId());
        storeObject.setString("role", store.getRole());
    }
}
/**
 * Reads the entire stream as a single string, or returns null when the stream is empty.
 * Closing the scanner also closes the given stream, which callers here do not reuse.
 * NOTE(review): still uses the platform default charset, as before — consider UTF-8 explicitly.
 */
private String readToString(InputStream stream) {
    try (Scanner scanner = new Scanner(stream).useDelimiter("\\A")) {  // close to avoid a resource leak
        return scanner.hasNext() ? scanner.next() : null;
    }
}
/** Returns whether the request asks for recursion at tenant level, or deeper. */
private static boolean recurseOverTenants(HttpRequest request) {
    return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive"));
}
/** Returns whether the request asks for recursion at application level, or deeper. */
private static boolean recurseOverApplications(HttpRequest request) {
    return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive"));
}
/** Returns whether the request asks for recursion down to deployment level. */
private static boolean recurseOverDeployments(HttpRequest request) {
    // Guava's ImmutableSet tolerates the null returned when the property is absent (Set.of would NPE).
    return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive"));
}
/** Returns whether the request asks to restrict output to production instances. */
private static boolean showOnlyProductionInstances(HttpRequest request) {
    return "true".equals(request.getProperty("production"));
}
/** Returns the API name of the given tenant's type. */
private static String tenantType(Tenant tenant) {
    switch (tenant.type()) {
        case athenz: return "ATHENS";
        case cloud: return "CLOUD";
        // Report the type value, consistent with the other tenant-type switches in this handler.
        default: throw new IllegalArgumentException("Unknown tenant type: " + tenant.type());
    }
}
/** Builds an ApplicationId from the tenant, application, and instance path segments. */
private static ApplicationId appIdFromPath(Path path) {
    return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
}
/** Resolves the job type named by the "jobtype" path segment. */
private static JobType jobTypeFromPath(Path path) {
    return JobType.fromJobName(path.get("jobtype"));
}
/** Builds a RunId from the application, job type, and run number path segments. */
private static RunId runIdFromPath(Path path) {
    return new RunId(appIdFromPath(path), jobTypeFromPath(path), Long.parseLong(path.get("number")));
}
/**
 * Handles submission of a new application revision: validates submit options and the
 * package's identity configuration, then hands everything to the job controller.
 */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
    Map<String, byte[]> dataParts = parseDataParts(request);
    Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
    long projectId = Math.max(1, submitOptions.field("projectId").asLong());  // clamped to at least 1

    // A source revision requires all of repository, branch, and commit.
    Optional<String> repository = optional("repository", submitOptions);
    Optional<String> branch = optional("branch", submitOptions);
    Optional<String> commit = optional("commit", submitOptions);
    Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
                                              ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
                                              : Optional.empty();
    Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
    Optional<String> authorEmail = optional("authorEmail", submitOptions);
    sourceUrl.map(URI::create).ifPresent(url -> {
        if (url.getHost() == null || url.getScheme() == null)
            throw new IllegalArgumentException("Source URL must include scheme and host");
    });

    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
    // Validate the package's identity configuration before accepting the submission.
    controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
                                                                     Optional.empty(),
                                                                     Optional.empty(),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));

    return JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
                                                        tenant,
                                                        application,
                                                        sourceRevision,
                                                        authorEmail,
                                                        sourceUrl,
                                                        projectId,
                                                        applicationPackage,
                                                        dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP));
}
/**
 * Submits a synthetic application package which removes all production deployments for the
 * given application; the submit response itself is discarded.
 */
private HttpResponse removeAllProdDeployments(String tenant, String application) {
    JobControllerApiHandlerHelper.submitResponse(controller.jobController(), tenant, application,
                                                 Optional.empty(), Optional.empty(), Optional.empty(), 1,
                                                 ApplicationPackage.deploymentRemoval(), new byte[0]);
    return new MessageResponse("All deployments removed");
}
/**
 * Returns the zone with the given environment and region, requiring that it exists in this
 * system — except the synthetic prod "controller" zone, which is always accepted.
 */
private ZoneId requireZone(String environment, String region) {
    ZoneId zone = ZoneId.from(environment, region);
    boolean isControllerZone = zone.environment() == Environment.prod && zone.region().value().equals("controller");
    if ( ! isControllerZone && ! controller.zoneRegistry().hasZone(zone))
        throw new IllegalArgumentException("Zone " + zone + " does not exist in this system");
    return zone;
}
/**
 * Parses the multipart data of the given request. When an X-Content-Hash header is present,
 * the SHA-256 digest of the request body must match its base64-decoded value.
 *
 * @throws IllegalArgumentException if the declared and computed hashes differ
 */
private static Map<String, byte[]> parseDataParts(HttpRequest request) {
    String contentHash = request.getHeader("X-Content-Hash");
    if (contentHash == null)
        return new MultipartParser().parse(request);

    // Digest the body while it is being parsed, then compare against the declared hash.
    DigestInputStream digester = Signatures.sha256Digester(request.getData());
    var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
    if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
        throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
    return dataParts;
}
/**
 * Returns the rotation of the given instance matching the given endpoint id, or its single
 * assigned rotation when no endpoint id is given.
 */
private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
    var rotations = instance.rotations();
    if (rotations.isEmpty())
        throw new NotExistsException("global rotation does not exist for " + instance);

    if (endpointId.isEmpty()) {
        if (rotations.size() > 1)
            throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
        return rotations.get(0).rotationId();
    }

    return rotations.stream()
                    .filter(rotation -> rotation.endpointId().id().equals(endpointId.get()))
                    .map(AssignedRotation::rotationId)
                    .findFirst()
                    .orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() +
                                                              " does not exist for " + instance));
}
/** Returns the API name of the given rotation state. */
private static String rotationStateString(RotationState state) {
    if (state == RotationState.in) return "IN";
    if (state == RotationState.out) return "OUT";
    return "UNKNOWN";
}
/** Returns the API name of the given endpoint scope. */
private static String endpointScopeString(Endpoint.Scope scope) {
    switch (scope) {
        case region: return "region";
        case global: return "global";
        case zone: return "zone";
        default: throw new IllegalArgumentException("Unknown endpoint scope " + scope);
    }
}
/** Returns the API name of the given routing method. */
private static String routingMethodString(RoutingMethod method) {
    switch (method) {
        case exclusive: return "exclusive";
        case shared: return "shared";
        case sharedLayer4: return "sharedLayer4";
        default: throw new IllegalArgumentException("Unknown routing method " + method);
    }
}
/** Returns the request context attribute with the given name, cast to the given class, or throws. */
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
    Object value = request.getJDiscRequest().context().get(attributeName);
    if (cls.isInstance(value))  // also false for null, matching the previous Optional.filter behavior
        return cls.cast(value);
    throw new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request");
}
/** Returns whether given request is by an operator */
private static boolean isOperator(HttpRequest request) {
    SecurityContext securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
    for (Role role : securityContext.roles())
        if (role.definition() == RoleDefinition.hostedOperator)
            return true;
    return false;
}
} | class ApplicationApiHandler extends LoggingRequestHandler {
private static final ObjectMapper jsonMapper = new ObjectMapper();
private static final String OPTIONAL_PREFIX = "/api";
private final Controller controller;
private final AccessControlRequests accessControlRequests;
private final TestConfigSerializer testConfigSerializer;
@Inject
public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx,
Controller controller,
AccessControlRequests accessControlRequests) {
super(parentCtx);
this.controller = controller;
this.accessControlRequests = accessControlRequests;
this.testConfigSerializer = new TestConfigSerializer(controller.system());
}
@Override
public Duration getTimeout() {
return Duration.ofMinutes(20);
}
@Override
public HttpResponse handle(HttpRequest request) {
try {
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
switch (request.getMethod()) {
case GET: return handleGET(path, request);
case PUT: return handlePUT(path, request);
case POST: return handlePOST(path, request);
case PATCH: return handlePATCH(path, request);
case DELETE: return handleDELETE(path, request);
case OPTIONS: return handleOPTIONS();
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (ForbiddenException e) {
return ErrorResponse.forbidden(Exceptions.toMessageString(e));
}
catch (NotAuthorizedException e) {
return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
}
catch (NotExistsException e) {
return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (ConfigServerException e) {
switch (e.getErrorCode()) {
case NOT_FOUND:
return new ErrorResponse(NOT_FOUND, e.getErrorCode().name(), Exceptions.toMessageString(e));
case ACTIVATION_CONFLICT:
return new ErrorResponse(CONFLICT, e.getErrorCode().name(), Exceptions.toMessageString(e));
case INTERNAL_SERVER_ERROR:
return new ErrorResponse(INTERNAL_SERVER_ERROR, e.getErrorCode().name(), Exceptions.toMessageString(e));
default:
return new ErrorResponse(BAD_REQUEST, e.getErrorCode().name(), Exceptions.toMessageString(e));
}
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
/**
 * Routes GET requests to the matching resource handler. Patterns are tried in
 * order; the first match wins, so more specific paths must precede generic ones.
 * Fix: removed an exact duplicate of the
 * ".../environment/{environment}/region/{region}/instance/{instance}" route —
 * the second occurrence was unreachable dead code.
 */
private HttpResponse handleGET(Path path, HttpRequest request) {
    if (path.matches("/application/v4/")) return root(request);
    if (path.matches("/application/v4/tenant")) return tenants(request);
    if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/metering")) return metering(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return metrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    // Legacy path order (.../environment/.../region/.../instance/...) for the same resources:
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/**
 * Routes PUT requests: tenant updates, tenant info, archive access, secret
 * stores, and global rotation overrides. First matching pattern wins.
 */
private HttpResponse handlePUT(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return allowArchiveAccess(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
    // Both path orders (instance-last and instance-in-middle) set the same override.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/**
 * Routes POST requests: resource creation (tenants, applications, instances,
 * keys), deployment orchestration (deploying/submit/trigger), and per-zone
 * operations (deploy, reindex, restart, suspend). First matching pattern wins,
 * so the "default"-instance shorthand paths precede the explicit-instance ones.
 */
private HttpResponse handlePOST(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
    // Application-level paths operate on the "default" instance.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
    // Legacy path order (.../environment/.../region/.../instance/...) for the same resources:
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/**
 * Routes PATCH requests. Both the application path and the instance path patch
 * the application itself, so they share a single dispatch; short-circuit
 * evaluation preserves the original match order.
 */
private HttpResponse handlePATCH(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")
        || path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}"))
        return patchApplication(path.get("tenant"), path.get("application"), request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/**
 * Routes DELETE requests: removal of tenants, applications, instances, keys,
 * secret stores, deployments, and cancellation of in-flight changes.
 * First matching pattern wins.
 */
private HttpResponse handleDELETE(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return removeArchiveAccess(path.get("tenant"));
    if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
    // Application-level "deploying" paths cancel changes for the "default" instance.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
    // Legacy path order (.../environment/.../region/.../instance/...) for the same resources:
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Answers OPTIONS by advertising the supported methods; the body stays empty. */
private HttpResponse handleOPTIONS() {
    var allowedMethods = "GET,PUT,POST,PATCH,DELETE,OPTIONS";
    var response = new EmptyResponse();
    response.headers().put("Allow", allowedMethods);
    return response;
}
/** Serializes every tenant in full under a single top-level array. */
private HttpResponse recursiveRoot(HttpRequest request) {
    var slime = new Slime();
    var tenantsArray = slime.setArray();
    controller.tenants().asList().forEach(tenant -> toSlime(tenantsArray.addObject(), tenant, request));
    return new SlimeJsonResponse(slime);
}
/** Root resource: expands all tenants when recursion is requested, else links to them. */
private HttpResponse root(HttpRequest request) {
    if (recurseOverTenants(request))
        return recursiveRoot(request);
    return new ResourceResponse(request, "tenant");
}
/** Lists all tenants in compact form as a JSON array. */
private HttpResponse tenants(HttpRequest request) {
    var slime = new Slime();
    var tenantArray = slime.setArray();
    controller.tenants().asList()
              .forEach(tenant -> tenantInTenantsListToSlime(tenant, request.getUri(), tenantArray.addObject()));
    return new SlimeJsonResponse(slime);
}
/** Looks up a tenant by name, answering 404 when it does not exist. */
private HttpResponse tenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    return tenant(tenant.get(), request);
}
/** Serializes a single, known-to-exist tenant as a JSON object. */
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
    var slime = new Slime();
    toSlime(slime.setObject(), tenant, request);
    return new SlimeJsonResponse(slime);
}
/** Returns tenant info; only cloud tenants carry such info, others get 404. */
private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
    return controller.tenants().get(TenantName.from(tenantName))
                     .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                     .map(CloudTenant.class::cast)
                     .map(cloudTenant -> tenantInfo(cloudTenant.info(), request))
                     .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
}
/**
 * Serializes tenant info to JSON. An empty info yields an empty object;
 * address and billing contact are nested objects, emitted only when non-empty.
 */
private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) {
    var slime = new Slime();
    var root = slime.setObject();
    if (!info.isEmpty()) {
        root.setString("name", info.name());
        root.setString("email", info.email());
        root.setString("website", info.website());
        root.setString("invoiceEmail", info.invoiceEmail());
        root.setString("contactName", info.contactName());
        root.setString("contactEmail", info.contactEmail());
        toSlime(info.address(), root);
        toSlime(info.billingContact(), root);
    }
    return new SlimeJsonResponse(slime);
}
/** Writes a non-empty address as an "address" object under the given cursor; no-op when empty. */
private void toSlime(TenantInfoAddress address, Cursor parentCursor) {
    if (address.isEmpty())
        return;
    var cursor = parentCursor.setObject("address");
    cursor.setString("addressLines", address.addressLines());
    cursor.setString("postalCodeOrZip", address.postalCodeOrZip());
    cursor.setString("city", address.city());
    cursor.setString("stateRegionProvince", address.stateRegionProvince());
    cursor.setString("country", address.country());
}
/** Writes a non-empty billing contact as a "billingContact" object under the given cursor; no-op when empty. */
private void toSlime(TenantInfoBillingContact billingContact, Cursor parentCursor) {
    if (billingContact.isEmpty())
        return;
    var billingCursor = parentCursor.setObject("billingContact");
    billingCursor.setString("name", billingContact.name());
    billingCursor.setString("email", billingContact.email());
    billingCursor.setString("phone", billingContact.phone());
    toSlime(billingContact.address(), billingCursor);
}
/** Updates tenant info by name; only cloud tenants support this, others get 404. */
private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) {
    return controller.tenants().get(TenantName.from(tenantName))
                     .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                     .map(CloudTenant.class::cast)
                     .map(cloudTenant -> updateTenantInfo(cloudTenant, request))
                     .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
}
/** Returns the field's string value, or the default when the field is absent/invalid. */
private String getString(Inspector field, String defaultValue) {
    if (field.valid())
        return field.asString();
    return defaultValue;
}
/**
 * Merges the request payload into the tenant's existing info and stores it
 * under the tenant lock. Fields absent from the payload keep their old values.
 *
 * Fixes two copy-paste bugs in the fallback values: "website" previously fell
 * back to the old email, and "contactEmail" to the old contact name — so an
 * update omitting those fields silently corrupted them.
 */
private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) {
    TenantInfo oldInfo = tenant.info();
    Inspector insp = toSlime(request.getData()).get();
    TenantInfo mergedInfo = TenantInfo.EMPTY
            .withName(getString(insp.field("name"), oldInfo.name()))
            .withEmail(getString(insp.field("email"), oldInfo.email()))
            .withWebsite(getString(insp.field("website"), oldInfo.website()))            // was oldInfo.email()
            .withInvoiceEmail(getString(insp.field("invoiceEmail"), oldInfo.invoiceEmail()))
            .withContactName(getString(insp.field("contactName"), oldInfo.contactName()))
            .withContactEmail(getString(insp.field("contactEmail"), oldInfo.contactEmail())) // was oldInfo.contactName()
            .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address()))
            .withBillingContact(updateTenantInfoBillingContact(insp.field("billingContact"), oldInfo.billingContact()));
    // Store under the tenant lock so concurrent updates cannot interleave.
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}
/**
 * Merges an address payload with the existing address; when the payload has no
 * address object at all, the old address is kept unchanged.
 */
private TenantInfoAddress updateTenantInfoAddress(Inspector insp, TenantInfoAddress oldAddress) {
    if (!insp.valid())
        return oldAddress;
    // Builder calls are independent, so this order mirrors the serialized field order.
    return TenantInfoAddress.EMPTY
            .withAddressLines(getString(insp.field("addressLines"), oldAddress.addressLines()))
            .withPostalCodeOrZip(getString(insp.field("postalCodeOrZip"), oldAddress.postalCodeOrZip()))
            .withCity(getString(insp.field("city"), oldAddress.city()))
            .withStateRegionProvince(getString(insp.field("stateRegionProvince"), oldAddress.stateRegionProvince()))
            .withCountry(getString(insp.field("country"), oldAddress.country()));
}
/**
 * Merges a billing-contact payload with the existing contact; when the payload
 * has no billingContact object at all, the old contact is kept unchanged.
 */
private TenantInfoBillingContact updateTenantInfoBillingContact(Inspector insp, TenantInfoBillingContact oldContact) {
    if (!insp.valid())
        return oldContact;
    var mergedAddress = updateTenantInfoAddress(insp.field("address"), oldContact.address());
    return TenantInfoBillingContact.EMPTY
            .withName(getString(insp.field("name"), oldContact.name()))
            .withEmail(getString(insp.field("email"), oldContact.email()))
            .withPhone(getString(insp.field("phone"), oldContact.phone()))
            .withAddress(mergedAddress);
}
/**
 * Lists applications of a tenant, optionally filtered to a single application
 * name, each with links to its instances. Returns 404 when the tenant is unknown.
 *
 * @param tenantName      the tenant whose applications to list
 * @param applicationName when present, only the application with this name is listed
 */
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().get(tenantName).isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    Slime slime = new Slime();
    Cursor applicationArray = slime.setArray();
    for (com.yahoo.vespa.hosted.controller.Application application : controller.applications().asList(tenant)) {
        // Empty filter means "all applications".
        if (applicationName.map(application.id().application().value()::equals).orElse(true)) {
            Cursor applicationObject = applicationArray.addObject();
            applicationObject.setString("tenant", application.id().tenant().value());
            applicationObject.setString("application", application.id().application().value());
            applicationObject.setString("url", withPath("/application/v4" +
                                                        "/tenant/" + application.id().tenant().value() +
                                                        "/application/" + application.id().application().value(),
                                                        request.getUri()).toString());
            Cursor instanceArray = applicationObject.setArray("instances");
            // The request may ask to include only production instances.
            for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
                                                                              : application.instances().keySet()) {
                Cursor instanceObject = instanceArray.addObject();
                instanceObject.setString("instance", instance.value());
                instanceObject.setString("url", withPath("/application/v4" +
                                                         "/tenant/" + application.id().tenant().value() +
                                                         "/application/" + application.id().application().value() +
                                                         "/instance/" + instance.value(),
                                                         request.getUri()).toString());
            }
        }
    }
    return new SlimeJsonResponse(slime);
}
/**
 * Returns the application package last deployed to the given job's zone,
 * as a zip download. Only manually deployed environments keep such packages.
 */
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
    boolean manuallyDeployed = type.environment().isManuallyDeployed();
    if (!manuallyDeployed)
        throw new IllegalArgumentException("Only manually deployed zones have dev packages");
    ZoneId zone = type.zone(controller.system());
    byte[] packageBytes = controller.applications().applicationStore().getDev(id, zone);
    return new ZipResponse(id.toFullString() + "." + zone.value() + ".zip", packageBytes);
}
/**
 * Returns an application package as a zip download. The build is taken from the
 * "build" request property when present (must be a valid long), otherwise the
 * latest submitted build is used. Throws NotExistsException when no package can
 * be found for the resolved build.
 *
 * Fix: removed the unused local {@code applicationId}, and the not-found check
 * now precedes the filename construction.
 */
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
    var tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
    long buildNumber;
    var requestedBuild = Optional.ofNullable(request.getProperty("build")).map(build -> {
        try {
            return Long.parseLong(build);
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("Invalid build number", e);
        }
    });
    if (requestedBuild.isEmpty()) {
        // No explicit build requested: fall back to the latest submitted version.
        var application = controller.applications().requireApplication(tenantAndApplication);
        var latestBuild = application.latestVersion().map(ApplicationVersion::buildNumber).orElse(OptionalLong.empty());
        if (latestBuild.isEmpty()) {
            throw new NotExistsException("No application package has been submitted for '" + tenantAndApplication + "'");
        }
        buildNumber = latestBuild.getAsLong();
    } else {
        buildNumber = requestedBuild.get();
    }
    var applicationPackage = controller.applications().applicationStore().find(tenantAndApplication.tenant(), tenantAndApplication.application(), buildNumber);
    if (applicationPackage.isEmpty()) {
        throw new NotExistsException("No application package found for '" +
                                     tenantAndApplication +
                                     "' with build number " + buildNumber);
    }
    var filename = tenantAndApplication + "-build" + buildNumber + ".zip";
    return new ZipResponse(filename, applicationPackage.get());
}
/** Serializes a single application as a JSON object. */
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
    var slime = new Slime();
    var application = getApplication(tenantName, applicationName);
    toSlime(slime.setObject(), application, request);
    return new SlimeJsonResponse(slime);
}
/** Returns the Vespa version this application should compile against, as JSON. */
private HttpResponse compileVersion(String tenantName, String applicationName) {
    var slime = new Slime();
    var version = compileVersion(TenantAndApplicationId.from(tenantName, applicationName)).toFullString();
    slime.setObject().setString("compileVersion", version);
    return new SlimeJsonResponse(slime);
}
/** Serializes a single instance, including its deployment status, as a JSON response. */
private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Slime responseSlime = new Slime();
    Cursor root = responseSlime.setObject();
    // Keep lookup order: instance first, then application (affects which 404 message wins).
    Instance instance = getInstance(tenantName, applicationName, instanceName);
    DeploymentStatus status = controller.jobController().deploymentStatus(getApplication(tenantName, applicationName));
    toSlime(root, instance, status, request);
    return new SlimeJsonResponse(responseSlime);
}
/**
 * Registers the given PEM-encoded public key as a developer key for the calling user
 * on a cloud tenant, and returns the tenant's full key list.
 *
 * @throws IllegalArgumentException if the tenant is not a cloud tenant
 */
private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    Principal user = request.getJDiscRequest().getUserPrincipal();
    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    Slime root = new Slime();
    // Mutate and serialize under the tenant lock so the response reflects the stored state.
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withDeveloperKey(developerKey, user);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}
/**
 * Asks the config server to validate a configured tenant secret store against the
 * given deployment, and wraps the result in a JSON response.
 * Returns 400 for non-cloud tenants and 404 for unknown secret stores.
 */
private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) {
    var awsRegion = request.getProperty("aws-region");
    var parameterName = request.getProperty("parameter-name");
    var applicationId = ApplicationId.fromFullString(request.getProperty("application-id"));
    var zoneId = ZoneId.from(request.getProperty("zone"));
    var deploymentId = new DeploymentId(applicationId, zoneId);
    // Check the tenant type BEFORE casting: casting a non-cloud tenant would throw
    // ClassCastException instead of producing the intended 400 response.
    var genericTenant = controller.tenants().require(applicationId.tenant());
    if (genericTenant.type() != Tenant.Type.cloud) {
        return ErrorResponse.badRequest("Tenant '" + applicationId.tenant() + "' is not a cloud tenant");
    }
    var tenant = (CloudTenant) genericTenant;
    var tenantSecretStore = tenant.tenantSecretStores()
                                  .stream()
                                  .filter(secretStore -> secretStore.getName().equals(secretStoreName))
                                  .findFirst();
    if (tenantSecretStore.isEmpty())
        return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'");
    var response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName);
    try {
        var responseRoot = new Slime();
        var responseCursor = responseRoot.setObject();
        responseCursor.setString("target", deploymentId.toString());
        var responseResultCursor = responseCursor.setObject("result");
        // The config server response is raw JSON; embed it verbatim under "result".
        var responseSlime = SlimeUtils.jsonToSlime(response);
        SlimeUtils.copyObject(responseSlime.get(), responseResultCursor);
        return new SlimeJsonResponse(responseRoot);
    } catch (JsonParseException e) {
        // Config server returned something that is not JSON; pass it through as a 500.
        return ErrorResponse.internalServerError(response);
    }
}
/**
 * Removes the given PEM-encoded developer key from a cloud tenant and returns the
 * tenant's remaining key list.
 *
 * @throws IllegalArgumentException if the tenant is not a cloud tenant
 */
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    // (Removed an unused 'user' lookup that re-fetched the tenant without using the result.)
    Slime root = new Slime();
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withoutDeveloperKey(developerKey);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}
/** Appends one {key, user} object per developer key, in the map's iteration order. */
private void toSlime(Cursor keysArray, Map<PublicKey, Principal> keys) {
    for (Map.Entry<PublicKey, Principal> entry : keys.entrySet()) {
        Cursor entryObject = keysArray.addObject();
        entryObject.setString("key", KeyUtils.toPem(entry.getKey()));
        entryObject.setString("user", entry.getValue().getName());
    }
}
/** Adds the PEM public key in the request body as a deploy key, returning the full key list. */
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
    PublicKey keyToAdd = KeyUtils.fromPemEncodedPublicKey(toSlime(request.getData()).get().field("key").asString());
    Slime responseSlime = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withDeployKey(keyToAdd);
        Cursor keysArray = responseSlime.setObject().setArray("keys");
        for (PublicKey key : application.get().deployKeys())
            keysArray.addString(KeyUtils.toPem(key));
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(responseSlime);
}
/** Removes the PEM public key in the request body from the deploy keys, returning the remaining list. */
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
    PublicKey keyToRemove = KeyUtils.fromPemEncodedPublicKey(toSlime(request.getData()).get().field("key").asString());
    Slime responseSlime = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withoutDeployKey(keyToRemove);
        Cursor keysArray = responseSlime.setObject().setArray("keys");
        for (PublicKey key : application.get().deployKeys())
            keysArray.addString(KeyUtils.toPem(key));
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(responseSlime);
}
/**
 * Registers a new tenant secret store (AWS id + role) on a cloud tenant, creates the
 * backing policy, and returns the tenant's resulting secret-store list.
 *
 * @throws IllegalArgumentException if the tenant is not a cloud tenant
 */
private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    var data = toSlime(request.getData()).get();
    var awsId = mandatory("awsId", data).asString();
    var externalId = mandatory("externalId", data).asString();
    var role = mandatory("role", data).asString();
    var tenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
    var tenantSecretStore = new TenantSecretStore(name, awsId, role);
    // Plain concatenation instead of String.format on an already-concatenated string:
    // the old form was a no-op format and would throw if the store's toString contained '%'.
    if (!tenantSecretStore.isValid()) {
        return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid");
    }
    if (tenant.tenantSecretStores().contains(tenantSecretStore)) {
        return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured");
    }
    controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role);
    controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId);
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withSecretStore(tenantSecretStore);
        controller.tenants().store(lockedTenant);
    });
    // Re-read so the response reflects the stored state.
    tenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
    var slime = new Slime();
    toSlime(slime.setObject(), tenant.tenantSecretStores());
    return new SlimeJsonResponse(slime);
}
/** Deletes a named tenant secret store and its policy, returning the remaining stores. */
private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) {
    var cloudTenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
    var match = cloudTenant.tenantSecretStores().stream()
                           .filter(store -> store.getName().equals(name))
                           .findFirst();
    if (match.isEmpty())
        return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found");
    var storeToDelete = match.get();
    controller.serviceRegistry().tenantSecretService().deleteSecretStore(cloudTenant.name(), storeToDelete);
    controller.serviceRegistry().roleService().deleteTenantPolicy(cloudTenant.name(), storeToDelete.getName(), storeToDelete.getRole());
    controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withoutSecretStore(storeToDelete);
        controller.tenants().store(lockedTenant);
    });
    // Re-read so the response reflects the stored state.
    var refreshed = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
    var responseSlime = new Slime();
    toSlime(responseSlime.setObject(), refreshed.tenantSecretStores());
    return new SlimeJsonResponse(responseSlime);
}
/** Clears the archive access role of a cloud tenant. */
private HttpResponse removeArchiveAccess(String tenantName) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> {
        controller.tenants().store(lockedTenant.withArchiveAccessRole(Optional.empty()));
    });
    return new MessageResponse("Archive access role removed for tenant " + tenantName + ".");
}
/**
 * Applies a partial update to the application: currently "majorVersion" (0 clears it)
 * and "pemDeployKey" (adds a deploy key). Returns a message describing what changed.
 */
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        Inspector majorVersionField = requestObject.field("majorVersion");
        if (majorVersionField.valid()) {
            // A major version of 0 means "unset the pinned major version".
            Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
            application = application.withMajorVersion(majorVersion);
            messageBuilder.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
        }
        Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
        if (pemDeployKeyField.valid()) {
            String pemDeployKey = pemDeployKeyField.asString();
            PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
            application = application.withDeployKey(deployKey);
            messageBuilder.add("Added deploy key " + pemDeployKey);
        }
        controller.applications().store(application);
    });
    return new MessageResponse(messageBuilder.toString());
}
/** Looks up the application, translating absence into a 404. */
private com.yahoo.vespa.hosted.controller.Application getApplication(String tenantName, String applicationName) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    return controller.applications()
                     .getApplication(id)
                     .orElseThrow(() -> new NotExistsException(id + " not found"));
}
/** Looks up the instance, translating absence into a 404. */
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    return controller.applications()
                     .getInstance(id)
                     .orElseThrow(() -> new NotExistsException(id + " not found"));
}
/** Lists the nodes allocated to the given deployment, one JSON object per node. */
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, id);
    Slime slime = new Slime();
    Cursor nodesArray = slime.setObject().setArray("nodes");
    for (Node node : nodes) {
        Cursor nodeObject = nodesArray.addObject();
        nodeObject.setString("hostname", node.hostname().value());
        nodeObject.setString("state", valueOf(node.state()));
        node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
        nodeObject.setString("orchestration", valueOf(node.serviceState()));
        nodeObject.setString("version", node.currentVersion().toString());
        nodeObject.setString("flavor", node.flavor());
        toSlime(node.resources(), nodeObject);
        // "fastDisk" duplicates resources().diskSpeed() — presumably kept for older clients; confirm before removing.
        nodeObject.setBool("fastDisk", node.resources().diskSpeed() == NodeResources.DiskSpeed.fast);
        nodeObject.setString("clusterId", node.clusterId());
        nodeObject.setString("clusterType", valueOf(node.clusterType()));
    }
    return new SlimeJsonResponse(slime);
}
/**
 * Serializes the cluster resource/autoscaling state of a deployment, keyed by cluster id.
 * "target" is only included when it differs (in numbers) from the current allocation.
 */
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);
    Slime slime = new Slime();
    Cursor clustersObject = slime.setObject().setObject("clusters");
    for (Cluster cluster : application.clusters().values()) {
        Cursor clusterObject = clustersObject.setObject(cluster.id().value());
        clusterObject.setString("type", cluster.type().name());
        toSlime(cluster.min(), clusterObject.setObject("min"));
        toSlime(cluster.max(), clusterObject.setObject("max"));
        toSlime(cluster.current(), clusterObject.setObject("current"));
        // Omit "target" when it is effectively the same as "current".
        if (cluster.target().isPresent()
            && ! cluster.target().get().justNumbers().equals(cluster.current().justNumbers()))
            toSlime(cluster.target().get(), clusterObject.setObject("target"));
        cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested")));
        utilizationToSlime(cluster.utilization(), clusterObject.setObject("utilization"));
        scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
        clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus());
        clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis());
        clusterObject.setDouble("maxQueryGrowthRate", cluster.maxQueryGrowthRate());
        clusterObject.setDouble("currentQueryFractionOfMax", cluster.currentQueryFractionOfMax());
    }
    return new SlimeJsonResponse(slime);
}
/** Maps a node state to its wire name; throws on any state not explicitly listed. */
private static String valueOf(Node.State state) {
    switch (state) {
        case failed: case parked: case dirty: case ready:
        case active: case inactive: case reserved: case provisioned:
            // Each known state's wire name equals its enum constant name.
            return state.name();
        default:
            throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
    }
}
/** Maps an orchestration state to its wire name; "unknown" doubles as the fallback. */
static String valueOf(Node.ServiceState state) {
    switch (state) {
        case expectedUp:      return "expectedUp";
        case allowedDown:     return "allowedDown";
        case permanentlyDown: return "permanentlyDown";
        case unorchestrated:  return "unorchestrated";
        default:              return "unknown";
    }
}
/** Maps a cluster type to its wire name; throws on any type not explicitly listed. */
private static String valueOf(Node.ClusterType type) {
    switch (type) {
        case admin: case content: case container: case combined:
            // Each known type's wire name equals its enum constant name.
            return type.name();
        default:
            throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
    }
}
/** Maps a disk speed to its wire name; throws on any value not explicitly listed. */
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
    switch (diskSpeed) {
        case fast: case slow: case any:
            // Each known value's wire name equals its enum constant name.
            return diskSpeed.name();
        default:
            throw new IllegalArgumentException("Unknown disk speed '" + diskSpeed.name() + "'");
    }
}
/** Maps a storage type to its wire name; throws on any value not explicitly listed. */
private static String valueOf(NodeResources.StorageType storageType) {
    switch (storageType) {
        case remote: case local: case any:
            // Each known value's wire name equals its enum constant name.
            return storageType.name();
        default:
            throw new IllegalArgumentException("Unknown storage type '" + storageType.name() + "'");
    }
}
/**
 * Streams deployment logs from the config server straight through to the client.
 * Query parameters are forwarded verbatim to the config server.
 */
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    DeploymentId deployment = new DeploymentId(application, zone);
    InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
    return new HttpResponse(200) {
        @Override
        public void render(OutputStream outputStream) throws IOException {
            // Close the upstream stream when done — the original leaked it.
            try (InputStream logs = logStream) {
                logs.transferTo(outputStream);
            }
        }
    };
}
/** Fetches proton (content node) metrics for a deployment and returns them as JSON. */
private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
    var deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                      requireZone(environment, region));
    return buildResponseFromProtonMetrics(controller.serviceRegistry().configServer().getProtonMetrics(deployment));
}
/** Wraps the given proton metrics in a {"metrics": [...]} JSON response; 500 on serialization failure. */
private JsonResponse buildResponseFromProtonMetrics(List<ProtonMetrics> protonMetrics) {
    try {
        var metricsArray = jsonMapper.createArrayNode();
        protonMetrics.forEach(metrics -> metricsArray.add(metrics.toJson()));
        var root = jsonMapper.createObjectNode();
        root.set("metrics", metricsArray);
        return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(root));
    } catch (JsonProcessingException e) {
        log.log(Level.SEVERE, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
        return new JsonResponse(500, "");
    }
}
/**
 * Triggers (or, with "reTrigger", re-triggers) the given job for the given instance.
 * "skipTests" in the request body disables the test requirement for forced triggers.
 * The response names the jobs actually triggered, or says none were.
 */
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    boolean requireTests = ! requestObject.field("skipTests").asBool();
    boolean reTrigger = requestObject.field("reTrigger").asBool();
    String triggered = reTrigger
                       ? controller.applications().deploymentTrigger()
                                   .reTrigger(id, type).type().jobName()
                       : controller.applications().deploymentTrigger()
                                   .forceTrigger(id, type, request.getJDiscRequest().getUserPrincipal().getName(), requireTests)
                                   .stream().map(job -> job.type().jobName()).collect(joining(", "));
    return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
                                                   : "Triggered " + triggered + " for " + id);
}
/** Pauses the given job for the maximum allowed duration, counted from now. */
private HttpResponse pause(ApplicationId id, JobType type) {
    Instant pauseUntil = controller.clock().instant().plus(DeploymentTrigger.maxPause);
    controller.applications().deploymentTrigger().pauseJob(id, type, pauseUntil);
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
/** Lifts any pause on the given job. */
private HttpResponse resume(ApplicationId id, JobType type) {
    controller.applications().deploymentTrigger().resumeJob(id, type);
    String message = type.jobName() + " for " + id + " resumed";
    return new MessageResponse(message);
}
/**
 * Serializes an application overview: identity, pending/outstanding changes (taken from
 * the first instance), per-instance details, deploy keys, metrics and activity.
 * Field order is part of the response format — do not reorder.
 */
private void toSlime(Cursor object, com.yahoo.vespa.hosted.controller.Application application, HttpRequest request) {
    object.setString("tenant", application.id().tenant().value());
    object.setString("application", application.id().application().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + application.id().tenant().value() +
                                             "/application/" + application.id().application().value() +
                                             "/job/",
                                             request.getUri()).toString());
    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.latestVersion().ifPresent(version -> toSlime(version, object.setObject("latestVersion")));
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    // Top-level "deploying"/"outstandingChange" reflect only the first instance — TODO confirm this is intended.
    application.instances().values().stream().findFirst().ifPresent(instance -> {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
    });
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    Cursor instancesArray = object.setArray("instances");
    // Optionally restrict to production instances, controlled by a request parameter.
    for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                  : application.instances().values())
        toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/**
 * Serializes one instance inside an application response: pending changes, change
 * blockers, global endpoints and per-zone deployments (recursively expanded when
 * the request asks for it). Field order is part of the response format.
 */
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
    object.setString("instance", instance.name().value());
    if (deploymentSpec.instance(instance.name()).isPresent()) {
        // (Removed an unused local: a sorted job-status list was computed here but never read.)
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
        Cursor changeBlockers = object.setArray("changeBlockers");
        deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    globalEndpointsToSlime(object, instance);
    // Order deployments by the deployment spec when the instance is declared there.
    List<Deployment> deployments = deploymentSpec.instance(instance.name())
                                                 .map(spec -> new DeploymentSteps(spec, controller::system))
                                                 .map(steps -> steps.sortedDeployments(instance.deployments().values()))
                                                 .orElse(List.copyOf(instance.deployments().values()));
    Cursor deploymentsArray = object.setArray("deployments");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = deploymentsArray.addObject();
        if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
        if (recurseOverDeployments(request))
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            // Shallow form: just enough to identify the deployment plus a link to the full view.
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/instance/" + instance.name().value() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
}
/**
 * Writes the instance's non-legacy, rotation-backed global endpoint URLs
 * (deduplicated, in discovery order) and the first assigned rotation id, if any.
 */
private void globalEndpointsToSlime(Cursor object, Instance instance) {
    var endpointUrls = new LinkedHashSet<String>();
    for (Endpoint endpoint : controller.routing().endpointsOf(instance.id())
                                       .requiresRotation()
                                       .not().legacy()
                                       .asList())
        endpointUrls.add(endpoint.url().toString());
    var rotationsArray = object.setArray("globalRotations");
    endpointUrls.forEach(rotationsArray::addString);
    instance.rotations().stream()
            .map(AssignedRotation::rotationId)
            .findFirst()
            .ifPresent(rotation -> object.setString("rotationId", rotation.asString()));
}
/**
 * Serializes the full single-instance view: identity, source/project info, pending
 * changes, change blockers, endpoints, per-zone deployments (plus empty placeholders
 * for declared production zones not yet deployed), deploy keys, metrics and activity.
 * Field order is part of the response format — do not reorder.
 */
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
    com.yahoo.vespa.hosted.controller.Application application = status.application();
    object.setString("tenant", instance.id().tenant().value());
    object.setString("application", instance.id().application().value());
    object.setString("instance", instance.id().instance().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + instance.id().tenant().value() +
                                             "/application/" + instance.id().application().value() +
                                             "/instance/" + instance.id().instance().value() + "/job/",
                                             request.getUri()).toString());
    application.latestVersion().ifPresent(version -> {
        sourceRevisionToSlime(version.source(), object.setObject("source"));
        version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
        version.commit().ifPresent(commit -> object.setString("commit", commit));
    });
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    if (application.deploymentSpec().instance(instance.name()).isPresent()) {
        // (Removed an unused local: a sorted job-status list was computed here but never read.)
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    globalEndpointsToSlime(object, instance);
    // Order deployments by the deployment spec when the instance is declared there.
    List<Deployment> deployments =
            application.deploymentSpec().instance(instance.name())
                       .map(spec -> new DeploymentSteps(spec, controller::system))
                       .map(steps -> steps.sortedDeployments(instance.deployments().values()))
                       .orElse(List.copyOf(instance.deployments().values()));
    Cursor instancesArray = object.setArray("instances");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = instancesArray.addObject();
        if (deployment.zone().environment() == Environment.prod) {
            if (instance.rotations().size() == 1) {
                toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment),
                        deploymentObject);
            }
            if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
            }
        }
        if (recurseOverDeployments(request))
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            // Shallow form: just enough to identify the deployment plus a link to the full view.
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", instance.id().instance().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
    // Add placeholders for declared production deployment zones which have no deployment yet.
    status.jobSteps().keySet().stream()
          .filter(job -> job.application().instance().equals(instance.name()))
          .filter(job -> job.type().isProduction() && job.type().isDeployment())
          .map(job -> job.type().zone(controller.system()))
          .filter(zone -> ! instance.deployments().containsKey(zone))
          .forEach(zone -> {
              Cursor deploymentObject = instancesArray.addObject();
              deploymentObject.setString("environment", zone.environment().value());
              deploymentObject.setString("region", zone.region().value());
          });
    // "pemDeployKey" (singular) is kept alongside the plural list — presumably for older clients; confirm.
    application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/** Serializes one deployment of the given instance; 404 if the instance or deployment is absent. */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment,
                                String region, HttpRequest request) {
    ApplicationId instanceId = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().getInstance(instanceId)
                                  .orElseThrow(() -> new NotExistsException(instanceId + " not found"));
    DeploymentId deploymentId = new DeploymentId(instance.id(), requireZone(environment, region));
    Deployment deployment = instance.deployments().get(deploymentId.zoneId());
    if (deployment == null)
        throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId());
    Slime responseSlime = new Slime();
    toSlime(responseSlime.setObject(), deploymentId, deployment, request);
    return new SlimeJsonResponse(responseSlime);
}
/** Serializes a change: the platform version, and the application revision unless it is unknown. */
private void toSlime(Cursor object, Change change) {
    change.platform().ifPresent(platform -> object.setString("version", platform.toString()));
    change.application().ifPresent(revision -> {
        if ( ! revision.isUnknown())
            toSlime(revision, object.setObject("revision"));
    });
}
/** Serializes one endpoint as a flat object; field order is part of the response format. */
private void toSlime(Endpoint endpoint, Cursor object) {
    object.setString("cluster", endpoint.cluster().value());
    object.setBool("tls", endpoint.tls());
    object.setString("url", endpoint.url().toString());
    object.setString("scope", endpointScopeString(endpoint.scope()));
    object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
}
/**
 * Serializes the detailed view of one deployment: identity, endpoints (zone plus
 * global endpoints targeting this zone), links, versions, rotation status, job
 * status ("complete"/"pending"/"running"), activity and metrics.
 * Field order is part of the response format — do not reorder.
 */
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
    response.setString("tenant", deploymentId.applicationId().tenant().value());
    response.setString("application", deploymentId.applicationId().application().value());
    response.setString("instance", deploymentId.applicationId().instance().value());
    response.setString("environment", deploymentId.zoneId().environment().value());
    response.setString("region", deploymentId.zoneId().region().value());
    var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));
    var endpointArray = response.setArray("endpoints");
    // Zone-scoped endpoints first, then global endpoints which target this zone; legacy endpoints excluded.
    EndpointList zoneEndpoints = controller.routing().endpointsOf(deploymentId)
                                           .scope(Endpoint.Scope.zone)
                                           .not().legacy();
    for (var endpoint : controller.routing().directEndpoints(zoneEndpoints, deploymentId.applicationId())) {
        toSlime(endpoint, endpointArray.addObject());
    }
    EndpointList globalEndpoints = controller.routing().endpointsOf(application, deploymentId.applicationId().instance())
                                             .not().legacy()
                                             .targets(deploymentId.zoneId());
    for (var endpoint : controller.routing().directEndpoints(globalEndpoints, deploymentId.applicationId())) {
        toSlime(endpoint, endpointArray.addObject());
    }
    response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
    response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
    response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
    response.setString("version", deployment.version().toFullString());
    response.setString("revision", deployment.applicationVersion().id());
    response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
    // Expiry only applies in zones with a configured deployment TTL (e.g. dev/perf — confirm).
    controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
              .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));
    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
    sourceRevisionToSlime(deployment.applicationVersion().source(), response);
    var instance = application.instances().get(deploymentId.applicationId().instance());
    if (instance != null) {
        if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);
        // Derive the job status for the deployment job corresponding to this zone, if one exists.
        JobType.from(controller.system(), deployment.zone())
               .map(type -> new JobId(instance.id(), type))
               .map(status.jobSteps()::get)
               .ifPresent(stepStatus -> {
                   JobControllerApiHandlerHelper.applicationVersionToSlime(
                           response.setObject("applicationVersion"), deployment.applicationVersion());
                   if (!status.jobsToRun().containsKey(stepStatus.job().get()))
                       response.setString("status", "complete");
                   else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true))
                       response.setString("status", "pending");
                   else response.setString("status", "running");
               });
    }
    Cursor activity = response.setObject("activity");
    deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    DeploymentMetrics metrics = deployment.metrics();
    Cursor metricsObject = response.setObject("metrics");
    metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
    metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
    metricsObject.setDouble("documentCount", metrics.documentCount());
    metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
    metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
    metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}
/** Writes the given application version to the given object, unless the version is unknown. */
private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
    if (applicationVersion.isUnknown()) return; // nothing useful to report
    object.setLong("buildNumber", applicationVersion.buildNumber().getAsLong());
    object.setString("hash", applicationVersion.id());
    sourceRevisionToSlime(applicationVersion.source(), object.setObject("source"));
    applicationVersion.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
    applicationVersion.commit().ifPresent(commit -> object.setString("commit", commit));
}
/** Writes the repository, branch and commit of the given source revision, if present, to the given object. */
private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
    revision.ifPresent(source -> {
        object.setString("gitRepository", source.repository());
        object.setString("gitBranch", source.branch());
        object.setString("gitCommit", source.commit());
    });
}
/** Writes the given rotation state as a "bcpStatus" object on the given cursor. */
private void toSlime(RotationState state, Cursor object) {
    object.setObject("bcpStatus").setString("rotationStatus", rotationStateString(state));
}
/**
 * Writes the status of each rotation (rotation-backed global endpoint) assigned to the
 * given deployment, as an "endpointStatus" array on the given object.
 */
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
    var array = object.setArray("endpointStatus");
    for (var rotation : rotations) {
        var statusObject = array.addObject();
        // Status across all targets of this rotation; used below only for its last-updated timestamp
        var targets = status.of(rotation.rotationId());
        statusObject.setString("endpointId", rotation.endpointId().id());
        statusObject.setString("rotationId", rotation.rotationId().asString());
        statusObject.setString("clusterId", rotation.clusterId().value());
        // Status of this specific deployment within the rotation
        statusObject.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment)));
        statusObject.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
    }
}
/** Returns the monitoring system URI for the given deployment (exposed as "yamasUrl" in deployment responses). */
private URI monitoringSystemUri(DeploymentId deploymentId) {
    return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}
/**
 * Returns a non-broken, released version at least as old as the oldest platform the given application is on.
 *
 * If no known version is applicable, the newest version at least as old as the oldest platform is selected,
 * among all versions released for this system. If no such versions exists, throws an IllegalStateException.
 */
private Version compileVersion(TenantAndApplicationId id) {
    Version oldestPlatform = controller.applications().oldestInstalledPlatform(id);
    VersionStatus versionStatus = controller.readVersionStatus();
    return versionStatus.versions().stream()
                        // Exclude broken versions: confidence must be at least 'low'
                        .filter(version -> version.confidence().equalOrHigherThan(VespaVersion.Confidence.low))
                        .filter(VespaVersion::isReleased)
                        .map(VespaVersion::versionNumber)
                        // Must not be newer than the oldest platform this application runs on
                        .filter(version -> ! version.isAfter(oldestPlatform))
                        .max(Comparator.naturalOrder())
                        // Fallback: newest applicable version published to the maven repository,
                        // excluding those this system already tracks (they were rejected above)
                        .orElseGet(() -> controller.mavenRepository().metadata().versions().stream()
                                                   .filter(version -> ! version.isAfter(oldestPlatform))
                                                   .filter(version -> ! versionStatus.versions().stream()
                                                                                     .map(VespaVersion::versionNumber)
                                                                                     .collect(Collectors.toSet()).contains(version))
                                                   .max(Comparator.naturalOrder())
                                                   .orElseThrow(() -> new IllegalStateException("No available releases of " +
                                                                                                controller.mavenRepository().artifactId())));
}
/** Sets the given deployment in or out of service, for both rotation-backed and policy-backed global endpoints. */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = requireZone(environment, region);
    Deployment zoneDeployment = instance.deployments().get(zone);
    if (zoneDeployment == null)
        throw new NotExistsException(instance + " has no deployment in " + zone);
    DeploymentId deploymentId = new DeploymentId(instance.id(), zone);
    setGlobalRotationStatus(deploymentId, inService, request);
    setGlobalEndpointStatus(deploymentId, inService, request);
    return new MessageResponse(String.format("Successfully set %s in %s %s service",
                                             instance.id().toShortString(), zone, inService ? "in" : "out of"));
}
/** Set the global endpoint status for given deployment. This only applies to global endpoints backed by a cloud service */
private void setGlobalEndpointStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
    GlobalRouting.Status status = inService ? GlobalRouting.Status.in : GlobalRouting.Status.out;
    GlobalRouting.Agent agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant;
    controller.routing().policies().setGlobalRoutingStatus(deployment, status, agent);
}
/** Set the global rotation status for given deployment. This only applies to global endpoints backed by a rotation */
private void setGlobalRotationStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
    Inspector requestData = toSlime(request.getData()).get();
    String reason = mandatory("reason", requestData).asString();
    GlobalRouting.Agent agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant;
    long timestamp = controller.clock().instant().getEpochSecond();
    EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
    controller.routing().setGlobalRotationStatus(deployment, new EndpointStatus(status, reason, agent.name(), timestamp));
}
/**
 * Returns the in/out-of-service override status for each rotation endpoint of the given deployment.
 * Note: the "globalrotationoverride" array alternates between the upstream id (a string) of each
 * endpoint and an object holding its status details — consumers rely on this interleaved shape.
 */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    Cursor array = slime.setObject().setArray("globalrotationoverride");
    controller.routing().globalRotationStatus(deploymentId)
              .forEach((endpoint, status) -> {
                  array.addString(endpoint.upstreamIdOf(deploymentId));
                  Cursor statusObject = array.addObject();
                  statusObject.setString("status", status.getStatus().name());
                  // Reason and agent may be null; serialize as empty strings
                  statusObject.setString("reason", status.getReason() == null ? "" : status.getReason());
                  statusObject.setString("agent", status.getAgent() == null ? "" : status.getAgent());
                  statusObject.setLong("timestamp", status.getEpoch());
              });
    return new SlimeJsonResponse(slime);
}
/** Returns the BCP rotation status of the given instance's deployment in the given zone, for the resolved rotation. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = requireZone(environment, region);
    RotationId rotation = findRotationId(instance, endpointId);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " has no deployment in " + zone);
    Slime slime = new Slime();
    toSlime(instance.rotationStatus().of(rotation, deployment), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/**
 * Returns metering data for the given application: the current resource usage rate, this and last
 * month's totals, and a per-instance history of resource snapshots, split per resource type.
 */
private HttpResponse metering(String tenant, String application, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    MeteringData meteringData = controller.serviceRegistry()
                                          .meteringService()
                                          .getMeteringData(TenantName.from(tenant), ApplicationName.from(application));
    // The three summary objects all share the same { cpu, mem, disk } shape
    setResources(root.setObject("currentrate"), meteringData.getCurrentSnapshot());
    setResources(root.setObject("thismonth"), meteringData.getThisMonth());
    setResources(root.setObject("lastmonth"), meteringData.getLastMonth());
    // Per-instance time series, one array per resource type
    Map<ApplicationId, List<ResourceSnapshot>> history = meteringData.getSnapshotHistory();
    Cursor details = root.setObject("details");
    Cursor detailsCpu = details.setObject("cpu");
    Cursor detailsMem = details.setObject("mem");
    Cursor detailsDisk = details.setObject("disk");
    history.forEach((applicationId, resources) -> {
        String instanceName = applicationId.instance().value();
        Cursor detailsCpuData = detailsCpu.setObject(instanceName).setArray("data");
        Cursor detailsMemData = detailsMem.setObject(instanceName).setArray("data");
        Cursor detailsDiskData = detailsDisk.setObject(instanceName).setArray("data");
        resources.forEach(resourceSnapshot -> {
            long unixMillis = resourceSnapshot.getTimestamp().toEpochMilli();
            addPoint(detailsCpuData, unixMillis, resourceSnapshot.getCpuCores());
            addPoint(detailsMemData, unixMillis, resourceSnapshot.getMemoryGb());
            addPoint(detailsDiskData, unixMillis, resourceSnapshot.getDiskGb());
        });
    });
    return new SlimeJsonResponse(slime);
}

/** Writes the cpu/mem/disk fields of the given allocation to the given object. */
private static void setResources(Cursor object, ResourceAllocation allocation) {
    object.setDouble("cpu", allocation.getCpuCores());
    object.setDouble("mem", allocation.getMemoryGb());
    object.setDouble("disk", allocation.getDiskGb());
}

/** Adds a { unixms, value } data point to the given array. */
private static void addPoint(Cursor array, long unixMillis, double value) {
    Cursor point = array.addObject();
    point.setLong("unixms", unixMillis);
    point.setDouble("value", value);
}
/** Returns the change (platform and/or application version) currently rolling out for the given instance, if any. */
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    Change change = instance.change();
    if ( ! change.isEmpty()) {
        change.platform().ifPresent(version -> root.setString("platform", version.toString()));
        change.application().ifPresent(applicationVersion -> root.setString("application", applicationVersion.id()));
        root.setBool("pinned", change.isPinned());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns whether orchestration of the given deployment is currently suspended. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    slime.setObject().setBool("suspended", controller.applications().isSuspended(deploymentId));
    return new SlimeJsonResponse(slime);
}
/** Returns the service view of the given deployment, as reported by the config servers. */
private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
    ZoneId zone = requireZone(environment, region);
    ApplicationId id = new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build();
    ServiceApiResponse response = new ServiceApiResponse(zone, id, controller.zoneRegistry().getConfigServerApiUris(zone), request.getUri());
    response.setResponse(applicationView);
    return response;
}
/**
 * Proxies a service API request to the given service in the given deployment.
 * Cluster controller status pages are special-cased and returned as raw HTML.
 */
private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    if ("container-clustercontroller".equals(serviceName) && restPath.contains("/status/")) {
        String[] parts = restPath.split("/status/");
        // split() drops trailing empty strings, so e.g. "foo/status/" yields a single part;
        // guard to return a client error instead of an ArrayIndexOutOfBoundsException
        if (parts.length < 2)
            throw new IllegalArgumentException("Invalid cluster controller status path: " + restPath);
        String result = controller.serviceRegistry().configServer().getClusterControllerStatus(deploymentId, parts[0], parts[1]);
        return new HtmlResponse(result);
    }
    Map<?,?> result = controller.serviceRegistry().configServer().getServiceApiResponse(deploymentId, serviceName, restPath);
    ServiceApiResponse response = new ServiceApiResponse(deploymentId.zoneId(),
                                                         deploymentId.applicationId(),
                                                         controller.zoneRegistry().getConfigServerApiUris(deploymentId.zoneId()),
                                                         request.getUri());
    response.setResponse(result, serviceName, restPath);
    return response;
}
/** Proxies a request for application package content at the given path to the config server. */
private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, String restPath, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deploymentId = new DeploymentId(id, requireZone(environment, region));
    return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, "/" + restPath, request.getUri());
}
/** Updates the tenant with the given name from the request body, and returns the updated tenant. */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName); // 404 on PUT of a non-existent tenant, rather than creating it
    TenantName tenant = TenantName.from(tenantName);
    Inspector body = toSlime(request.getData()).get();
    controller.tenants().update(accessControlRequests.specification(tenant, body),
                                accessControlRequests.credentials(tenant, body, request.getJDiscRequest()));
    return tenant(controller.tenants().require(tenant), request);
}
/** Creates a tenant with the given name from the request body, and returns it. */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    Inspector body = toSlime(request.getData()).get();
    controller.tenants().create(accessControlRequests.specification(tenant, body),
                                accessControlRequests.credentials(tenant, body, request.getJDiscRequest()));
    return tenant(controller.tenants().require(tenant), request);
}
/** Creates the given application, with credentials from the request body, and returns its id. */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    controller.applications().createApplication(id, credentials); // created for its side effect; the returned application is not needed here
    Slime slime = new Slime();
    toSlime(id, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Creates the given instance, first creating its application if that does not yet exist. */
private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    if (controller.applications().getApplication(applicationId).isEmpty())
        createApplication(tenantName, applicationName, request);
    ApplicationId instanceId = applicationId.instance(instanceName);
    controller.applications().createInstance(instanceId);
    Slime slime = new Slime();
    toSlime(instanceId, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
    request = controller.auditLogger().log(request);
    String versionString = readToString(request.getData());
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    // Mutate the application's change under its lock; the response text is built inside the callback
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Version version = Version.fromString(versionString);
        VersionStatus versionStatus = controller.readVersionStatus();
        // An empty version in the request means "the current system version"
        if (version.equals(Version.emptyVersion))
            version = controller.systemVersion(versionStatus);
        if (!versionStatus.isActive(version))
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + versionStatus.versions()
                                                                                  .stream()
                                                                                  .map(VespaVersion::versionNumber)
                                                                                  .map(Version::toString)
                                                                                  .collect(joining(", ")));
        Change change = Change.of(version);
        if (pin)
            change = change.withPin(); // a pinned change is not upgraded away from automatically
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    controller.auditLogger().log(request);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        // Fail with a client-meaningful message when no package has ever been submitted,
        // instead of an unchecked Optional.get() blowing up with NoSuchElementException
        Change change = Change.of(application.get().latestVersion()
                                             .orElseThrow(() -> new IllegalArgumentException("No known application package for " + id)));
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Change change = application.get().require(id.instance()).change();
        if (change.isEmpty()) {
            response.append("No deployment in progress for ").append(id).append(" at this time");
            return;
        }
        // 'choice' must match a ChangesToCancel constant name, case-insensitively.
        // NOTE(review): toUpperCase() uses the default locale; Locale.ROOT would be safer for enum names.
        ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase());
        controller.applications().deploymentTrigger().cancelChange(id, cancel);
        // Re-read the instance to report the change remaining after cancellation
        response.append("Changed deployment from '").append(change).append("' to '").append(controller.applications().requireInstance(id).change()).append("' for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */
private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    List<String> clusterNames = csvProperty(request, "clusterId");
    List<String> documentTypes = csvProperty(request, "documentType");
    controller.applications().reindex(id, zone, clusterNames, documentTypes, request.getBooleanProperty("indexedOnly"));
    // NOTE(review): the "for types" clause is only included when clusters are also given — confirm this nesting is intended
    return new MessageResponse("Requested reindexing of " + id + " in " + zone +
                               (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames) +
                                                              (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes))));
}

/** Returns the non-blank, comma-separated tokens of the given request property, or an empty list if absent. */
private static List<String> csvProperty(HttpRequest request, String property) {
    return Optional.ofNullable(request.getProperty(property)).stream()
                   .flatMap(value -> Stream.of(value.split(",")))
                   .filter(token -> ! token.isBlank())
                   .collect(toUnmodifiableList());
}
/** Gets reindexing status of an application in a zone. */
private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setBool("enabled", reindexing.enabled());
    // One entry per cluster, sorted by cluster name for stable output
    Cursor clustersArray = root.setArray("clusters");
    reindexing.clusters().entrySet().stream().sorted(comparingByKey())
              .forEach(cluster -> {
                  Cursor clusterObject = clustersArray.addObject();
                  clusterObject.setString("name", cluster.getKey());
                  // Document types with reindexing still pending, with the config generation that requires it
                  Cursor pendingArray = clusterObject.setArray("pending");
                  cluster.getValue().pending().entrySet().stream().sorted(comparingByKey())
                         .forEach(pending -> {
                             Cursor pendingObject = pendingArray.addObject();
                             pendingObject.setString("type", pending.getKey());
                             pendingObject.setLong("requiredGeneration", pending.getValue());
                         });
                  // Document types whose reindexing is ready (or done), with detailed status
                  Cursor readyArray = clusterObject.setArray("ready");
                  cluster.getValue().ready().entrySet().stream().sorted(comparingByKey())
                         .forEach(ready -> {
                             Cursor readyObject = readyArray.addObject();
                             readyObject.setString("type", ready.getKey());
                             setStatus(readyObject, ready.getValue());
                         });
              });
    return new SlimeJsonResponse(slime);
}
/** Writes the non-empty fields of the given reindexing status to the given object. */
void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
    status.readyAt().ifPresent(instant -> statusObject.setLong("readyAtMillis", instant.toEpochMilli()));
    status.startedAt().ifPresent(instant -> statusObject.setLong("startedAtMillis", instant.toEpochMilli()));
    status.endedAt().ifPresent(instant -> statusObject.setLong("endedAtMillis", instant.toEpochMilli()));
    status.state().map(ApplicationApiHandler::toString).ifPresent(name -> statusObject.setString("state", name));
    status.message().ifPresent(text -> statusObject.setString("message", text));
    status.progress().ifPresent(fraction -> statusObject.setDouble("progress", fraction));
}
/** Returns the wire name of the given reindexing state, or null for unknown states (filtered out by callers). */
private static String toString(ApplicationReindexing.State state) {
    if (state == ApplicationReindexing.State.PENDING) return "pending";
    if (state == ApplicationReindexing.State.RUNNING) return "running";
    if (state == ApplicationReindexing.State.FAILED) return "failed";
    if (state == ApplicationReindexing.State.SUCCESSFUL) return "successful";
    return null;
}
/** Enables reindexing of an application in a zone. */
private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    controller.applications().enableReindexing(application, zone);
    return new MessageResponse("Enabled reindexing of " + application + " in " + zone);
}
/** Disables reindexing of an application in a zone. */
private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    controller.applications().disableReindexing(application, zone);
    return new MessageResponse("Disabled reindexing of " + application + " in " + zone);
}
/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    // Each filter dimension is optional; absent properties leave the filter unrestricted
    Optional<HostName> hostName = Optional.ofNullable(request.getProperty("hostname")).map(HostName::from);
    Optional<ClusterSpec.Type> clusterType = Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from);
    Optional<ClusterSpec.Id> clusterId = Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from);
    controller.applications().restart(deploymentId, new RestartFilter().withHostName(hostName)
                                                                       .withClusterType(clusterType)
                                                                       .withClusterId(clusterId));
    return new MessageResponse("Requested restart of " + deploymentId);
}
/** Set suspension status of the given deployment. */
private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    controller.applications().setSuspension(deploymentId, suspend);
    String verb = suspend ? "Suspended" : "Resumed";
    return new MessageResponse(verb + " orchestration of " + deploymentId);
}
/**
 * Starts a direct deployment of the given application package for the given job.
 * Only allowed against manually deployed environments, unless the caller is an operator.
 */
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
    if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");

    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( ! dataParts.containsKey("applicationZip"))
        throw new IllegalArgumentException("Missing required form part 'applicationZip'");
    // NOTE(review): the presence check uses the literal "applicationZip" while the read uses
    // EnvironmentResource.APPLICATION_ZIP — confirm these are the same key.
    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                     Optional.of(id.instance()),
                                                                     Optional.of(type.zone(controller.system())),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));

    // Optional "deployOptions" part may pin the Vespa version for this run
    Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions"))
                                        .map(json -> SlimeUtils.jsonToSlime(json).get())
                                        .flatMap(options -> optional("vespaVersion", options))
                                        .map(Version::fromString);

    controller.jobController().deploy(id, type, version, applicationPackage);
    RunId runId = controller.jobController().last(id, type).get().id();
    Slime slime = new Slime();
    Cursor rootObject = slime.setObject();
    rootObject.setString("message", "Deployment started in " + runId +
                                    ". This may take about 15 minutes the first time.");
    rootObject.setLong("run", runId.number());
    return new SlimeJsonResponse(slime);
}
/**
 * Deploys an application package (or a known application version) to the given zone.
 * Handles the system proxy application specially, then falls through to normal applications.
 */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( ! dataParts.containsKey("deployOptions"))
        return ErrorResponse.badRequest("Missing required form part 'deployOptions'");

    Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();

    /*
     * Special handling of the proxy application (the only system application with an application package)
     * Setting any other deployOptions here is not supported for now (e.g. specifying version), but
     * this might be handy later to handle emergency downgrades.
     */
    boolean isZoneApplication = SystemApplication.proxy.id().equals(applicationId);
    if (isZoneApplication) {
        // Explicit versions are rejected: the proxy always deploys on the current system version
        String versionStr = deployOptions.field("vespaVersion").asString();
        boolean versionPresent = !versionStr.isEmpty() && !versionStr.equals("null");
        if (versionPresent) {
            // NOTE(review): plain RuntimeException likely maps to a 5xx; an IllegalArgumentException
            // would signal a client error — confirm which is intended here.
            throw new RuntimeException("Version not supported for system applications");
        }
        // Refuse to deploy the proxy while the system itself is upgrading, or before its version is known
        VersionStatus versionStatus = controller.readVersionStatus();
        if (versionStatus.isUpgrading()) {
            throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
        }
        Optional<VespaVersion> systemVersion = versionStatus.systemVersion();
        if (systemVersion.isEmpty()) {
            throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
        }
        ActivateResult result = controller.applications()
                                          .deploySystemApplicationPackage(SystemApplication.proxy, zone, systemVersion.get().versionNumber());
        return new SlimeJsonResponse(toSlime(result));
    }

    /*
     * Normal applications from here
     */
    Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip"))
                                                              .map(ApplicationPackage::new);
    Optional<com.yahoo.vespa.hosted.controller.Application> application = controller.applications().getApplication(TenantAndApplicationId.from(applicationId));

    // A (sourceRevision, buildNumber) pair identifies a previously built application version
    Inspector sourceRevision = deployOptions.field("sourceRevision");
    Inspector buildNumber = deployOptions.field("buildNumber");
    if (sourceRevision.valid() != buildNumber.valid())
        throw new IllegalArgumentException("Source revision and build number must both be provided, or not");

    Optional<ApplicationVersion> applicationVersion = Optional.empty();
    if (sourceRevision.valid()) {
        if (applicationPackage.isPresent())
            throw new IllegalArgumentException("Application version and application package can't both be provided.");

        applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision),
                                                                 buildNumber.asLong()));
        applicationPackage = Optional.of(controller.applications().getApplicationPackage(applicationId,
                                                                                         applicationVersion.get()));
    }

    boolean deployDirectly = deployOptions.field("deployDirectly").asBool();
    Optional<Version> vespaVersion = optional("vespaVersion", deployOptions).map(Version::new);

    // Direct redeploy with nothing specified: reuse the package and versions of the existing deployment
    if (deployDirectly && applicationPackage.isEmpty() && applicationVersion.isEmpty() && vespaVersion.isEmpty()) {
        // Redeploy the zone with the same versions
        Optional<Deployment> deployment = controller.applications().getInstance(applicationId)
                                                    .map(Instance::deployments)
                                                    .flatMap(deployments -> Optional.ofNullable(deployments.get(zone)));

        if(deployment.isEmpty())
            throw new IllegalArgumentException("Can't redeploy application, no deployment currently exist");

        ApplicationVersion version = deployment.get().applicationVersion();
        if(version.isUnknown())
            throw new IllegalArgumentException("Can't redeploy application, application version is unknown");

        applicationVersion = Optional.of(version);
        vespaVersion = Optional.of(deployment.get().version());
        applicationPackage = Optional.of(controller.applications().getApplicationPackage(applicationId,
                                                                                         applicationVersion.get()));
    }

    DeployOptions deployOptionsJsonClass = new DeployOptions(deployDirectly,
                                                             vespaVersion,
                                                             deployOptions.field("ignoreValidationErrors").asBool(),
                                                             deployOptions.field("deployCurrentVersion").asBool());

    applicationPackage.ifPresent(aPackage -> controller.applications().verifyApplicationIdentityConfiguration(applicationId.tenant(),
                                                                                                              Optional.of(applicationId.instance()),
                                                                                                              Optional.of(zone),
                                                                                                              aPackage,
                                                                                                              Optional.of(requireUserPrincipal(request))));

    ActivateResult result = controller.applications().deploy(applicationId,
                                                             zone,
                                                             applicationPackage,
                                                             applicationVersion,
                                                             deployOptionsJsonClass);

    return new SlimeJsonResponse(toSlime(result));
}
/** Deletes the tenant with the given name, if it exists, using credentials from the request body. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");

    Tenant existing = tenant.get();
    Credentials credentials = accessControlRequests.credentials(existing.name(),
                                                                toSlime(request.getData()).get(),
                                                                request.getJDiscRequest());
    controller.tenants().delete(existing.name(), credentials);
    return tenant(existing, request);
}
/** Deletes the given application and all its instances, authorized by credentials from the request body. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    controller.applications().deleteApplication(id,
                                                accessControlRequests.credentials(id.tenant(),
                                                                                  toSlime(request.getData()).get(),
                                                                                  request.getJDiscRequest()));
    return new MessageResponse("Deleted application " + id);
}
/** Deletes the given instance, and the enclosing application as well when no instances remain. */
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
controller.applications().deleteInstance(id.instance(instanceName));
// Cascade: if this was the last instance, remove the application itself.
// Only this cascading delete reads credentials from the request payload.
if (controller.applications().requireApplication(id).instances().isEmpty()) {
Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
controller.applications().deleteApplication(id, credentials);
}
return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString());
}
/** Deactivates the deployment of the given instance in the given zone. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId instance = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId id = new DeploymentId(instance, requireZone(environment, region));
    controller.applications().deactivate(id.applicationId(), id.zoneId());
    return new MessageResponse("Deactivated " + id);
}
/** Returns test config for the indicated job, including all production deployments of the default instance. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
ApplicationId defaultInstanceId = TenantAndApplicationId.from(id).defaultInstance();
// Collect all production deployments of the default instance of this application.
HashSet<DeploymentId> deployments = controller.applications()
.getInstance(defaultInstanceId).stream()
.flatMap(instance -> instance.productionDeployments().keySet().stream())
.map(zone -> new DeploymentId(defaultInstanceId, zone))
.collect(Collectors.toCollection(HashSet::new));
var testedZone = type.zone(controller.system());
// For non-production jobs, also include the zone the tested instance itself deploys to.
if ( ! type.isProduction())
deployments.add(new DeploymentId(id, testedZone));
return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
type,
false,
controller.routing().zoneEndpointsOf(deployments),
controller.applications().reachableContentClustersByZone(deployments)));
}
/** Parses a source revision from the given object; "repository", "branch" and "commit" are all required. */
private static SourceRevision toSourceRevision(Inspector object) {
    Inspector repository = object.field("repository");
    Inspector branch = object.field("branch");
    Inspector commit = object.field("commit");
    if ( ! (repository.valid() && branch.valid() && commit.valid()))
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    return new SourceRevision(repository.asString(), branch.asString(), commit.asString());
}
/** Returns the tenant with the given name, or throws NotExistsException if there is none. */
private Tenant getTenantOrThrow(String tenantName) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty())
        throw new NotExistsException(new TenantId(tenantName));
    return tenant.get();
}
/**
 * Serializes the full tenant view: type-specific metadata, the tenant's applications
 * (recursively, when requested), and tenant meta data.
 */
private void toSlime(Cursor object, Tenant tenant, HttpRequest request) {
object.setString("tenant", tenant.name().value());
object.setString("type", tenantType(tenant));
List<com.yahoo.vespa.hosted.controller.Application> applications = controller.applications().asList(tenant.name());
switch (tenant.type()) {
// Athenz tenants expose domain, property and (when known) contact information.
case athenz:
AthenzTenant athenzTenant = (AthenzTenant) tenant;
object.setString("athensDomain", athenzTenant.domain().getName());
object.setString("property", athenzTenant.property().id());
athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
athenzTenant.contact().ifPresent(c -> {
object.setString("propertyUrl", c.propertyUrl().toString());
object.setString("contactsUrl", c.url().toString());
object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
Cursor contactsArray = object.setArray("contacts");
c.persons().forEach(persons -> {
Cursor personArray = contactsArray.addArray();
persons.forEach(personArray::addString);
});
});
break;
// Cloud tenants expose creator, developer keys, secret stores and quota usage.
case cloud: {
CloudTenant cloudTenant = (CloudTenant) tenant;
cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName()));
Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
cloudTenant.developerKeys().forEach((key, user) -> {
Cursor keyObject = pemDeveloperKeysArray.addObject();
keyObject.setString("key", KeyUtils.toPem(key));
keyObject.setString("user", user.getName());
});
toSlime(object, cloudTenant.tenantSecretStores());
// Quota used is the sum over all this tenant's applications.
var tenantQuota = controller.serviceRegistry().billingController().getQuota(tenant.name());
var usedQuota = applications.stream()
.map(com.yahoo.vespa.hosted.controller.Application::quotaUsage)
.reduce(QuotaUsage.none, QuotaUsage::add);
toSlime(tenantQuota, usedQuota, object.setObject("quota"));
break;
}
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
Cursor applicationArray = object.setArray("applications");
for (com.yahoo.vespa.hosted.controller.Application application : applications) {
DeploymentStatus status = controller.jobController().deploymentStatus(application);
// Either the full instance view (recursive) or just a reference, per request properties.
for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
: application.instances().values())
if (recurseOverApplications(request))
toSlime(applicationArray.addObject(), instance, status, request);
else
toSlime(instance.id(), applicationArray.addObject(), request);
}
tenantMetaDataToSlime(tenant, object.setObject("metaData"));
}
/** Serializes quota limits and current usage; an absent budget is rendered as nix. */
private void toSlime(Quota quota, QuotaUsage usage, Cursor object) {
    if (quota.budget().isPresent())
        object.setDouble("budget", quota.budget().get().doubleValue());
    else
        object.setNix("budget");
    object.setDouble("budgetUsed", usage.rate());
    quota.maxClusterSize().ifPresent(size -> object.setLong("clusterSize", size));
}
/** Serializes cluster resources with a derived total cost, rounded to two decimals. */
private void toSlime(ClusterResources resources, Cursor object) {
object.setLong("nodes", resources.nodes());
object.setLong("groups", resources.groups());
toSlime(resources.nodeResources(), object.setObject("nodeResources"));
// Non-public systems divide cost by 3 — NOTE(review): rationale for the factor is not visible here; confirm its meaning.
double costDivisor = controller.zoneRegistry().system().isPublic() ? 1.0 : 3.0;
// Multiply/divide by 100 to round the per-deployment cost to two decimal places.
object.setDouble("cost", Math.round(resources.nodes() * resources.nodeResources().cost() * 100.0 / costDivisor) / 100.0);
}
/** Serializes measured and ideal utilization for cpu, memory and disk. */
private void utilizationToSlime(Cluster.Utilization utilization, Cursor object) {
    object.setDouble("cpu", utilization.cpu());
    object.setDouble("idealCpu", utilization.idealCpu());
    object.setDouble("memory", utilization.memory());
    object.setDouble("idealMemory", utilization.idealMemory());
    object.setDouble("disk", utilization.disk());
    object.setDouble("idealDisk", utilization.idealDisk());
}
/** Serializes each scaling event as an object with "from", "to" and "at" fields. */
private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) {
    scalingEvents.forEach(event -> {
        Cursor eventObject = scalingEventsArray.addObject();
        toSlime(event.from(), eventObject.setObject("from"));
        toSlime(event.to(), eventObject.setObject("to"));
        eventObject.setLong("at", event.at().toEpochMilli());
    });
}
/** Serializes per-node resources; disk speed and storage type are rendered as strings. */
private void toSlime(NodeResources resources, Cursor target) {
    target.setDouble("vcpu", resources.vcpu());
    target.setDouble("memoryGb", resources.memoryGb());
    target.setDouble("diskGb", resources.diskGb());
    target.setDouble("bandwidthGbps", resources.bandwidthGbps());
    target.setString("diskSpeed", valueOf(resources.diskSpeed()));
    target.setString("storageType", valueOf(resources.storageType()));
}
/** Writes a tenant summary entry for the tenant list: name, type-specific metadata, and its API url. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
object.setString("tenant", tenant.name().value());
Cursor metaData = object.setObject("metaData");
metaData.setString("type", tenantType(tenant));
switch (tenant.type()) {
case athenz:
AthenzTenant athenzTenant = (AthenzTenant) tenant;
metaData.setString("athensDomain", athenzTenant.domain().getName());
metaData.setString("property", athenzTenant.property().id());
break;
// Cloud tenants carry no extra metadata in the list view.
case cloud: break;
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/**
 * Serializes tenant activity meta data: creation time, the most recent dev deployment,
 * the most recent production submission, and last login times per user level.
 */
private void tenantMetaDataToSlime(Tenant tenant, Cursor object) {
List<com.yahoo.vespa.hosted.controller.Application> applications = controller.applications().asList(tenant.name());
// Latest start of any dev-environment job run, across all instances of all applications.
Optional<Instant> lastDev = applications.stream()
.flatMap(application -> application.instances().values().stream())
.flatMap(instance -> controller.jobController().jobs(instance.id()).stream()
.filter(jobType -> jobType.environment() == Environment.dev)
.flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream()))
.map(Run::start)
.max(Comparator.naturalOrder());
// Latest build time of any application's most recent version.
Optional<Instant> lastSubmission = applications.stream()
.flatMap(app -> app.latestVersion().flatMap(ApplicationVersion::buildTime).stream())
.max(Comparator.naturalOrder());
object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli());
lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli()));
lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli()));
tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user)
.ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli()));
tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer)
.ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli()));
tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator)
.ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli()));
}
/**
 * Returns a copy of the given URI with the scheme, host and port kept, the path set to the
 * given path, and the query set to the given query (null clears the query).
 */
private URI withPathAndQuery(String newPath, String newQuery, URI uri) {
try {
return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null);
}
catch (URISyntaxException e) {
// Inputs come from an already-parsed URI, so reconstruction cannot fail.
throw new RuntimeException("Will not happen", e);
}
}
/** Returns a copy of the given URI with the host and port kept, the path set to the given path, and no query. */
private URI withPath(String newPath, URI uri) {
return withPathAndQuery(newPath, null, uri);
}
/** Returns the application/v4 API path identifying the given deployment. */
private String toPath(DeploymentId id) {
    ApplicationId application = id.applicationId();
    ZoneId zone = id.zoneId();
    return path("/application", "v4",
                "tenant", application.tenant(),
                "application", application.application(),
                "instance", application.instance(),
                "environment", zone.environment(),
                "region", zone.region());
}
/**
 * Parses the given value as a long.
 *
 * @param valueOrNull the string to parse, or null
 * @param defaultWhenNull the value returned when valueOrNull is null
 * @throws IllegalArgumentException when the value is non-null and not a valid long
 */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null) return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    }
    catch (NumberFormatException e) {
        // Keep the original parse failure as the cause instead of discarding it.
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'", e);
    }
}
/** Serializes a job run summary: number, target platform, revision (when known), reason and timestamp. */
private void toSlime(Run run, Cursor object) {
object.setLong("id", run.id().number());
object.setString("version", run.versions().targetPlatform().toFullString());
if ( ! run.versions().targetApplication().isUnknown())
toSlime(run.versions().targetApplication(), object.setObject("revision"));
// The trigger reason is not tracked here; a fixed placeholder is always emitted.
object.setString("reason", "unknown reason");
// End time when the run has finished, otherwise its start time.
object.setLong("at", run.end().orElse(run.start()).toEpochMilli());
}
/**
 * Reads the given stream (at most 1 MB) and parses it as JSON.
 *
 * @throws RuntimeException wrapping the underlying IOException when reading fails
 */
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        // Preserve the cause; the original code threw a bare RuntimeException, losing all context.
        throw new RuntimeException("Failed reading request body", e);
    }
}
/** Returns the user principal of the given request, or throws InternalServerErrorException when absent. */
private static Principal requireUserPrincipal(HttpRequest request) {
    Principal principal = request.getJDiscRequest().getUserPrincipal();
    if (principal == null)
        throw new InternalServerErrorException("Expected a user principal");
    return principal;
}
/** Returns the named field of the given object, failing with IllegalArgumentException when it is missing. */
private Inspector mandatory(String key, Inspector object) {
    Inspector field = object.field(key);
    if ( ! field.valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return field;
}
/** Returns the string value of the named field, or empty when the field is missing. */
private Optional<String> optional(String key, Inspector object) {
return SlimeUtils.optionalString(object.field(key));
}
/** Joins the string representations of the given elements with '/'. */
private static String path(Object... elements) {
return Joiner.on("/").join(elements);
}
/** Writes the tenant and application names of the given id, together with its API url. */
private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    String urlPath = "/application/v4" +
                     "/tenant/" + id.tenant().value() +
                     "/application/" + id.application().value();
    object.setString("url", withPath(urlPath, request.getUri()).toString());
}
/** Writes the tenant, application and instance names of the given id, together with its API url. */
private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    object.setString("instance", id.instance().value());
    String urlPath = "/application/v4" +
                     "/tenant/" + id.tenant().value() +
                     "/application/" + id.application().value() +
                     "/instance/" + id.instance().value();
    object.setString("url", withPath(urlPath, request.getUri()).toString());
}
/** Serializes an activation result: revision, package size, prepare log, and config change actions. */
private Slime toSlime(ActivateResult result) {
Slime slime = new Slime();
Cursor object = slime.setObject();
object.setString("revisionId", result.revisionId().id());
object.setLong("applicationZipSize", result.applicationZipSizeBytes());
Cursor logArray = object.setArray("prepareMessages");
// The prepare log may be null; the array is still emitted, just left empty.
if (result.prepareResponse().log != null) {
for (Log logMessage : result.prepareResponse().log) {
Cursor logObject = logArray.addObject();
logObject.setLong("time", logMessage.time);
logObject.setString("level", logMessage.level);
logObject.setString("message", logMessage.message);
}
}
Cursor changeObject = object.setObject("configChangeActions");
// Services which must be restarted for the config change to take effect.
Cursor restartActionsArray = changeObject.setArray("restart");
for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
Cursor restartActionObject = restartActionsArray.addObject();
restartActionObject.setString("clusterName", restartAction.clusterName);
restartActionObject.setString("clusterType", restartAction.clusterType);
restartActionObject.setString("serviceType", restartAction.serviceType);
serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
}
// Document types which must be re-fed for the config change to take effect.
Cursor refeedActionsArray = changeObject.setArray("refeed");
for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
Cursor refeedActionObject = refeedActionsArray.addObject();
refeedActionObject.setString("name", refeedAction.name);
refeedActionObject.setString("documentType", refeedAction.documentType);
refeedActionObject.setString("clusterName", refeedAction.clusterName);
serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
}
return slime;
}
/** Serializes each service info as { serviceName, serviceType, configId, hostName }. */
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
    serviceInfoList.forEach(info -> {
        Cursor entry = array.addObject();
        entry.setString("serviceName", info.serviceName);
        entry.setString("serviceType", info.serviceType);
        entry.setString("configId", info.configId);
        entry.setString("hostName", info.hostName);
    });
}
/** Adds each of the given strings to the given array. */
private void stringsToSlime(List<String> strings, Cursor array) {
    strings.forEach(array::addString);
}
/** Serializes the given secret stores under a "secretStores" array on the given object. */
private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) {
    Cursor array = object.setArray("secretStores");
    for (TenantSecretStore store : tenantSecretStores) {
        Cursor entry = array.addObject();
        entry.setString("name", store.getName());
        entry.setString("awsId", store.getAwsId());
        entry.setString("role", store.getRole());
    }
}
/**
 * Reads the entire stream as a single string, or returns null when the stream is empty.
 * NOTE(review): decodes with the platform default charset — presumably request bodies are UTF-8; confirm before changing.
 */
private String readToString(InputStream stream) {
    // try-with-resources: the original leaked the Scanner; closing it also closes the
    // fully-consumed underlying stream.
    try (Scanner scanner = new Scanner(stream).useDelimiter("\\A")) {
        return scanner.hasNext() ? scanner.next() : null;
    }
}
/** True when the "recursive" property asks for tenant level or deeper. */
private static boolean recurseOverTenants(HttpRequest request) {
    if (recurseOverApplications(request)) return true;
    return "tenant".equals(request.getProperty("recursive"));
}
/** True when the "recursive" property asks for application level or deeper. */
private static boolean recurseOverApplications(HttpRequest request) {
    if (recurseOverDeployments(request)) return true;
    return "application".equals(request.getProperty("recursive"));
}
/** True when the "recursive" property asks for deployment level. */
private static boolean recurseOverDeployments(HttpRequest request) {
// Guava's ImmutableSet.contains(null) returns false, so an absent property is handled safely.
return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive"));
}
/** True when the request asks for production instances only; null-safe and case-sensitive. */
private static boolean showOnlyProductionInstances(HttpRequest request) {
return "true".equals(request.getProperty("production"));
}
/** Returns the API name of the given tenant's type. */
private static String tenantType(Tenant tenant) {
    switch (tenant.type()) {
        case athenz: return "ATHENS";
        case cloud: return "CLOUD";
        // Report the actual type value, consistent with the other tenant-type switches in
        // this class, instead of the implementation class name.
        default: throw new IllegalArgumentException("Unknown tenant type: " + tenant.type());
    }
}
/** Builds an ApplicationId from the tenant/application/instance path segments. */
private static ApplicationId appIdFromPath(Path path) {
return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
}
/** Resolves the job type named by the "jobtype" path segment. */
private static JobType jobTypeFromPath(Path path) {
return JobType.fromJobName(path.get("jobtype"));
}
/** Builds a RunId from the application, job type and run number path segments. */
private static RunId runIdFromPath(Path path) {
    return new RunId(appIdFromPath(path), jobTypeFromPath(path), Long.parseLong(path.get("number")));
}
/**
 * Handles an application submission: parses the multipart request (options, application zip,
 * test zip), validates the source URL and identity configuration, and forwards to the job controller.
 */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
Map<String, byte[]> dataParts = parseDataParts(request);
Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
// Project ids are 1-based; clamp absent/zero values up to 1.
long projectId = Math.max(1, submitOptions.field("projectId").asLong());
Optional<String> repository = optional("repository", submitOptions);
Optional<String> branch = optional("branch", submitOptions);
Optional<String> commit = optional("commit", submitOptions);
// A source revision is only formed when all three of repository, branch and commit are given.
Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
: Optional.empty();
Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
Optional<String> authorEmail = optional("authorEmail", submitOptions);
sourceUrl.map(URI::create).ifPresent(url -> {
if (url.getHost() == null || url.getScheme() == null)
throw new IllegalArgumentException("Source URL must include scheme and host");
});
ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
Optional.empty(),
Optional.empty(),
applicationPackage,
Optional.of(requireUserPrincipal(request)));
return JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
tenant,
application,
sourceRevision,
authorEmail,
sourceUrl,
projectId,
applicationPackage,
dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP));
}
/** Submits a deployment-removal package for the given application, removing all production deployments. */
private HttpResponse removeAllProdDeployments(String tenant, String application) {
// NOTE(review): the response from submitResponse is discarded; only a generic message is returned.
JobControllerApiHandlerHelper.submitResponse(controller.jobController(), tenant, application,
Optional.empty(), Optional.empty(), Optional.empty(), 1,
ApplicationPackage.deploymentRemoval(), new byte[0]);
return new MessageResponse("All deployments removed");
}
/** Returns the zone for the given environment and region, failing for zones unknown to the registry. */
private ZoneId requireZone(String environment, String region) {
    ZoneId zone = ZoneId.from(environment, region);
    // The pseudo-zone prod.controller is always accepted without consulting the registry.
    boolean isControllerZone = zone.environment() == Environment.prod && zone.region().value().equals("controller");
    if ( ! isControllerZone && ! controller.zoneRegistry().hasZone(zone))
        throw new IllegalArgumentException("Zone " + zone + " does not exist in this system");
    return zone;
}
/**
 * Parses the multipart request body into named parts, verifying the body against the
 * base64 SHA-256 digest in the X-Content-Hash header when that header is present.
 */
private static Map<String, byte[]> parseDataParts(HttpRequest request) {
String contentHash = request.getHeader("X-Content-Hash");
if (contentHash == null)
return new MultipartParser().parse(request);
// Digest the body while parsing, then compare against the client-supplied hash.
DigestInputStream digester = Signatures.sha256Digester(request.getData());
var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
return dataParts;
}
/**
 * Resolves which global rotation of the instance a request refers to: by endpoint id when
 * given, otherwise the single assigned rotation — ambiguous or absent rotations fail.
 */
private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
if (instance.rotations().isEmpty()) {
throw new NotExistsException("global rotation does not exist for " + instance);
}
if (endpointId.isPresent()) {
// Explicit endpoint id: find the matching assigned rotation or fail with 404.
return instance.rotations().stream()
.filter(r -> r.endpointId().id().equals(endpointId.get()))
.map(AssignedRotation::rotationId)
.findFirst()
.orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() +
" does not exist for " + instance));
} else if (instance.rotations().size() > 1) {
// Without an endpoint id the choice would be ambiguous.
throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
}
return instance.rotations().get(0).rotationId();
}
/** Translates a rotation state to its API string; unrecognized states map to "UNKNOWN". */
private static String rotationStateString(RotationState state) {
    if (state == RotationState.in) return "IN";
    if (state == RotationState.out) return "OUT";
    return "UNKNOWN";
}
/** Translates an endpoint scope to its API string. */
private static String endpointScopeString(Endpoint.Scope scope) {
    if (scope == Endpoint.Scope.region) return "region";
    if (scope == Endpoint.Scope.global) return "global";
    if (scope == Endpoint.Scope.zone) return "zone";
    throw new IllegalArgumentException("Unknown endpoint scope " + scope);
}
/** Translates a routing method to its API string. */
private static String routingMethodString(RoutingMethod method) {
    if (method == RoutingMethod.exclusive) return "exclusive";
    if (method == RoutingMethod.shared) return "shared";
    if (method == RoutingMethod.sharedLayer4) return "sharedLayer4";
    throw new IllegalArgumentException("Unknown routing method " + method);
}
/** Returns the request context attribute with the given name, cast to the given class. */
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
// NOTE(review): an attribute that is present but of the wrong type yields the same "was not set" message.
return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName))
.filter(cls::isInstance)
.map(cls::cast)
.orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request"));
}
/** Returns whether the given request was made by a hosted operator. */
private static boolean isOperator(HttpRequest request) {
    return getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class)
            .roles().stream()
            .anyMatch(role -> role.definition() == RoleDefinition.hostedOperator);
}
} |
Probably enough to just return a `MessageResponse` for this | private HttpResponse allowArchiveAccess(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
var data = toSlime(request.getData()).get();
var role = mandatory("role", data).asString();
var tenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
if (role.isBlank()) {
return ErrorResponse.badRequest("Archive access role can't be whitespace only");
}
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withArchiveAccessRole(Optional.of(role));
controller.tenants().store(lockedTenant);
});
tenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
var slime = new Slime();
toSlime(slime.setObject(), tenant.tenantSecretStores());
return new SlimeJsonResponse(slime);
} | toSlime(slime.setObject(), tenant.tenantSecretStores()); | private HttpResponse allowArchiveAccess(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
var data = toSlime(request.getData()).get();
var role = mandatory("role", data).asString();
if (role.isBlank()) {
return ErrorResponse.badRequest("Archive access role can't be whitespace only");
}
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withArchiveAccessRole(Optional.of(role));
controller.tenants().store(lockedTenant);
});
return new MessageResponse("Archive access role set to '" + role + "' for tenant " + tenantName + ".");
} | class ApplicationApiHandler extends LoggingRequestHandler {
// NOTE(review): not referenced in this part of the file; confirm it is used elsewhere before removing.
private static final ObjectMapper jsonMapper = new ObjectMapper();
// Request paths may carry an optional /api prefix, stripped before path matching in handle().
private static final String OPTIONAL_PREFIX = "/api";
private final Controller controller;
private final AccessControlRequests accessControlRequests;
private final TestConfigSerializer testConfigSerializer;
/** Created by dependency injection; the test config serializer is derived from the controller's system. */
@Inject
public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx,
Controller controller,
AccessControlRequests accessControlRequests) {
super(parentCtx);
this.controller = controller;
this.accessControlRequests = accessControlRequests;
this.testConfigSerializer = new TestConfigSerializer(controller.system());
}
@Override
public Duration getTimeout() {
// Generous request timeout — presumably to accommodate slow deployment operations; confirm before lowering.
return Duration.ofMinutes(20);
}
/** Dispatches to the per-method handler and maps known exception types to HTTP error responses. */
@Override
public HttpResponse handle(HttpRequest request) {
try {
// Strip the optional /api prefix before matching paths.
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
switch (request.getMethod()) {
case GET: return handleGET(path, request);
case PUT: return handlePUT(path, request);
case POST: return handlePOST(path, request);
case PATCH: return handlePATCH(path, request);
case DELETE: return handleDELETE(path, request);
case OPTIONS: return handleOPTIONS();
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
// Known exception types map to specific HTTP status codes.
catch (ForbiddenException e) {
return ErrorResponse.forbidden(Exceptions.toMessageString(e));
}
catch (NotAuthorizedException e) {
return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
}
catch (NotExistsException e) {
return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
// Config server errors keep their own error code in the response body.
catch (ConfigServerException e) {
switch (e.getErrorCode()) {
case NOT_FOUND:
return new ErrorResponse(NOT_FOUND, e.getErrorCode().name(), Exceptions.toMessageString(e));
case ACTIVATION_CONFLICT:
return new ErrorResponse(CONFLICT, e.getErrorCode().name(), Exceptions.toMessageString(e));
case INTERNAL_SERVER_ERROR:
return new ErrorResponse(INTERNAL_SERVER_ERROR, e.getErrorCode().name(), Exceptions.toMessageString(e));
default:
return new ErrorResponse(BAD_REQUEST, e.getErrorCode().name(), Exceptions.toMessageString(e));
}
}
// Anything else is unexpected: log it and answer 500.
catch (RuntimeException e) {
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
private HttpResponse handleGET(Path path, HttpRequest request) {
if (path.matches("/application/v4/")) return root(request);
if (path.matches("/application/v4/tenant")) return tenants(request);
if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request);
if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/metering")) return metering(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return metrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
return ErrorResponse.notFoundError("Nothing at " + path);
}
// Routes PUT requests to the handler for the first matching URI pattern.
// Patterns are tried in order, so keep overlapping patterns ordered most-specific first.
private HttpResponse handlePUT(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return allowArchiveAccess(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
// Both path orderings (instance before and after environment/region) are accepted for rotation override.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
// Routes POST requests to the handler for the first matching URI pattern.
// Patterns are tried in order, so keep overlapping patterns ordered most-specific first.
// Application-level paths (no instance segment) operate on the "default" instance.
private HttpResponse handlePOST(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
// Deployment to a zone: both with and without the trailing "/deploy" segment are accepted.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
// Legacy path ordering with the instance segment last.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes PATCH requests; both recognized paths patch the same application-level metadata. */
private HttpResponse handlePATCH(Path path, HttpRequest request) {
    if (   path.matches("/application/v4/tenant/{tenant}/application/{application}")
        || path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}"))
        return patchApplication(path.get("tenant"), path.get("application"), request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
// Routes DELETE requests to the handler for the first matching URI pattern.
// Patterns are tried in order, so keep overlapping patterns ordered most-specific first.
private HttpResponse handleDELETE(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return removeArchiveAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
// Cancelling a change without a choice segment cancels "all"; application-level paths use the "default" instance.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
// Legacy path ordering with the instance segment last.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Answers OPTIONS with the supported verbs in the Allow header; the body is intentionally empty. */
private HttpResponse handleOPTIONS() {
    var response = new EmptyResponse();
    response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
    return response;
}
/** Renders every tenant, fully expanded, as a JSON array. */
private HttpResponse recursiveRoot(HttpRequest request) {
    var slime = new Slime();
    var tenantArray = slime.setArray();
    controller.tenants().asList()
              .forEach(tenant -> toSlime(tenantArray.addObject(), tenant, request));
    return new SlimeJsonResponse(slime);
}
/** Root listing: expands tenants recursively when requested, otherwise returns the resource index. */
private HttpResponse root(HttpRequest request) {
    if (recurseOverTenants(request))
        return recursiveRoot(request);
    return new ResourceResponse(request, "tenant");
}
/** Renders the tenant list in its compact (list-entry) form. */
private HttpResponse tenants(HttpRequest request) {
    var slime = new Slime();
    var tenantArray = slime.setArray();
    controller.tenants().asList()
              .forEach(tenant -> tenantInTenantsListToSlime(tenant, request.getUri(), tenantArray.addObject()));
    return new SlimeJsonResponse(slime);
}
/** Renders the named tenant, or 404 when it does not exist. */
private HttpResponse tenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    return tenant(tenant.get(), request);
}
/** Renders a single tenant as a JSON object. */
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
    var slime = new Slime();
    var root = slime.setObject();
    toSlime(root, tenant, request);
    return new SlimeJsonResponse(slime);
}
/** Renders the info record of a cloud tenant; 404 for unknown or non-cloud tenants. */
private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isEmpty() || tenant.get().type() != Tenant.Type.cloud)
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
    return tenantInfo(((CloudTenant) tenant.get()).info(), request);
}
/** Serializes a tenant info record; an empty record yields an empty JSON object. */
private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) {
    var slime = new Slime();
    var root = slime.setObject();
    if ( ! info.isEmpty()) {
        root.setString("name", info.name());
        root.setString("email", info.email());
        root.setString("website", info.website());
        root.setString("invoiceEmail", info.invoiceEmail());
        root.setString("contactName", info.contactName());
        root.setString("contactEmail", info.contactEmail());
        toSlime(info.address(), root);
        toSlime(info.billingContact(), root);
    }
    return new SlimeJsonResponse(slime);
}
/** Writes a non-empty address under an "address" key on the given cursor; empty addresses are omitted. */
private void toSlime(TenantInfoAddress address, Cursor parentCursor) {
    if ( ! address.isEmpty()) {
        Cursor cursor = parentCursor.setObject("address");
        cursor.setString("addressLines", address.addressLines());
        cursor.setString("postalCodeOrZip", address.postalCodeOrZip());
        cursor.setString("city", address.city());
        cursor.setString("stateRegionProvince", address.stateRegionProvince());
        cursor.setString("country", address.country());
    }
}
/** Writes a non-empty billing contact under a "billingContact" key; empty contacts are omitted. */
private void toSlime(TenantInfoBillingContact billingContact, Cursor parentCursor) {
    if ( ! billingContact.isEmpty()) {
        Cursor cursor = parentCursor.setObject("billingContact");
        cursor.setString("name", billingContact.name());
        cursor.setString("email", billingContact.email());
        cursor.setString("phone", billingContact.phone());
        toSlime(billingContact.address(), cursor);
    }
}
/** Updates the info record of a cloud tenant; 404 for unknown or non-cloud tenants. */
private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isEmpty() || tenant.get().type() != Tenant.Type.cloud)
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
    return updateTenantInfo((CloudTenant) tenant.get(), request);
}
/**
 * Returns the string value of the given field when it is present (valid),
 * or the supplied default otherwise. (Fixes the "defaultVale" parameter typo.)
 */
private String getString(Inspector field, String defaultValue) {
    return field.valid() ? field.asString() : defaultValue;
}
/**
 * Merges the request body onto the tenant's existing info, field by field:
 * fields absent from the request keep their stored value.
 *
 * Fixes two wrong fallbacks in the original merge: "website" fell back to
 * oldInfo.email() and "contactEmail" fell back to oldInfo.contactName(),
 * silently corrupting those fields on partial updates.
 */
private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) {
    TenantInfo oldInfo = tenant.info();
    Inspector insp = toSlime(request.getData()).get();
    TenantInfo mergedInfo = TenantInfo.EMPTY
            .withName(getString(insp.field("name"), oldInfo.name()))
            .withEmail(getString(insp.field("email"), oldInfo.email()))
            .withWebsite(getString(insp.field("website"), oldInfo.website()))
            .withInvoiceEmail(getString(insp.field("invoiceEmail"), oldInfo.invoiceEmail()))
            .withContactName(getString(insp.field("contactName"), oldInfo.contactName()))
            .withContactEmail(getString(insp.field("contactEmail"), oldInfo.contactEmail()))
            .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address()))
            .withBillingContact(updateTenantInfoBillingContact(insp.field("billingContact"), oldInfo.billingContact()));
    // Store under lock so concurrent updates do not lose writes.
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}
/**
 * Merges an address object from the request onto the stored address.
 * When the request has no address object at all, the stored one is kept unchanged;
 * otherwise each field falls back to its stored value when absent.
 */
private TenantInfoAddress updateTenantInfoAddress(Inspector insp, TenantInfoAddress oldAddress) {
    if ( ! insp.valid()) return oldAddress;
    TenantInfoAddress merged = TenantInfoAddress.EMPTY
            .withCountry(getString(insp.field("country"), oldAddress.country()))
            .withStateRegionProvince(getString(insp.field("stateRegionProvince"), oldAddress.stateRegionProvince()))
            .withCity(getString(insp.field("city"), oldAddress.city()))
            .withPostalCodeOrZip(getString(insp.field("postalCodeOrZip"), oldAddress.postalCodeOrZip()))
            .withAddressLines(getString(insp.field("addressLines"), oldAddress.addressLines()));
    return merged;
}
/**
 * Merges a billing-contact object from the request onto the stored contact.
 * When the request has no billingContact object at all, the stored one is kept unchanged;
 * otherwise each field falls back to its stored value when absent.
 */
private TenantInfoBillingContact updateTenantInfoBillingContact(Inspector insp, TenantInfoBillingContact oldContact) {
    if ( ! insp.valid()) return oldContact;
    TenantInfoBillingContact merged = TenantInfoBillingContact.EMPTY
            .withName(getString(insp.field("name"), oldContact.name()))
            .withEmail(getString(insp.field("email"), oldContact.email()))
            .withPhone(getString(insp.field("phone"), oldContact.phone()))
            .withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address()));
    return merged;
}
// Lists the tenant's applications — optionally filtered to one application name —
// each with its URL and its instances (production instances only, when the request asks for that).
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
TenantName tenant = TenantName.from(tenantName);
if (controller.tenants().get(tenantName).isEmpty())
return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
Slime slime = new Slime();
Cursor applicationArray = slime.setArray();
for (com.yahoo.vespa.hosted.controller.Application application : controller.applications().asList(tenant)) {
// An empty filter matches every application; a present filter must equal the application name exactly.
if (applicationName.map(application.id().application().value()::equals).orElse(true)) {
Cursor applicationObject = applicationArray.addObject();
applicationObject.setString("tenant", application.id().tenant().value());
applicationObject.setString("application", application.id().application().value());
applicationObject.setString("url", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value(),
request.getUri()).toString());
Cursor instanceArray = applicationObject.setArray("instances");
for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
: application.instances().keySet()) {
Cursor instanceObject = instanceArray.addObject();
instanceObject.setString("instance", instance.value());
instanceObject.setString("url", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value() +
"/instance/" + instance.value(),
request.getUri()).toString());
}
}
}
return new SlimeJsonResponse(slime);
}
/**
 * Returns the dev application package last deployed to the job's zone as a zip download.
 *
 * @throws IllegalArgumentException for job types whose environment is not manually deployed
 */
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
    if ( ! type.environment().isManuallyDeployed())
        throw new IllegalArgumentException("Only manually deployed zones have dev packages");
    ZoneId zone = type.zone(controller.system());
    String filename = id.toFullString() + "." + zone.value() + ".zip";
    return new ZipResponse(filename, controller.applications().applicationStore().getDev(id, zone));
}
/**
 * Returns the stored application package for the requested build number, or for the
 * latest submitted build when no "build" request property is given, as a zip download.
 *
 * Removes an unused local (an ApplicationId that was constructed but never read) and
 * only builds the filename once a package is known to exist.
 *
 * @throws IllegalArgumentException if the "build" property is not a parseable number
 * @throws NotExistsException if no build has been submitted, or no package exists for the build
 */
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
    var tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
    long buildNumber;
    var requestedBuild = Optional.ofNullable(request.getProperty("build")).map(build -> {
        try {
            return Long.parseLong(build);
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("Invalid build number", e);
        }
    });
    if (requestedBuild.isEmpty()) { // No explicit build: fall back to the latest submitted one.
        var application = controller.applications().requireApplication(tenantAndApplication);
        var latestBuild = application.latestVersion().map(ApplicationVersion::buildNumber).orElse(OptionalLong.empty());
        if (latestBuild.isEmpty()) {
            throw new NotExistsException("No application package has been submitted for '" + tenantAndApplication + "'");
        }
        buildNumber = latestBuild.getAsLong();
    } else {
        buildNumber = requestedBuild.get();
    }
    var applicationPackage = controller.applications().applicationStore().find(tenantAndApplication.tenant(), tenantAndApplication.application(), buildNumber);
    if (applicationPackage.isEmpty()) {
        throw new NotExistsException("No application package found for '" +
                                     tenantAndApplication +
                                     "' with build number " + buildNumber);
    }
    var filename = tenantAndApplication + "-build" + buildNumber + ".zip";
    return new ZipResponse(filename, applicationPackage.get());
}
/** Renders a single application as a JSON object. */
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
    var slime = new Slime();
    var root = slime.setObject();
    toSlime(root, getApplication(tenantName, applicationName), request);
    return new SlimeJsonResponse(slime);
}
/** Returns the application's compile version as {"compileVersion": "..."} . */
private HttpResponse compileVersion(String tenantName, String applicationName) {
    var version = compileVersion(TenantAndApplicationId.from(tenantName, applicationName));
    var slime = new Slime();
    slime.setObject().setString("compileVersion", version.toFullString());
    return new SlimeJsonResponse(slime);
}
/** Renders a single instance, including the application's deployment status. */
private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    var slime = new Slime();
    var root = slime.setObject();
    toSlime(root,
            getInstance(tenantName, applicationName, instanceName),
            controller.jobController().deploymentStatus(getApplication(tenantName, applicationName)),
            request);
    return new SlimeJsonResponse(slime);
}
/**
 * Registers the PEM public key in the request body as a developer key for the
 * requesting user, and returns the tenant's full key list.
 *
 * @throws IllegalArgumentException if the tenant is not a cloud tenant
 */
private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    Principal user = request.getJDiscRequest().getUserPrincipal();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(toSlime(request.getData()).get().field("key").asString());
    Slime root = new Slime();
    // Mutate and serialize under lock so the response reflects exactly what was stored.
    controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withDeveloperKey(developerKey, user);
        toSlime(root.setObject().setArray("keys"), lockedTenant.get().developerKeys());
        controller.tenants().store(lockedTenant);
    });
    return new SlimeJsonResponse(root);
}
/**
 * Asks the config server of the given deployment to validate a configured secret store.
 *
 * Fixes a cast-before-check bug: the original cast the tenant to CloudTenant before
 * testing its type, so a non-cloud tenant produced a ClassCastException instead of
 * the intended 400 response.
 */
private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) {
    var awsRegion = request.getProperty("aws-region");
    var parameterName = request.getProperty("parameter-name");
    var applicationId = ApplicationId.fromFullString(request.getProperty("application-id"));
    var zoneId = ZoneId.from(request.getProperty("zone"));
    var deploymentId = new DeploymentId(applicationId, zoneId);
    // Note: the tenant is looked up from the application id, not from the tenantName path segment.
    var requiredTenant = controller.tenants().require(applicationId.tenant());
    if (requiredTenant.type() != Tenant.Type.cloud) {
        return ErrorResponse.badRequest("Tenant '" + applicationId.tenant() + "' is not a cloud tenant");
    }
    var tenant = (CloudTenant) requiredTenant;
    var tenantSecretStore = tenant.tenantSecretStores()
            .stream()
            .filter(secretStore -> secretStore.getName().equals(secretStoreName))
            .findFirst();
    if (tenantSecretStore.isEmpty())
        return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'");
    var response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName);
    try {
        var responseRoot = new Slime();
        var responseCursor = responseRoot.setObject();
        responseCursor.setString("target", deploymentId.toString());
        var responseResultCursor = responseCursor.setObject("result");
        var responseSlime = SlimeUtils.jsonToSlime(response);
        SlimeUtils.copyObject(responseSlime.get(), responseResultCursor);
        return new SlimeJsonResponse(responseRoot);
    } catch (JsonParseException e) {
        // The config server answered with something that is not JSON: pass it through as a 500.
        return ErrorResponse.internalServerError(response);
    }
}
/**
 * Removes the PEM public key in the request body from the tenant's developer keys
 * and returns the remaining key list.
 *
 * Drops an unused local (the Principal looked up for the key was never read) and
 * its redundant second tenant lookup.
 *
 * @throws IllegalArgumentException if the tenant is not a cloud tenant
 */
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(toSlime(request.getData()).get().field("key").asString());
    Slime root = new Slime();
    // Mutate and serialize under lock so the response reflects exactly what was stored.
    controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withoutDeveloperKey(developerKey);
        toSlime(root.setObject().setArray("keys"), lockedTenant.get().developerKeys());
        controller.tenants().store(lockedTenant);
    });
    return new SlimeJsonResponse(root);
}
/** Serializes each developer key as {"key": <pem>, "user": <principal name>} into the given array. */
private void toSlime(Cursor keysArray, Map<PublicKey, Principal> keys) {
    for (var entry : keys.entrySet()) {
        Cursor keyObject = keysArray.addObject();
        keyObject.setString("key", KeyUtils.toPem(entry.getKey()));
        keyObject.setString("user", entry.getValue().getName());
    }
}
/**
 * Adds the PEM public key in the request body as a deploy key for the application
 * and returns the resulting key list.
 */
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(toSlime(request.getData()).get().field("key").asString());
    Slime root = new Slime();
    // Mutate and serialize under lock so the response reflects exactly what was stored.
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withDeployKey(deployKey);
        Cursor keysArray = root.setObject().setArray("keys");
        for (PublicKey key : application.get().deployKeys())
            keysArray.addString(KeyUtils.toPem(key));
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}
/**
 * Removes the PEM public key in the request body from the application's deploy keys
 * and returns the remaining key list.
 */
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(toSlime(request.getData()).get().field("key").asString());
    Slime root = new Slime();
    // Mutate and serialize under lock so the response reflects exactly what was stored.
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withoutDeployKey(deployKey);
        Cursor keysArray = root.setObject().setArray("keys");
        for (PublicKey key : application.get().deployKeys())
            keysArray.addString(KeyUtils.toPem(key));
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}
/**
 * Configures a new secret store for a cloud tenant: creates the backing tenant policy,
 * registers the store, persists it on the tenant, and returns the tenant's store list.
 *
 * Removes the String.format wrappers around already-concatenated messages — they added
 * nothing and would throw IllegalFormatException if the store's toString contained '%'.
 *
 * @throws IllegalArgumentException if the tenant is not a cloud tenant
 */
private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    var data = toSlime(request.getData()).get();
    var awsId = mandatory("awsId", data).asString();
    var externalId = mandatory("externalId", data).asString();
    var role = mandatory("role", data).asString();
    var tenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
    var tenantSecretStore = new TenantSecretStore(name, awsId, role);
    if ( ! tenantSecretStore.isValid()) {
        return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid");
    }
    if (tenant.tenantSecretStores().contains(tenantSecretStore)) {
        return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured");
    }
    controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role);
    controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId);
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withSecretStore(tenantSecretStore);
        controller.tenants().store(lockedTenant);
    });
    // Re-read the tenant so the response reflects the stored state.
    tenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
    var slime = new Slime();
    toSlime(slime.setObject(), tenant.tenantSecretStores());
    return new SlimeJsonResponse(slime);
}
/** Deletes the named secret store from the tenant, its cloud-side resources and policy, and returns the remaining stores. */
private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) {
    CloudTenant cloudTenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
    Optional<TenantSecretStore> storeToRemove = cloudTenant.tenantSecretStores().stream()
                                                           .filter(store -> store.getName().equals(name))
                                                           .findFirst();
    if (storeToRemove.isEmpty())
        return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found");

    TenantSecretStore store = storeToRemove.get();
    controller.serviceRegistry().tenantSecretService().deleteSecretStore(cloudTenant.name(), store);
    controller.serviceRegistry().roleService().deleteTenantPolicy(cloudTenant.name(), store.getName(), store.getRole());
    controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        controller.tenants().store(lockedTenant.withoutSecretStore(store));
    });

    // Re-read so the response reflects the stored state
    CloudTenant updatedTenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
    Slime slime = new Slime();
    toSlime(slime.setObject(), updatedTenant.tenantSecretStores());
    return new SlimeJsonResponse(slime);
}
/** Clears the archive access role of the given cloud tenant and returns the tenant's secret stores. */
private HttpResponse removeArchiveAccess(String tenantName) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

    CloudTenant cloudTenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
    controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        controller.tenants().store(lockedTenant.withArchiveAccessRole(Optional.empty()));
    });

    // Re-read so the response reflects the stored state
    CloudTenant updatedTenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
    Slime slime = new Slime();
    toSlime(slime.setObject(), updatedTenant.tenantSecretStores());
    return new SlimeJsonResponse(slime);
}
/**
 * Applies a partial update to the application: optionally sets the pinned major version
 * (0 clears it) and/or adds a deploy key. Returns a message describing what changed.
 */
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector patch = toSlime(request.getData()).get();
    StringJoiner messages = new StringJoiner("\n").setEmptyValue("No applicable changes.");
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        Inspector majorVersionField = patch.field("majorVersion");
        if (majorVersionField.valid()) {
            // A value of 0 means "unpin the major version"
            Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
            application = application.withMajorVersion(majorVersion);
            messages.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
        }
        Inspector pemDeployKeyField = patch.field("pemDeployKey");
        if (pemDeployKeyField.valid()) {
            String pemDeployKey = pemDeployKeyField.asString();
            application = application.withDeployKey(KeyUtils.fromPemEncodedPublicKey(pemDeployKey));
            messages.add("Added deploy key " + pemDeployKey);
        }
        controller.applications().store(application);
    });
    return new MessageResponse(messages.toString());
}
/** Returns the application with the given tenant and application name, or throws NotExistsException. */
private com.yahoo.vespa.hosted.controller.Application getApplication(String tenantName, String applicationName) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    return controller.applications().getApplication(id)
                     .orElseThrow(() -> new NotExistsException(id + " not found"));
}
/** Returns the instance with the given tenant, application and instance name, or throws NotExistsException. */
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    return controller.applications().getInstance(id)
                     .orElseThrow(() -> new NotExistsException(id + " not found"));
}
/** Returns the nodes allocated to the given instance in the given zone, serialized as JSON. */
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, id);
    Slime slime = new Slime();
    Cursor nodesArray = slime.setObject().setArray("nodes");
    for (Node node : nodes) {
        Cursor nodeObject = nodesArray.addObject();
        nodeObject.setString("hostname", node.hostname().value());
        nodeObject.setString("state", valueOf(node.state()));
        // Only set for nodes reserved to a particular tenant
        node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
        nodeObject.setString("orchestration", valueOf(node.serviceState()));
        nodeObject.setString("version", node.currentVersion().toString());
        nodeObject.setString("flavor", node.flavor());
        toSlime(node.resources(), nodeObject);
        nodeObject.setBool("fastDisk", node.resources().diskSpeed() == NodeResources.DiskSpeed.fast);
        nodeObject.setString("clusterId", node.clusterId());
        nodeObject.setString("clusterType", valueOf(node.clusterType()));
    }
    return new SlimeJsonResponse(slime);
}
/** Returns autoscaling details (min/max/current/target resources, utilization, scaling events) for each cluster of the deployment. */
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);
    Slime slime = new Slime();
    Cursor clustersObject = slime.setObject().setObject("clusters");
    for (Cluster cluster : application.clusters().values()) {
        Cursor clusterObject = clustersObject.setObject(cluster.id().value());
        clusterObject.setString("type", cluster.type().name());
        toSlime(cluster.min(), clusterObject.setObject("min"));
        toSlime(cluster.max(), clusterObject.setObject("max"));
        toSlime(cluster.current(), clusterObject.setObject("current"));
        // Only expose the target when it actually differs from the current resources
        if (cluster.target().isPresent()
            && ! cluster.target().get().justNumbers().equals(cluster.current().justNumbers()))
            toSlime(cluster.target().get(), clusterObject.setObject("target"));
        cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested")));
        utilizationToSlime(cluster.utilization(), clusterObject.setObject("utilization"));
        scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
        clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus());
        clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis());
        clusterObject.setDouble("maxQueryGrowthRate", cluster.maxQueryGrowthRate());
        clusterObject.setDouble("currentQueryFractionOfMax", cluster.currentQueryFractionOfMax());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns the wire name of the given node state; throws IllegalArgumentException for unexpected states. */
private static String valueOf(Node.State state) {
    final String value;
    switch (state) {
        case failed:      value = "failed"; break;
        case parked:      value = "parked"; break;
        case dirty:       value = "dirty"; break;
        case ready:       value = "ready"; break;
        case active:      value = "active"; break;
        case inactive:    value = "inactive"; break;
        case reserved:    value = "reserved"; break;
        case provisioned: value = "provisioned"; break;
        default: throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
    }
    return value;
}
/** Returns the wire name of the given orchestration state; anything unrecognized maps to "unknown". */
static String valueOf(Node.ServiceState state) {
    final String value;
    switch (state) {
        case expectedUp:      value = "expectedUp"; break;
        case allowedDown:     value = "allowedDown"; break;
        case permanentlyDown: value = "permanentlyDown"; break;
        case unorchestrated:  value = "unorchestrated"; break;
        default:              value = "unknown"; break;
    }
    return value;
}
/** Returns the wire name of the given cluster type; throws IllegalArgumentException for unexpected types. */
private static String valueOf(Node.ClusterType type) {
    final String value;
    switch (type) {
        case admin:     value = "admin"; break;
        case content:   value = "content"; break;
        case container: value = "container"; break;
        case combined:  value = "combined"; break;
        default: throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
    }
    return value;
}
/** Returns the wire name of the given disk speed; throws IllegalArgumentException for unknown values. */
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
    final String value;
    switch (diskSpeed) {
        case fast: value = "fast"; break;
        case slow: value = "slow"; break;
        case any:  value = "any"; break;
        default: throw new IllegalArgumentException("Unknown disk speed '" + diskSpeed.name() + "'");
    }
    return value;
}
/** Returns the wire name of the given storage type; throws IllegalArgumentException for unknown values. */
private static String valueOf(NodeResources.StorageType storageType) {
    final String value;
    switch (storageType) {
        case remote: value = "remote"; break;
        case local:  value = "local"; break;
        case any:    value = "any"; break;
        default: throw new IllegalArgumentException("Unknown storage type '" + storageType.name() + "'");
    }
    return value;
}
/**
 * Streams the logs of the given deployment to the client.
 *
 * The log stream obtained from the config server was previously never closed; it is now
 * closed when rendering completes (or fails), so the underlying connection is released.
 */
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    DeploymentId deployment = new DeploymentId(application, zone);
    InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
    return new HttpResponse(200) {
        @Override
        public void render(OutputStream outputStream) throws IOException {
            try (logStream) { // close the upstream log stream once fully transferred
                logStream.transferTo(outputStream);
            }
        }
    };
}
/** Returns Proton (content node) metrics for the given deployment as JSON. */
private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    return buildResponseFromProtonMetrics(controller.serviceRegistry().configServer().getProtonMetrics(deployment));
}
/** Wraps the given Proton metrics in a pretty-printed JSON response; returns 500 on serialization failure. */
private JsonResponse buildResponseFromProtonMetrics(List<ProtonMetrics> protonMetrics) {
    try {
        var root = jsonMapper.createObjectNode();
        var metricsArray = jsonMapper.createArrayNode();
        protonMetrics.forEach(metrics -> metricsArray.add(metrics.toJson()));
        root.set("metrics", metricsArray);
        return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(root));
    } catch (JsonProcessingException e) {
        log.log(Level.SEVERE, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
        return new JsonResponse(500, "");
    }
}
/**
 * Triggers the given job for the given instance: either re-runs the last run ("reTrigger")
 * or force-triggers a new run, optionally skipping tests ("skipTests").
 */
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    boolean requireTests = ! requestObject.field("skipTests").asBool();
    String triggered;
    if (requestObject.field("reTrigger").asBool())
        triggered = controller.applications().deploymentTrigger()
                              .reTrigger(id, type).type().jobName();
    else
        triggered = controller.applications().deploymentTrigger()
                              .forceTrigger(id, type, request.getJDiscRequest().getUserPrincipal().getName(), requireTests)
                              .stream().map(job -> job.type().jobName()).collect(joining(", "));
    if (triggered.isEmpty())
        return new MessageResponse("Job " + type.jobName() + " for " + id + " not triggered");
    return new MessageResponse("Triggered " + triggered + " for " + id);
}
/** Pauses the given job for the maximum allowed pause period. */
private HttpResponse pause(ApplicationId id, JobType type) {
    controller.applications().deploymentTrigger()
              .pauseJob(id, type, controller.clock().instant().plus(DeploymentTrigger.maxPause));
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
/** Resumes a previously paused job for the given instance. */
private HttpResponse resume(ApplicationId id, JobType type) {
    controller.applications().deploymentTrigger().resumeJob(id, type);
    return new MessageResponse(type.jobName() + " for " + id + " resumed");
}
/**
 * Serializes the given application — versions, pending changes, instances, deploy keys,
 * service quality metrics and activity — into {@code object}.
 */
private void toSlime(Cursor object, com.yahoo.vespa.hosted.controller.Application application, HttpRequest request) {
    object.setString("tenant", application.id().tenant().value());
    object.setString("application", application.id().application().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + application.id().tenant().value() +
                                             "/application/" + application.id().application().value() +
                                             "/job/",
                                             request.getUri()).toString());

    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.latestVersion().ifPresent(version -> toSlime(version, object.setObject("latestVersion")));
    application.projectId().ifPresent(id -> object.setLong("projectId", id));

    // Deploying and outstanding changes are taken from the first instance only
    // (NOTE(review): presumably the instances share the same change — confirm)
    application.instances().values().stream().findFirst().ifPresent(instance -> {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
    });

    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));

    Cursor instancesArray = object.setArray("instances");
    for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                  : application.instances().values())
        toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);

    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);

    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());

    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/**
 * Serializes a summary of the given instance — pending changes, change blockers, global
 * endpoints and deployments — into {@code object}.
 *
 * Change: removed the local {@code List<JobStatus> jobStatus}, which was computed from
 * the deployment trigger but never referenced afterwards (dead code).
 */
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
    object.setString("instance", instance.name().value());
    if (deploymentSpec.instance(instance.name()).isPresent()) {
        // Changes currently rolling out, and changes not yet started
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));

        // Windows in which upgrades and/or revisions may not roll out
        Cursor changeBlockers = object.setArray("changeBlockers");
        deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    globalEndpointsToSlime(object, instance);

    // Deployments in deployment-spec order when a spec exists, insertion order otherwise
    List<Deployment> deployments = deploymentSpec.instance(instance.name())
                                                 .map(spec -> new DeploymentSteps(spec, controller::system))
                                                 .map(steps -> steps.sortedDeployments(instance.deployments().values()))
                                                 .orElse(List.copyOf(instance.deployments().values()));
    Cursor deploymentsArray = object.setArray("deployments");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = deploymentsArray.addObject();
        if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
        if (recurseOverDeployments(request)) // Include all deployment information
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else { // Just list the names and link to details
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/instance/" + instance.name().value() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
}
/** Adds the instance's de-duplicated global rotation endpoint URLs, and its first rotation id, to {@code object}. */
private void globalEndpointsToSlime(Cursor object, Instance instance) {
    Cursor globalRotationsArray = object.setArray("globalRotations");
    controller.routing().endpointsOf(instance.id())
              .requiresRotation()
              .not().legacy()
              .asList().stream()
              .map(Endpoint::url)
              .map(URI::toString)
              .distinct() // same order-preserving de-duplication as collecting into a LinkedHashSet
              .forEach(globalRotationsArray::addString);

    instance.rotations().stream()
            .map(AssignedRotation::rotationId)
            .findFirst()
            .ifPresent(rotation -> object.setString("rotationId", rotation.asString()));
}
/**
 * Serializes the given instance in full — ids, source, pending changes, change blockers,
 * global endpoints, deployments (including undeployed production zones), deploy keys,
 * metrics and activity — into {@code object}.
 */
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
    com.yahoo.vespa.hosted.controller.Application application = status.application();
    object.setString("tenant", instance.id().tenant().value());
    object.setString("application", instance.id().application().value());
    object.setString("instance", instance.id().instance().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + instance.id().tenant().value() +
                                             "/application/" + instance.id().application().value() +
                                             "/instance/" + instance.id().instance().value() + "/job/",
                                             request.getUri()).toString());

    application.latestVersion().ifPresent(version -> {
        sourceRevisionToSlime(version.source(), object.setObject("source"));
        version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
        version.commit().ifPresent(commit -> object.setString("commit", commit));
    });

    application.projectId().ifPresent(id -> object.setLong("projectId", id));

    if (application.deploymentSpec().instance(instance.name()).isPresent()) {
        // NOTE(review): jobStatus is computed but never used below — looks like dead code; confirm and remove
        List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
                                              .steps(application.deploymentSpec().requireInstance(instance.name()))
                                              .sortedJobs(status.instanceJobs(instance.name()).values());

        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));

        // Windows in which upgrades and/or revisions may not roll out
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }

    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));

    globalEndpointsToSlime(object, instance);

    // Deployments in deployment-spec order when a spec exists, insertion order otherwise
    List<Deployment> deployments =
            application.deploymentSpec().instance(instance.name())
                       .map(spec -> new DeploymentSteps(spec, controller::system))
                       .map(steps -> steps.sortedDeployments(instance.deployments().values()))
                       .orElse(List.copyOf(instance.deployments().values()));
    Cursor instancesArray = object.setArray("instances");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = instancesArray.addObject();

        // Rotation status is only relevant for production deployments
        if (deployment.zone().environment() == Environment.prod) {
            if (instance.rotations().size() == 1) {
                toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment),
                        deploymentObject);
            }
            if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
            }
        }

        if (recurseOverDeployments(request)) // Include all deployment information
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else { // Just list names and link to details
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", instance.id().instance().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }

    // Add empty entries for the production deployment zones this instance has jobs for but is not yet deployed to
    status.jobSteps().keySet().stream()
          .filter(job -> job.application().instance().equals(instance.name()))
          .filter(job -> job.type().isProduction() && job.type().isDeployment())
          .map(job -> job.type().zone(controller.system()))
          .filter(zone -> ! instance.deployments().containsKey(zone))
          .forEach(zone -> {
              Cursor deploymentObject = instancesArray.addObject();
              deploymentObject.setString("environment", zone.environment().value());
              deploymentObject.setString("region", zone.region().value());
          });

    // "pemDeployKey" is the legacy single-key field; "pemDeployKeys" lists all keys
    application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);

    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());

    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/** Returns the details of a single deployment of the given instance, or throws NotExistsException. */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment,
                                String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().getInstance(id)
                                  .orElseThrow(() -> new NotExistsException(id + " not found"));

    ZoneId zone = requireZone(environment, region);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " is not deployed in " + zone);

    Slime slime = new Slime();
    toSlime(slime.setObject(), new DeploymentId(instance.id(), zone), deployment, request);
    return new SlimeJsonResponse(slime);
}
/** Serializes the given change: the platform version, and the revision when it is known. */
private void toSlime(Cursor object, Change change) {
    change.platform().ifPresent(platform -> object.setString("version", platform.toString()));
    change.application().ifPresent(revision -> {
        if ( ! revision.isUnknown())
            toSlime(revision, object.setObject("revision"));
    });
}
/** Serializes the given endpoint's cluster, TLS flag, URL, scope and routing method into {@code target}. */
private void toSlime(Endpoint endpoint, Cursor target) {
    target.setString("cluster", endpoint.cluster().value());
    target.setBool("tls", endpoint.tls());
    target.setString("url", endpoint.url().toString());
    target.setString("scope", endpointScopeString(endpoint.scope()));
    target.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
}
/**
 * Serializes full details of a deployment — ids, endpoints, links, versions, rotation and
 * job status, activity and metrics — into {@code response}.
 */
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
    response.setString("tenant", deploymentId.applicationId().tenant().value());
    response.setString("application", deploymentId.applicationId().application().value());
    response.setString("instance", deploymentId.applicationId().instance().value());
    response.setString("environment", deploymentId.zoneId().environment().value());
    response.setString("region", deploymentId.zoneId().region().value());
    var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));

    // Add zone-scoped endpoints first, then global endpoints targeting this zone — legacy endpoints excluded
    var endpointArray = response.setArray("endpoints");
    EndpointList zoneEndpoints = controller.routing().endpointsOf(deploymentId)
                                           .scope(Endpoint.Scope.zone)
                                           .not().legacy();
    for (var endpoint : controller.routing().directEndpoints(zoneEndpoints, deploymentId.applicationId())) {
        toSlime(endpoint, endpointArray.addObject());
    }
    EndpointList globalEndpoints = controller.routing().endpointsOf(application, deploymentId.applicationId().instance())
                                             .not().legacy()
                                             .targets(deploymentId.zoneId());
    for (var endpoint : controller.routing().directEndpoints(globalEndpoints, deploymentId.applicationId())) {
        toSlime(endpoint, endpointArray.addObject());
    }

    // Links to related resources
    response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
    response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
    response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());

    response.setString("version", deployment.version().toFullString());
    response.setString("revision", deployment.applicationVersion().id());
    response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
    // Expiry only applies to zones with a deployment time-to-live (e.g. dev/perf)
    controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
              .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));

    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
    sourceRevisionToSlime(deployment.applicationVersion().source(), response);

    var instance = application.instances().get(deploymentId.applicationId().instance());
    if (instance != null) {
        if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);

        // Job status for the job corresponding to this zone, when such a job exists
        JobType.from(controller.system(), deployment.zone())
               .map(type -> new JobId(instance.id(), type))
               .map(status.jobSteps()::get)
               .ifPresent(stepStatus -> {
                   JobControllerApiHandlerHelper.applicationVersionToSlime(
                           response.setObject("applicationVersion"), deployment.applicationVersion());
                   // complete: nothing left to run; pending: not yet ready (or readiness unknown); running otherwise
                   if (!status.jobsToRun().containsKey(stepStatus.job().get()))
                       response.setString("status", "complete");
                   else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true))
                       response.setString("status", "pending");
                   else response.setString("status", "running");
               });
    }

    Cursor activity = response.setObject("activity");
    deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

    DeploymentMetrics metrics = deployment.metrics();
    Cursor metricsObject = response.setObject("metrics");
    metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
    metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
    metricsObject.setDouble("documentCount", metrics.documentCount());
    metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
    metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
    metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}
/** Serializes the given application version's build number, hash, source and commit; no-op for unknown versions. */
private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
    if (applicationVersion.isUnknown()) return;
    object.setLong("buildNumber", applicationVersion.buildNumber().getAsLong());
    object.setString("hash", applicationVersion.id());
    sourceRevisionToSlime(applicationVersion.source(), object.setObject("source"));
    applicationVersion.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
    applicationVersion.commit().ifPresent(commit -> object.setString("commit", commit));
}
/** Serializes repository, branch and commit of the given source revision, if present. */
private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
    revision.ifPresent(source -> {
        object.setString("gitRepository", source.repository());
        object.setString("gitBranch", source.branch());
        object.setString("gitCommit", source.commit());
    });
}
/** Serializes the given rotation state into a "bcpStatus" object on {@code object}. */
private void toSlime(RotationState state, Cursor object) {
    Cursor statusObject = object.setObject("bcpStatus");
    statusObject.setString("rotationStatus", rotationStateString(state));
}
/** Serializes per-rotation endpoint status for the given deployment into an "endpointStatus" array on {@code object}. */
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
    Cursor endpointStatusArray = object.setArray("endpointStatus");
    for (AssignedRotation assignedRotation : rotations) {
        Cursor statusObject = endpointStatusArray.addObject();
        var targets = status.of(assignedRotation.rotationId());
        statusObject.setString("endpointId", assignedRotation.endpointId().id());
        statusObject.setString("rotationId", assignedRotation.rotationId().asString());
        statusObject.setString("clusterId", assignedRotation.clusterId().value());
        statusObject.setString("status", rotationStateString(status.of(assignedRotation.rotationId(), deployment)));
        statusObject.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
    }
}
/** Returns the monitoring-system dashboard URI for the given deployment, as resolved by the zone registry. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
    return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}
/**
 * Returns a non-broken, released version at least as old as the oldest platform the given application is on.
 *
 * If no known version is applicable, the newest version at least as old as the oldest platform is selected,
 * among all versions released for this system. If no such versions exists, throws an IllegalStateException.
 */
private Version compileVersion(TenantAndApplicationId id) {
    Version oldestPlatform = controller.applications().oldestInstalledPlatform(id);
    VersionStatus versionStatus = controller.readVersionStatus();
    // Primary choice: the newest released version with at least "low" confidence, not newer than the oldest platform
    return versionStatus.versions().stream()
                        .filter(version -> version.confidence().equalOrHigherThan(VespaVersion.Confidence.low))
                        .filter(VespaVersion::isReleased)
                        .map(VespaVersion::versionNumber)
                        .filter(version -> ! version.isAfter(oldestPlatform))
                        .max(Comparator.naturalOrder())
                        // Fallback: the newest version in the maven repository not newer than the oldest platform,
                        // excluding versions the version status already knows about (those were rejected above)
                        .orElseGet(() -> controller.mavenRepository().metadata().versions().stream()
                                                   .filter(version -> ! version.isAfter(oldestPlatform))
                                                   .filter(version -> ! versionStatus.versions().stream()
                                                                                     .map(VespaVersion::versionNumber)
                                                                                     .collect(Collectors.toSet()).contains(version))
                                                   .max(Comparator.naturalOrder())
                                                   .orElseThrow(() -> new IllegalStateException("No available releases of " +
                                                                                                controller.mavenRepository().artifactId())));
}
/** Sets the given deployment in or out of service, for both rotation-backed and policy-backed global endpoints. */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().requireInstance(applicationId);
    ZoneId zone = requireZone(environment, region);
    if ( ! instance.deployments().containsKey(zone))
        throw new NotExistsException(instance + " has no deployment in " + zone);

    DeploymentId deploymentId = new DeploymentId(instance.id(), zone);
    setGlobalRotationStatus(deploymentId, inService, request);
    setGlobalEndpointStatus(deploymentId, inService, request);
    String direction = inService ? "in" : "out of";
    return new MessageResponse(String.format("Successfully set %s in %s %s service",
                                             instance.id().toShortString(), zone, direction));
}
/** Set the global endpoint status for given deployment. This only applies to global endpoints backed by a cloud service */
private void setGlobalEndpointStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
    // Operators and tenants are recorded as distinct agents for auditing.
    GlobalRouting.Agent agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant;
    GlobalRouting.Status status = inService ? GlobalRouting.Status.in : GlobalRouting.Status.out;
    controller.routing().policies().setGlobalRoutingStatus(deployment, status, agent);
}
/** Set the global rotation status for given deployment. This only applies to global endpoints backed by a rotation */
private void setGlobalRotationStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
    Inspector requestData = toSlime(request.getData()).get();
    String reason = mandatory("reason", requestData).asString();  // a reason is required for audit purposes
    String agentName = (isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant).name();
    long timestamp = controller.clock().instant().getEpochSecond();
    EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
    controller.routing().setGlobalRotationStatus(deployment, new EndpointStatus(status, reason, agentName, timestamp));
}
/** Returns the current global rotation status of the given deployment. */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ZoneId zone = requireZone(environment, region);
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), zone);
    Slime slime = new Slime();
    // NOTE: the response array interleaves the upstream name (string) with its status (object).
    Cursor array = slime.setObject().setArray("globalrotationoverride");
    controller.routing().globalRotationStatus(deploymentId).forEach((endpoint, status) -> {
        array.addString(endpoint.upstreamIdOf(deploymentId));
        Cursor entry = array.addObject();
        entry.setString("status", status.getStatus().name());
        entry.setString("reason", status.getReason() == null ? "" : status.getReason());
        entry.setString("agent", status.getAgent() == null ? "" : status.getAgent());
        entry.setLong("timestamp", status.getEpoch());
    });
    return new SlimeJsonResponse(slime);
}
/** Returns the rotation status of the given (optionally named) endpoint in the given zone. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().requireInstance(applicationId);
    ZoneId zone = requireZone(environment, region);
    RotationId rotation = findRotationId(instance, endpointId);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " has no deployment in " + zone);

    Slime slime = new Slime();
    toSlime(instance.rotationStatus().of(rotation, deployment), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/**
 * Serves resource metering data for the given tenant and application: the current
 * resource rate, aggregated usage for this and last month, and a per-instance
 * time series of cpu/memory/disk snapshots.
 */
private HttpResponse metering(String tenant, String application, HttpRequest request) {
Slime slime = new Slime();
Cursor root = slime.setObject();
MeteringData meteringData = controller.serviceRegistry()
.meteringService()
.getMeteringData(TenantName.from(tenant), ApplicationName.from(application));
// Instantaneous resource consumption.
ResourceAllocation currentSnapshot = meteringData.getCurrentSnapshot();
Cursor currentRate = root.setObject("currentrate");
currentRate.setDouble("cpu", currentSnapshot.getCpuCores());
currentRate.setDouble("mem", currentSnapshot.getMemoryGb());
currentRate.setDouble("disk", currentSnapshot.getDiskGb());
// Aggregated usage for the current calendar month.
ResourceAllocation thisMonth = meteringData.getThisMonth();
Cursor thismonth = root.setObject("thismonth");
thismonth.setDouble("cpu", thisMonth.getCpuCores());
thismonth.setDouble("mem", thisMonth.getMemoryGb());
thismonth.setDouble("disk", thisMonth.getDiskGb());
// Aggregated usage for the previous calendar month.
ResourceAllocation lastMonth = meteringData.getLastMonth();
Cursor lastmonth = root.setObject("lastmonth");
lastmonth.setDouble("cpu", lastMonth.getCpuCores());
lastmonth.setDouble("mem", lastMonth.getMemoryGb());
lastmonth.setDouble("disk", lastMonth.getDiskGb());
// Snapshot history, keyed by instance; each metric gets its own per-instance time series
// under "details", so charts can be rendered per metric per instance.
Map<ApplicationId, List<ResourceSnapshot>> history = meteringData.getSnapshotHistory();
Cursor details = root.setObject("details");
Cursor detailsCpu = details.setObject("cpu");
Cursor detailsMem = details.setObject("mem");
Cursor detailsDisk = details.setObject("disk");
history.forEach((applicationId, resources) -> {
String instanceName = applicationId.instance().value();
Cursor detailsCpuApp = detailsCpu.setObject(instanceName);
Cursor detailsMemApp = detailsMem.setObject(instanceName);
Cursor detailsDiskApp = detailsDisk.setObject(instanceName);
Cursor detailsCpuData = detailsCpuApp.setArray("data");
Cursor detailsMemData = detailsMemApp.setArray("data");
Cursor detailsDiskData = detailsDiskApp.setArray("data");
// Each snapshot contributes one (timestamp, value) point to each of the three series.
resources.forEach(resourceSnapshot -> {
Cursor cpu = detailsCpuData.addObject();
cpu.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
cpu.setDouble("value", resourceSnapshot.getCpuCores());
Cursor mem = detailsMemData.addObject();
mem.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
mem.setDouble("value", resourceSnapshot.getMemoryGb());
Cursor disk = detailsDiskData.addObject();
disk.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
disk.setDouble("value", resourceSnapshot.getDiskGb());
});
});
return new SlimeJsonResponse(slime);
}
/** Describes the change (platform and/or application version) currently rolling out for the instance, if any. */
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    Change change = instance.change();
    if ( ! change.isEmpty()) {
        change.platform().ifPresent(version -> root.setString("platform", version.toString()));
        change.application().ifPresent(version -> root.setString("application", version.id()));
        root.setBool("pinned", change.isPinned());
    }
    return new SlimeJsonResponse(slime);
}
/** Reports whether orchestration of the given deployment is currently suspended. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), zone);
    Slime slime = new Slime();
    slime.setObject().setBool("suspended", controller.applications().isSuspended(deploymentId));
    return new SlimeJsonResponse(slime);
}
/** Lists the services of the given deployment, as seen by the config server. */
private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
    ZoneId zone = requireZone(environment, region);
    ApplicationId applicationId = new ApplicationId.Builder().tenant(tenantName)
                                                             .applicationName(applicationName)
                                                             .instanceName(instanceName)
                                                             .build();
    ServiceApiResponse response = new ServiceApiResponse(zone,
                                                         applicationId,
                                                         controller.zoneRegistry().getConfigServerApiUris(zone),
                                                         request.getUri());
    response.setResponse(applicationView);
    return response;
}
/** Proxies a service API request for the given deployment; cluster controller status pages are served as raw HTML. */
private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));

    boolean isClusterController = "container-clustercontroller".equals(serviceName);
    if (isClusterController && restPath.contains("/status/")) {
        String[] pathParts = restPath.split("/status/");
        String html = controller.serviceRegistry().configServer().getClusterControllerStatus(deploymentId, pathParts[0], pathParts[1]);
        return new HtmlResponse(html);
    }

    Map<?,?> apiResult = controller.serviceRegistry().configServer().getServiceApiResponse(deploymentId, serviceName, restPath);
    ServiceApiResponse response = new ServiceApiResponse(deploymentId.zoneId(),
                                                         deploymentId.applicationId(),
                                                         controller.zoneRegistry().getConfigServerApiUris(deploymentId.zoneId()),
                                                         request.getUri());
    response.setResponse(apiResult, serviceName, restPath);
    return response;
}
/** Proxies application package content requests to the config server. */
private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, String restPath, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), zone);
    return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, "/" + restPath, request.getUri());
}
/** Updates an existing tenant from the request body and returns its new state. */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName); // 404 if the tenant does not exist
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    Credentials credentials = accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest());
    controller.tenants().update(accessControlRequests.specification(tenant, requestObject), credentials);
    return tenant(controller.tenants().require(tenant), request);
}
/** Creates a new tenant from the request body and returns its state. */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    Credentials credentials = accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest());
    controller.tenants().create(accessControlRequests.specification(tenant, requestObject), credentials);
    return tenant(controller.tenants().require(tenant), request);
}
/** Creates a new application under the given tenant and returns its serialized form. */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    // Called for its side effect only; the previously-unused return value has been dropped.
    controller.applications().createApplication(id, credentials);
    Slime slime = new Slime();
    toSlime(id, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Creates a new instance, implicitly creating the application first if this is its first instance. */
private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    if ( ! controller.applications().getApplication(applicationId).isPresent())
        createApplication(tenantName, applicationName, request);

    controller.applications().createInstance(applicationId.instance(instanceName));
    Slime slime = new Slime();
    toSlime(applicationId.instance(instanceName), slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
request = controller.auditLogger().log(request);
String versionString = readToString(request.getData());
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
StringBuilder response = new StringBuilder();
// All mutation happens under the application lock; the response text is accumulated inside the callback.
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
Version version = Version.fromString(versionString);
VersionStatus versionStatus = controller.readVersionStatus();
// The empty version means "deploy the current system version".
if (version.equals(Version.emptyVersion))
version = controller.systemVersion(versionStatus);
// Reject versions which are not active in this system, listing valid alternatives.
if (!versionStatus.isActive(version))
throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
"Version is not active in this system. " +
"Active versions: " + versionStatus.versions()
.stream()
.map(VespaVersion::versionNumber)
.map(Version::toString)
.collect(joining(", ")));
Change change = Change.of(version);
if (pin)
change = change.withPin();
controller.applications().deploymentTrigger().forceChange(id, change);
response.append("Triggered ").append(change).append(" for ").append(id);
});
return new MessageResponse(response.toString());
}
/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    controller.auditLogger().log(request);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        // Fail with a descriptive 4xx-style error instead of a bare NoSuchElementException
        // when no application package has ever been submitted for this application.
        Change change = Change.of(application.get().latestVersion()
                                             .orElseThrow(() -> new IllegalArgumentException(
                                                     "No application package has been submitted for " + id)));
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Change current = application.get().require(id.instance()).change();
        if (current.isEmpty()) {
            response.append("No deployment in progress for ").append(id).append(" at this time");
            return;
        }
        controller.applications().deploymentTrigger().cancelChange(id, ChangesToCancel.valueOf(choice.toUpperCase()));
        Change remaining = controller.applications().requireInstance(id).change();
        response.append("Changed deployment from '").append(current)
                .append("' to '").append(remaining)
                .append("' for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */
private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    // Comma-separated filter properties; an absent or empty property means "all".
    List<String> clusterNames = Stream.of(Optional.ofNullable(request.getProperty("clusterId")).orElse("").split(","))
                                      .filter(cluster -> ! cluster.isBlank())
                                      .collect(toUnmodifiableList());
    List<String> documentTypes = Stream.of(Optional.ofNullable(request.getProperty("documentType")).orElse("").split(","))
                                       .filter(type -> ! type.isBlank())
                                       .collect(toUnmodifiableList());

    controller.applications().reindex(id, zone, clusterNames, documentTypes, request.getBooleanProperty("indexedOnly"));
    return new MessageResponse("Requested reindexing of " + id + " in " + zone +
                               (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames) +
                               (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes))));
}
/** Gets reindexing status of an application in a zone. */
private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setBool("enabled", reindexing.enabled());
    Cursor clustersArray = root.setArray("clusters");
    // Clusters and their entries are emitted in sorted key order for stable output.
    for (var cluster : reindexing.clusters().entrySet().stream().sorted(comparingByKey()).collect(toUnmodifiableList())) {
        Cursor clusterObject = clustersArray.addObject();
        clusterObject.setString("name", cluster.getKey());
        Cursor pendingArray = clusterObject.setArray("pending");
        for (var pending : cluster.getValue().pending().entrySet().stream().sorted(comparingByKey()).collect(toUnmodifiableList())) {
            Cursor pendingObject = pendingArray.addObject();
            pendingObject.setString("type", pending.getKey());
            pendingObject.setLong("requiredGeneration", pending.getValue());
        }
        Cursor readyArray = clusterObject.setArray("ready");
        for (var ready : cluster.getValue().ready().entrySet().stream().sorted(comparingByKey()).collect(toUnmodifiableList())) {
            Cursor readyObject = readyArray.addObject();
            readyObject.setString("type", ready.getKey());
            setStatus(readyObject, ready.getValue());
        }
    }
    return new SlimeJsonResponse(slime);
}
/** Serializes a reindexing status; all fields are optional and only emitted when present. */
void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
    status.readyAt().ifPresent(at -> statusObject.setLong("readyAtMillis", at.toEpochMilli()));
    status.startedAt().ifPresent(at -> statusObject.setLong("startedAtMillis", at.toEpochMilli()));
    status.endedAt().ifPresent(at -> statusObject.setLong("endedAtMillis", at.toEpochMilli()));
    status.state().map(ApplicationApiHandler::toString).ifPresent(name -> statusObject.setString("state", name));
    status.message().ifPresent(text -> statusObject.setString("message", text));
    status.progress().ifPresent(value -> statusObject.setDouble("progress", value));
}
/** Maps a reindexing state to its wire name; unknown states map to null, which callers treat as "omit the field". */
private static String toString(ApplicationReindexing.State state) {
    switch (state) {
        case PENDING:    return "pending";
        case RUNNING:    return "running";
        case FAILED:     return "failed";
        case SUCCESSFUL: return "successful";
    }
    return null;
}
/** Enables reindexing of an application in a zone. */
private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    controller.applications().enableReindexing(id, zone);
    return new MessageResponse("Enabled reindexing of " + id + " in " + zone);
}
/** Disables reindexing of an application in a zone. */
private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    controller.applications().disableReindexing(id, zone);
    return new MessageResponse("Disabled reindexing of " + id + " in " + zone);
}
/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), zone);
    // Each filter property is optional; an absent property means "no restriction".
    Optional<HostName> hostName = Optional.ofNullable(request.getProperty("hostname")).map(HostName::from);
    Optional<ClusterSpec.Type> clusterType = Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from);
    Optional<ClusterSpec.Id> clusterId = Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from);
    controller.applications().restart(deploymentId, new RestartFilter().withHostName(hostName)
                                                                       .withClusterType(clusterType)
                                                                       .withClusterId(clusterId));
    return new MessageResponse("Requested restart of " + deploymentId);
}
/** Set suspension status of the given deployment. */
private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
    ZoneId zone = requireZone(environment, region);
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), zone);
    controller.applications().setSuspension(deploymentId, suspend);
    String action = suspend ? "Suspended" : "Resumed";
    return new MessageResponse(action + " orchestration of " + deploymentId);
}
/** Deploys an application package directly to the given job's zone; only manual environments, unless operator. */
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
    if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");

    Map<String, byte[]> dataParts = parseDataParts(request);
    // Use the shared constant for both the presence check and the lookup, so the two can never diverge
    // (the original checked the literal "applicationZip" but read EnvironmentResource.APPLICATION_ZIP).
    if ( ! dataParts.containsKey(EnvironmentResource.APPLICATION_ZIP))
        throw new IllegalArgumentException("Missing required form part 'applicationZip'");

    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                     Optional.of(id.instance()),
                                                                     Optional.of(type.zone(controller.system())),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));

    // An explicit Vespa version may be requested through the optional "deployOptions" part.
    Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions"))
                                        .map(json -> SlimeUtils.jsonToSlime(json).get())
                                        .flatMap(options -> optional("vespaVersion", options))
                                        .map(Version::fromString);

    controller.jobController().deploy(id, type, version, applicationPackage);
    RunId runId = controller.jobController().last(id, type).get().id();
    Slime slime = new Slime();
    Cursor rootObject = slime.setObject();
    rootObject.setString("message", "Deployment started in " + runId +
                                    ". This may take about 15 minutes the first time.");
    rootObject.setLong("run", runId.number());
    return new SlimeJsonResponse(slime);
}
/**
 * Deploys an application to the given zone. Handles three cases:
 * the proxy system application (deployed at the current system version), a deployment
 * identified by source revision + build number (package fetched from storage), and a
 * redeployment of the currently deployed version when no package or version is given.
 */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
Map<String, byte[]> dataParts = parseDataParts(request);
if ( ! dataParts.containsKey("deployOptions"))
return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
/*
* Special handling of the proxy application (the only system application with an application package)
* Setting any other deployOptions here is not supported for now (e.g. specifying version), but
* this might be handy later to handle emergency downgrades.
*/
boolean isZoneApplication = SystemApplication.proxy.id().equals(applicationId);
if (isZoneApplication) {
// "null" is checked because clients may serialize an absent version as the string "null".
String versionStr = deployOptions.field("vespaVersion").asString();
boolean versionPresent = !versionStr.isEmpty() && !versionStr.equals("null");
if (versionPresent) {
throw new RuntimeException("Version not supported for system applications");
}
// Refuse proxy deployments while the system itself is upgrading, or before its version is known.
VersionStatus versionStatus = controller.readVersionStatus();
if (versionStatus.isUpgrading()) {
throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
}
Optional<VespaVersion> systemVersion = versionStatus.systemVersion();
if (systemVersion.isEmpty()) {
throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
}
ActivateResult result = controller.applications()
.deploySystemApplicationPackage(SystemApplication.proxy, zone, systemVersion.get().versionNumber());
return new SlimeJsonResponse(toSlime(result));
}
/*
* Normal applications from here
*/
Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip"))
.map(ApplicationPackage::new);
Optional<com.yahoo.vespa.hosted.controller.Application> application = controller.applications().getApplication(TenantAndApplicationId.from(applicationId));
// Source revision and build number must be given together, and are mutually exclusive with an uploaded package.
Inspector sourceRevision = deployOptions.field("sourceRevision");
Inspector buildNumber = deployOptions.field("buildNumber");
if (sourceRevision.valid() != buildNumber.valid())
throw new IllegalArgumentException("Source revision and build number must both be provided, or not");
Optional<ApplicationVersion> applicationVersion = Optional.empty();
if (sourceRevision.valid()) {
if (applicationPackage.isPresent())
throw new IllegalArgumentException("Application version and application package can't both be provided.");
applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision),
buildNumber.asLong()));
applicationPackage = Optional.of(controller.applications().getApplicationPackage(applicationId,
applicationVersion.get()));
}
// Redeploy: nothing was provided, so reuse the application version and platform of the existing deployment.
boolean deployDirectly = deployOptions.field("deployDirectly").asBool();
Optional<Version> vespaVersion = optional("vespaVersion", deployOptions).map(Version::new);
if (deployDirectly && applicationPackage.isEmpty() && applicationVersion.isEmpty() && vespaVersion.isEmpty()) {
Optional<Deployment> deployment = controller.applications().getInstance(applicationId)
.map(Instance::deployments)
.flatMap(deployments -> Optional.ofNullable(deployments.get(zone)));
if(deployment.isEmpty())
throw new IllegalArgumentException("Can't redeploy application, no deployment currently exist");
ApplicationVersion version = deployment.get().applicationVersion();
if(version.isUnknown())
throw new IllegalArgumentException("Can't redeploy application, application version is unknown");
applicationVersion = Optional.of(version);
vespaVersion = Optional.of(deployment.get().version());
applicationPackage = Optional.of(controller.applications().getApplicationPackage(applicationId,
applicationVersion.get()));
}
DeployOptions deployOptionsJsonClass = new DeployOptions(deployDirectly,
vespaVersion,
deployOptions.field("ignoreValidationErrors").asBool(),
deployOptions.field("deployCurrentVersion").asBool());
// Verify the declared deployment identity before handing the package to the config server.
applicationPackage.ifPresent(aPackage -> controller.applications().verifyApplicationIdentityConfiguration(applicationId.tenant(),
Optional.of(applicationId.instance()),
Optional.of(zone),
aPackage,
Optional.of(requireUserPrincipal(request))));
ActivateResult result = controller.applications().deploy(applicationId,
zone,
applicationPackage,
applicationVersion,
deployOptionsJsonClass);
return new SlimeJsonResponse(toSlime(result));
}
/** Deletes the given tenant and returns its last known state. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if ( ! tenant.isPresent())
        return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");

    TenantName name = tenant.get().name();
    Credentials credentials = accessControlRequests.credentials(name, toSlime(request.getData()).get(), request.getJDiscRequest());
    controller.tenants().delete(name, credentials);
    return tenant(tenant.get(), request);
}
/** Deletes the given application (all instances). */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Inspector requestData = toSlime(request.getData()).get();
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestData, request.getJDiscRequest());
    controller.applications().deleteApplication(id, credentials);
    return new MessageResponse("Deleted application " + id);
}
/** Deletes the given instance; deleting the last instance also deletes the application itself. */
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    controller.applications().deleteInstance(id.instance(instanceName));
    boolean noInstancesLeft = controller.applications().requireApplication(id).instances().isEmpty();
    if (noInstancesLeft) {
        Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
        controller.applications().deleteApplication(id, credentials);
    }
    return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString());
}
/** Deactivates (removes) the given deployment. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), zone);
    controller.applications().deactivate(id.applicationId(), id.zoneId());
    return new MessageResponse("Deactivated " + id);
}
/** Returns test config for indicated job, with production deployments of the default instance. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
    ApplicationId defaultInstanceId = TenantAndApplicationId.from(id).defaultInstance();
    HashSet<DeploymentId> deployments = new HashSet<>();
    controller.applications().getInstance(defaultInstanceId)
              .ifPresent(instance -> instance.productionDeployments().keySet()
                                             .forEach(zone -> deployments.add(new DeploymentId(defaultInstanceId, zone))));
    var testedZone = type.zone(controller.system());
    // Non-production jobs additionally target the job's own zone.
    if ( ! type.isProduction())
        deployments.add(new DeploymentId(id, testedZone));
    return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
                                                                 type,
                                                                 false,
                                                                 controller.routing().zoneEndpointsOf(deployments),
                                                                 controller.applications().reachableContentClustersByZone(deployments)));
}
/** Parses a source revision from JSON; all three fields are required. */
private static SourceRevision toSourceRevision(Inspector object) {
    Inspector repository = object.field("repository");
    Inspector branch = object.field("branch");
    Inspector commit = object.field("commit");
    if ( ! (repository.valid() && branch.valid() && commit.valid()))
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    return new SourceRevision(repository.asString(), branch.asString(), commit.asString());
}
/** Returns the named tenant, or throws NotExistsException if it does not exist. */
private Tenant getTenantOrThrow(String tenantName) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    return tenant.orElseThrow(() -> new NotExistsException(new TenantId(tenantName)));
}
/**
 * Serializes a tenant, including type-specific fields (Athenz or cloud),
 * its applications (optionally recursed into), and its metadata.
 */
private void toSlime(Cursor object, Tenant tenant, HttpRequest request) {
object.setString("tenant", tenant.name().value());
object.setString("type", tenantType(tenant));
List<com.yahoo.vespa.hosted.controller.Application> applications = controller.applications().asList(tenant.name());
switch (tenant.type()) {
case athenz:
// Athenz tenants carry domain/property ownership and optional contact info.
AthenzTenant athenzTenant = (AthenzTenant) tenant;
object.setString("athensDomain", athenzTenant.domain().getName());
object.setString("property", athenzTenant.property().id());
athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
athenzTenant.contact().ifPresent(c -> {
object.setString("propertyUrl", c.propertyUrl().toString());
object.setString("contactsUrl", c.url().toString());
object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
Cursor contactsArray = object.setArray("contacts");
// Each contact level is itself a list of person names.
c.persons().forEach(persons -> {
Cursor personArray = contactsArray.addArray();
persons.forEach(personArray::addString);
});
});
break;
case cloud: {
// Cloud tenants carry developer keys, secret stores and a quota summary.
CloudTenant cloudTenant = (CloudTenant) tenant;
cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName()));
Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
cloudTenant.developerKeys().forEach((key, user) -> {
Cursor keyObject = pemDeveloperKeysArray.addObject();
keyObject.setString("key", KeyUtils.toPem(key));
keyObject.setString("user", user.getName());
});
toSlime(object, cloudTenant.tenantSecretStores());
// Quota usage is summed across all of the tenant's applications.
var tenantQuota = controller.serviceRegistry().billingController().getQuota(tenant.name());
var usedQuota = applications.stream()
.map(com.yahoo.vespa.hosted.controller.Application::quotaUsage)
.reduce(QuotaUsage.none, QuotaUsage::add);
toSlime(tenantQuota, usedQuota, object.setObject("quota"));
break;
}
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
// Applications: either a full recursive serialization or just their ids, depending on request parameters.
Cursor applicationArray = object.setArray("applications");
for (com.yahoo.vespa.hosted.controller.Application application : applications) {
DeploymentStatus status = controller.jobController().deploymentStatus(application);
for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
: application.instances().values())
if (recurseOverApplications(request))
toSlime(applicationArray.addObject(), instance, status, request);
else
toSlime(instance.id(), applicationArray.addObject(), request);
}
tenantMetaDataToSlime(tenant, object.setObject("metaData"));
}
private void toSlime(Quota quota, QuotaUsage usage, Cursor object) {
quota.budget().ifPresentOrElse(
budget -> object.setDouble("budget", budget.doubleValue()),
() -> object.setNix("budget")
);
object.setDouble("budgetUsed", usage.rate());
quota.maxClusterSize().ifPresent(maxClusterSize -> object.setLong("clusterSize", maxClusterSize));
}
private void toSlime(ClusterResources resources, Cursor object) {
object.setLong("nodes", resources.nodes());
object.setLong("groups", resources.groups());
toSlime(resources.nodeResources(), object.setObject("nodeResources"));
double costDivisor = controller.zoneRegistry().system().isPublic() ? 1.0 : 3.0;
object.setDouble("cost", Math.round(resources.nodes() * resources.nodeResources().cost() * 100.0 / costDivisor) / 100.0);
}
    /** Writes actual and ideal cpu/memory/disk utilization for a cluster. Field order is part of the output. */
    private void utilizationToSlime(Cluster.Utilization utilization, Cursor utilizationObject) {
        utilizationObject.setDouble("cpu", utilization.cpu());
        utilizationObject.setDouble("idealCpu", utilization.idealCpu());
        utilizationObject.setDouble("memory", utilization.memory());
        utilizationObject.setDouble("idealMemory", utilization.idealMemory());
        utilizationObject.setDouble("disk", utilization.disk());
        utilizationObject.setDouble("idealDisk", utilization.idealDisk());
    }
private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) {
for (Cluster.ScalingEvent scalingEvent : scalingEvents) {
Cursor scalingEventObject = scalingEventsArray.addObject();
toSlime(scalingEvent.from(), scalingEventObject.setObject("from"));
toSlime(scalingEvent.to(), scalingEventObject.setObject("to"));
scalingEventObject.setLong("at", scalingEvent.at().toEpochMilli());
}
}
    /** Writes the per-node resource numbers; diskSpeed and storageType are serialized via the local valueOf helpers. */
    private void toSlime(NodeResources resources, Cursor object) {
        object.setDouble("vcpu", resources.vcpu());
        object.setDouble("memoryGb", resources.memoryGb());
        object.setDouble("diskGb", resources.diskGb());
        object.setDouble("bandwidthGbps", resources.bandwidthGbps());
        object.setString("diskSpeed", valueOf(resources.diskSpeed()));
        object.setString("storageType", valueOf(resources.storageType()));
    }
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
object.setString("tenant", tenant.name().value());
Cursor metaData = object.setObject("metaData");
metaData.setString("type", tenantType(tenant));
switch (tenant.type()) {
case athenz:
AthenzTenant athenzTenant = (AthenzTenant) tenant;
metaData.setString("athensDomain", athenzTenant.domain().getName());
metaData.setString("property", athenzTenant.property().id());
break;
case cloud: break;
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
    /**
     * Writes tenant metadata: creation time, the latest dev deployment, the latest prod
     * submission, and last-login times per user level. All timestamps are epoch millis.
     */
    private void tenantMetaDataToSlime(Tenant tenant, Cursor object) {
        List<com.yahoo.vespa.hosted.controller.Application> applications = controller.applications().asList(tenant.name());
        // Most recent start of any dev-environment job run, across all instances of all the tenant's applications.
        Optional<Instant> lastDev = applications.stream()
                .flatMap(application -> application.instances().values().stream())
                .flatMap(instance -> controller.jobController().jobs(instance.id()).stream()
                        .filter(jobType -> jobType.environment() == Environment.dev)
                        .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream()))
                .map(Run::start)
                .max(Comparator.naturalOrder());
        // Most recent build time of any submitted application version.
        Optional<Instant> lastSubmission = applications.stream()
                .flatMap(app -> app.latestVersion().flatMap(ApplicationVersion::buildTime).stream())
                .max(Comparator.naturalOrder());
        object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli());
        // Optional fields are simply omitted when absent.
        lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli()));
        lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli()));
        tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user)
                .ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli()));
        tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer)
                .ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli()));
        tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator)
                .ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli()));
    }
    /**
     * Returns a copy of the given URI with the same scheme, user info, host and port,
     * but with the path replaced by {@code newPath} and the query by {@code newQuery}
     * (a null query removes it). The fragment is always dropped.
     */
    private URI withPathAndQuery(String newPath, String newQuery, URI uri) {
        try {
            return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null);
        }
        catch (URISyntaxException e) {
            // All components come from an already-valid URI, so this cannot occur in practice.
            throw new RuntimeException("Will not happen", e);
        }
    }
    /** Returns a copy of the given URI with the path replaced by {@code newPath} and no query. */
    private URI withPath(String newPath, URI uri) {
        return withPathAndQuery(newPath, null, uri);
    }
private String toPath(DeploymentId id) {
return path("/application", "v4",
"tenant", id.applicationId().tenant(),
"application", id.applicationId().application(),
"instance", id.applicationId().instance(),
"environment", id.zoneId().environment(),
"region", id.zoneId().region());
}
private long asLong(String valueOrNull, long defaultWhenNull) {
if (valueOrNull == null) return defaultWhenNull;
try {
return Long.parseLong(valueOrNull);
}
catch (NumberFormatException e) {
throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'");
}
}
private void toSlime(Run run, Cursor object) {
object.setLong("id", run.id().number());
object.setString("version", run.versions().targetPlatform().toFullString());
if ( ! run.versions().targetApplication().isUnknown())
toSlime(run.versions().targetApplication(), object.setObject("revision"));
object.setString("reason", "unknown reason");
object.setLong("at", run.end().orElse(run.start()).toEpochMilli());
}
private Slime toSlime(InputStream jsonStream) {
try {
byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
return SlimeUtils.jsonToSlime(jsonBytes);
} catch (IOException e) {
throw new RuntimeException();
}
}
private static Principal requireUserPrincipal(HttpRequest request) {
Principal principal = request.getJDiscRequest().getUserPrincipal();
if (principal == null) throw new InternalServerErrorException("Expected a user principal");
return principal;
}
private Inspector mandatory(String key, Inspector object) {
if ( ! object.field(key).valid())
throw new IllegalArgumentException("'" + key + "' is missing");
return object.field(key);
}
private Optional<String> optional(String key, Inspector object) {
return SlimeUtils.optionalString(object.field(key));
}
private static String path(Object... elements) {
return Joiner.on("/").join(elements);
}
private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
object.setString("tenant", id.tenant().value());
object.setString("application", id.application().value());
object.setString("url", withPath("/application/v4" +
"/tenant/" + id.tenant().value() +
"/application/" + id.application().value(),
request.getUri()).toString());
}
private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
object.setString("tenant", id.tenant().value());
object.setString("application", id.application().value());
object.setString("instance", id.instance().value());
object.setString("url", withPath("/application/v4" +
"/tenant/" + id.tenant().value() +
"/application/" + id.application().value() +
"/instance/" + id.instance().value(),
request.getUri()).toString());
}
    /**
     * Serializes a deployment activation result: revision id, package size, prepare log
     * messages, and the config change actions (restart and refeed) reported by the
     * config server.
     */
    private Slime toSlime(ActivateResult result) {
        Slime slime = new Slime();
        Cursor object = slime.setObject();
        object.setString("revisionId", result.revisionId().id());
        object.setLong("applicationZipSize", result.applicationZipSizeBytes());
        // Prepare log messages; the log list may be null when the config server reported none.
        Cursor logArray = object.setArray("prepareMessages");
        if (result.prepareResponse().log != null) {
            for (Log logMessage : result.prepareResponse().log) {
                Cursor logObject = logArray.addObject();
                logObject.setLong("time", logMessage.time);
                logObject.setString("level", logMessage.level);
                logObject.setString("message", logMessage.message);
            }
        }
        Cursor changeObject = object.setObject("configChangeActions");
        // Services that must be restarted for the config change to take effect.
        Cursor restartActionsArray = changeObject.setArray("restart");
        for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
            Cursor restartActionObject = restartActionsArray.addObject();
            restartActionObject.setString("clusterName", restartAction.clusterName);
            restartActionObject.setString("clusterType", restartAction.clusterType);
            restartActionObject.setString("serviceType", restartAction.serviceType);
            serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
            stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
        }
        // Document types that must be re-fed after the config change.
        Cursor refeedActionsArray = changeObject.setArray("refeed");
        for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
            Cursor refeedActionObject = refeedActionsArray.addObject();
            refeedActionObject.setString("name", refeedAction.name);
            refeedActionObject.setString("documentType", refeedAction.documentType);
            refeedActionObject.setString("clusterName", refeedAction.clusterName);
            serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
            stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
        }
        return slime;
    }
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
for (ServiceInfo serviceInfo : serviceInfoList) {
Cursor serviceInfoObject = array.addObject();
serviceInfoObject.setString("serviceName", serviceInfo.serviceName);
serviceInfoObject.setString("serviceType", serviceInfo.serviceType);
serviceInfoObject.setString("configId", serviceInfo.configId);
serviceInfoObject.setString("hostName", serviceInfo.hostName);
}
}
private void stringsToSlime(List<String> strings, Cursor array) {
for (String string : strings)
array.addString(string);
}
private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) {
Cursor secretStore = object.setArray("secretStores");
tenantSecretStores.forEach(store -> {
Cursor storeObject = secretStore.addObject();
storeObject.setString("name", store.getName());
storeObject.setString("awsId", store.getAwsId());
storeObject.setString("role", store.getRole());
});
}
private String readToString(InputStream stream) {
Scanner scanner = new Scanner(stream).useDelimiter("\\A");
if ( ! scanner.hasNext()) return null;
return scanner.next();
}
private static boolean recurseOverTenants(HttpRequest request) {
return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive"));
}
private static boolean recurseOverApplications(HttpRequest request) {
return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive"));
}
private static boolean recurseOverDeployments(HttpRequest request) {
return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive"));
}
private static boolean showOnlyProductionInstances(HttpRequest request) {
return "true".equals(request.getProperty("production"));
}
private static String tenantType(Tenant tenant) {
switch (tenant.type()) {
case athenz: return "ATHENS";
case cloud: return "CLOUD";
default: throw new IllegalArgumentException("Unknown tenant type: " + tenant.getClass().getSimpleName());
}
}
private static ApplicationId appIdFromPath(Path path) {
return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
}
private static JobType jobTypeFromPath(Path path) {
return JobType.fromJobName(path.get("jobtype"));
}
private static RunId runIdFromPath(Path path) {
long number = Long.parseLong(path.get("number"));
return new RunId(appIdFromPath(path), jobTypeFromPath(path), number);
}
    /**
     * Handles submission of a new application revision: parses the multipart submit
     * options and packages, validates the optional source url, verifies the identity
     * configuration of the package, and registers the submission.
     */
    private HttpResponse submit(String tenant, String application, HttpRequest request) {
        Map<String, byte[]> dataParts = parseDataParts(request);
        Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
        // Project id 0 (absent) is normalized to 1.
        long projectId = Math.max(1, submitOptions.field("projectId").asLong());
        Optional<String> repository = optional("repository", submitOptions);
        Optional<String> branch = optional("branch", submitOptions);
        Optional<String> commit = optional("commit", submitOptions);
        // A source revision is only recorded when repository, branch and commit are all given.
        Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
                                                  ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
                                                  : Optional.empty();
        Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
        Optional<String> authorEmail = optional("authorEmail", submitOptions);
        // Reject relative or scheme-less source urls early.
        sourceUrl.map(URI::create).ifPresent(url -> {
            if (url.getHost() == null || url.getScheme() == null)
                throw new IllegalArgumentException("Source URL must include scheme and host");
        });
        ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
        controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
                                                                         Optional.empty(),
                                                                         Optional.empty(),
                                                                         applicationPackage,
                                                                         Optional.of(requireUserPrincipal(request)));
        return JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
                                                            tenant,
                                                            application,
                                                            sourceRevision,
                                                            authorEmail,
                                                            sourceUrl,
                                                            projectId,
                                                            applicationPackage,
                                                            dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP));
    }
private HttpResponse removeAllProdDeployments(String tenant, String application) {
JobControllerApiHandlerHelper.submitResponse(controller.jobController(), tenant, application,
Optional.empty(), Optional.empty(), Optional.empty(), 1,
ApplicationPackage.deploymentRemoval(), new byte[0]);
return new MessageResponse("All deployments removed");
}
private ZoneId requireZone(String environment, String region) {
ZoneId zone = ZoneId.from(environment, region);
if (zone.environment() == Environment.prod && zone.region().value().equals("controller")) {
return zone;
}
if (!controller.zoneRegistry().hasZone(zone)) {
throw new IllegalArgumentException("Zone " + zone + " does not exist in this system");
}
return zone;
}
    /**
     * Parses the multipart request body. When an X-Content-Hash header is present, the
     * body is digested (SHA-256) while being parsed and the digest is compared against
     * the header's base64-encoded value; a mismatch is rejected with 400.
     */
    private static Map<String, byte[]> parseDataParts(HttpRequest request) {
        String contentHash = request.getHeader("X-Content-Hash");
        if (contentHash == null)
            return new MultipartParser().parse(request);
        // Wrap the body stream so the digest is computed as the parser consumes it.
        DigestInputStream digester = Signatures.sha256Digester(request.getData());
        var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
        if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
            throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
        return dataParts;
    }
private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
if (instance.rotations().isEmpty()) {
throw new NotExistsException("global rotation does not exist for " + instance);
}
if (endpointId.isPresent()) {
return instance.rotations().stream()
.filter(r -> r.endpointId().id().equals(endpointId.get()))
.map(AssignedRotation::rotationId)
.findFirst()
.orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() +
" does not exist for " + instance));
} else if (instance.rotations().size() > 1) {
throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
}
return instance.rotations().get(0).rotationId();
}
private static String rotationStateString(RotationState state) {
switch (state) {
case in: return "IN";
case out: return "OUT";
}
return "UNKNOWN";
}
private static String endpointScopeString(Endpoint.Scope scope) {
switch (scope) {
case region: return "region";
case global: return "global";
case zone: return "zone";
}
throw new IllegalArgumentException("Unknown endpoint scope " + scope);
}
private static String routingMethodString(RoutingMethod method) {
switch (method) {
case exclusive: return "exclusive";
case shared: return "shared";
case sharedLayer4: return "sharedLayer4";
}
throw new IllegalArgumentException("Unknown routing method " + method);
}
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName))
.filter(cls::isInstance)
.map(cls::cast)
.orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request"));
}
/** Returns whether given request is by an operator */
private static boolean isOperator(HttpRequest request) {
var securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
return securityContext.roles().stream()
.map(Role::definition)
.anyMatch(definition -> definition == RoleDefinition.hostedOperator);
}
} | class ApplicationApiHandler extends LoggingRequestHandler {
    private static final ObjectMapper jsonMapper = new ObjectMapper();  // shared JSON mapper

    // Requests may carry this prefix; it is stripped before route matching in handle().
    private static final String OPTIONAL_PREFIX = "/api";

    private final Controller controller;
    private final AccessControlRequests accessControlRequests;
    private final TestConfigSerializer testConfigSerializer;

    @Inject
    public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx,
                                 Controller controller,
                                 AccessControlRequests accessControlRequests) {
        super(parentCtx);
        this.controller = controller;
        this.accessControlRequests = accessControlRequests;
        // Serializer is bound to the system (public/main/cd) this controller runs in.
        this.testConfigSerializer = new TestConfigSerializer(controller.system());
    }
    @Override
    public Duration getTimeout() {
        // Generous request timeout — presumably to cover slow deploy/submit operations; confirm.
        return Duration.ofMinutes(20);
    }
    /**
     * Entry point: dispatches on HTTP method and maps domain exceptions to HTTP statuses.
     * All error responses carry the exception's message chain.
     */
    @Override
    public HttpResponse handle(HttpRequest request) {
        try {
            // Strip the optional "/api" prefix before route matching.
            Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
            switch (request.getMethod()) {
                case GET: return handleGET(path, request);
                case PUT: return handlePUT(path, request);
                case POST: return handlePOST(path, request);
                case PATCH: return handlePATCH(path, request);
                case DELETE: return handleDELETE(path, request);
                case OPTIONS: return handleOPTIONS();
                default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
            }
        }
        catch (ForbiddenException e) {       // 403
            return ErrorResponse.forbidden(Exceptions.toMessageString(e));
        }
        catch (NotAuthorizedException e) {   // 401
            return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
        }
        catch (NotExistsException e) {       // 404
            return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
        }
        catch (IllegalArgumentException e) { // 400
            return ErrorResponse.badRequest(Exceptions.toMessageString(e));
        }
        catch (ConfigServerException e) {
            // Translate config server error codes to matching HTTP statuses; unknown codes become 400.
            switch (e.getErrorCode()) {
                case NOT_FOUND:
                    return new ErrorResponse(NOT_FOUND, e.getErrorCode().name(), Exceptions.toMessageString(e));
                case ACTIVATION_CONFLICT:
                    return new ErrorResponse(CONFLICT, e.getErrorCode().name(), Exceptions.toMessageString(e));
                case INTERNAL_SERVER_ERROR:
                    return new ErrorResponse(INTERNAL_SERVER_ERROR, e.getErrorCode().name(), Exceptions.toMessageString(e));
                default:
                    return new ErrorResponse(BAD_REQUEST, e.getErrorCode().name(), Exceptions.toMessageString(e));
            }
        }
        catch (RuntimeException e) {
            // Catch-all: log with the request URI and return 500 rather than propagating.
            log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
            return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
        }
    }
    /** Routes GET requests. First match wins; unmatched paths yield 404. */
    private HttpResponse handleGET(Path path, HttpRequest request) {
        // Root and tenant-level resources.
        if (path.matches("/application/v4/")) return root(request);
        if (path.matches("/application/v4/tenant")) return tenants(request);
        if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request);
        // Application-level resources.
        if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/metering")) return metering(path.get("tenant"), path.get("application"), request);
        // Instance-level resources.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
        // Job and run resources.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
        // Deployment-level resources, ".../instance/{instance}/environment/{environment}/..." form.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return metrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        // Legacy ".../environment/{environment}/region/{region}/instance/{instance}" form.
        // NOTE(review): the next two route lines are identical — the second can never match and is a candidate for removal.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        return ErrorResponse.notFoundError("Nothing at " + path);
    }
    /** Routes PUT requests (tenant updates, secret stores, rotation overrides). First match wins. */
    private HttpResponse handlePUT(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return allowArchiveAccess(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
        // Both path orders are accepted for setting a global rotation override.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }
/**
 * Routes POST requests to the first matching path pattern; 404s otherwise.
 * Paths without an explicit instance segment operate on the "default" instance.
 * Both path orderings (instance before environment/region and the reverse) are supported.
 */
private HttpResponse handlePOST(Path path, HttpRequest request) {
// Tenant- and application-level creation and keys.
if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
// Application-level deployment orchestration, implicitly on the "default" instance.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
// Instance-level operations.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
// Zone-level operations on a specific deployment.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
// Same zone-level operations with the alternative path ordering.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes PATCH requests; both path forms patch the owning application (the instance segment is ignored). */
private HttpResponse handlePATCH(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/**
 * Routes DELETE requests to the first matching path pattern; 404s otherwise.
 * Both path orderings (instance before environment/region and the reverse) are supported.
 */
private HttpResponse handleDELETE(Path path, HttpRequest request) {
// Tenant-level removals.
if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return removeArchiveAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request);
// Application-level removals; deploy cancellation without an instance segment targets "default".
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
// Instance-level removals.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
// Zone-level removals; rotation override DELETE passes true (contrast with PUT, which passes false).
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Answers OPTIONS preflight requests with the set of methods this handler supports. */
private HttpResponse handleOPTIONS() {
    EmptyResponse optionsResponse = new EmptyResponse();
    optionsResponse.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
    return optionsResponse;
}
/** Lists all tenants recursively, serializing each tenant in full. */
private HttpResponse recursiveRoot(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantArray = slime.setArray();
    controller.tenants().asList()
              .forEach(tenant -> toSlime(tenantArray.addObject(), tenant, request));
    return new SlimeJsonResponse(slime);
}
/** Root resource: a full recursive tenant listing when requested, otherwise just the "tenant" link. */
private HttpResponse root(HttpRequest request) {
    if (recurseOverTenants(request))
        return recursiveRoot(request);
    return new ResourceResponse(request, "tenant");
}
/** Produces the compact tenant list used by the tenants resource. */
private HttpResponse tenants(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantArray = slime.setArray();
    controller.tenants().asList()
              .forEach(tenant -> tenantInTenantsListToSlime(tenant, request.getUri(), tenantArray.addObject()));
    return new SlimeJsonResponse(slime);
}
/** Serializes the named tenant, or 404s when it does not exist. */
private HttpResponse tenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    return tenant(tenant.get(), request);
}
/** Serializes the given tenant as a JSON object response. */
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
    Slime response = new Slime();
    toSlime(response.setObject(), tenant, request);
    return new SlimeJsonResponse(response);
}
/** Returns the tenant info of the named tenant; 404s when missing or not a cloud tenant. */
private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isEmpty() || tenant.get().type() != Tenant.Type.cloud)
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
    return tenantInfo(((CloudTenant) tenant.get()).info(), request);
}
/** Serializes tenant info as JSON; an empty info yields an empty object. */
private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    if (info.isEmpty())
        return new SlimeJsonResponse(slime); // empty info -> empty JSON object
    root.setString("name", info.name());
    root.setString("email", info.email());
    root.setString("website", info.website());
    root.setString("invoiceEmail", info.invoiceEmail());
    root.setString("contactName", info.contactName());
    root.setString("contactEmail", info.contactEmail());
    toSlime(info.address(), root);
    toSlime(info.billingContact(), root);
    return new SlimeJsonResponse(slime);
}
/** Writes a non-empty address as an "address" sub-object; empty addresses are omitted entirely. */
private void toSlime(TenantInfoAddress address, Cursor parentCursor) {
    if ( ! address.isEmpty()) {
        Cursor cursor = parentCursor.setObject("address");
        cursor.setString("addressLines", address.addressLines());
        cursor.setString("postalCodeOrZip", address.postalCodeOrZip());
        cursor.setString("city", address.city());
        cursor.setString("stateRegionProvince", address.stateRegionProvince());
        cursor.setString("country", address.country());
    }
}
/** Writes a non-empty billing contact as a "billingContact" sub-object; empty contacts are omitted. */
private void toSlime(TenantInfoBillingContact billingContact, Cursor parentCursor) {
    if ( ! billingContact.isEmpty()) {
        Cursor billingCursor = parentCursor.setObject("billingContact");
        billingCursor.setString("name", billingContact.name());
        billingCursor.setString("email", billingContact.email());
        billingCursor.setString("phone", billingContact.phone());
        toSlime(billingContact.address(), billingCursor);
    }
}
/** Updates the named tenant's info; 404s when the tenant is missing or not a cloud tenant. */
private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isPresent() && tenant.get().type() == Tenant.Type.cloud)
        return updateTenantInfo((CloudTenant) tenant.get(), request);
    return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
}
/** Returns the field's string value, or the given default when the field is absent/invalid. */
private String getString(Inspector field, String defaultValue) {
    if (field.valid())
        return field.asString();
    return defaultValue;
}
/**
 * Merges the JSON request body into the tenant's stored info and persists the result.
 * Fields absent from the request keep their previous values.
 */
private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) {
    TenantInfo oldInfo = tenant.info();
    Inspector insp = toSlime(request.getData()).get();
    TenantInfo mergedInfo = TenantInfo.EMPTY
            .withName(getString(insp.field("name"), oldInfo.name()))
            .withEmail(getString(insp.field("email"), oldInfo.email()))
            // Fixed: fallback was oldInfo.email(), silently replacing the website with the email on partial updates.
            .withWebsite(getString(insp.field("website"), oldInfo.website()))
            .withInvoiceEmail(getString(insp.field("invoiceEmail"), oldInfo.invoiceEmail()))
            .withContactName(getString(insp.field("contactName"), oldInfo.contactName()))
            // Fixed: fallback was oldInfo.contactName(), silently replacing the contact email with the name.
            .withContactEmail(getString(insp.field("contactEmail"), oldInfo.contactEmail()))
            .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address()))
            .withBillingContact(updateTenantInfoBillingContact(insp.field("billingContact"), oldInfo.billingContact()));
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}
/** Merges an address object from the request into the old address; absent fields keep their old values. */
private TenantInfoAddress updateTenantInfoAddress(Inspector insp, TenantInfoAddress oldAddress) {
    if ( ! insp.valid())
        return oldAddress; // no address in the request: keep the stored one unchanged
    TenantInfoAddress merged = TenantInfoAddress.EMPTY;
    merged = merged.withCountry(getString(insp.field("country"), oldAddress.country()));
    merged = merged.withStateRegionProvince(getString(insp.field("stateRegionProvince"), oldAddress.stateRegionProvince()));
    merged = merged.withCity(getString(insp.field("city"), oldAddress.city()));
    merged = merged.withPostalCodeOrZip(getString(insp.field("postalCodeOrZip"), oldAddress.postalCodeOrZip()));
    merged = merged.withAddressLines(getString(insp.field("addressLines"), oldAddress.addressLines()));
    return merged;
}
/** Merges a billing-contact object from the request into the old contact; absent fields keep their old values. */
private TenantInfoBillingContact updateTenantInfoBillingContact(Inspector insp, TenantInfoBillingContact oldContact) {
    if ( ! insp.valid())
        return oldContact; // no billing contact in the request: keep the stored one unchanged
    TenantInfoBillingContact merged = TenantInfoBillingContact.EMPTY;
    merged = merged.withName(getString(insp.field("name"), oldContact.name()));
    merged = merged.withEmail(getString(insp.field("email"), oldContact.email()));
    merged = merged.withPhone(getString(insp.field("phone"), oldContact.phone()));
    merged = merged.withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address()));
    return merged;
}
/**
 * Lists applications for a tenant, optionally filtered to a single application name,
 * with each application's instances and self-links. 404s when the tenant does not exist.
 */
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
TenantName tenant = TenantName.from(tenantName);
if (controller.tenants().get(tenantName).isEmpty())
return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
Slime slime = new Slime();
Cursor applicationArray = slime.setArray();
for (com.yahoo.vespa.hosted.controller.Application application : controller.applications().asList(tenant)) {
// Empty filter means "all applications".
if (applicationName.map(application.id().application().value()::equals).orElse(true)) {
Cursor applicationObject = applicationArray.addObject();
applicationObject.setString("tenant", application.id().tenant().value());
applicationObject.setString("application", application.id().application().value());
applicationObject.setString("url", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value(),
request.getUri()).toString());
Cursor instanceArray = applicationObject.setArray("instances");
// Optionally restrict the instance list to production instances only, based on a request property.
for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
: application.instances().keySet()) {
Cursor instanceObject = instanceArray.addObject();
instanceObject.setString("instance", instance.value());
instanceObject.setString("url", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value() +
"/instance/" + instance.value(),
request.getUri()).toString());
}
}
}
return new SlimeJsonResponse(slime);
}
/** Downloads the dev application package for a manually deployed zone as a zip. */
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
    if ( ! type.environment().isManuallyDeployed())
        throw new IllegalArgumentException("Only manually deployed zones have dev packages");
    ZoneId devZone = type.zone(controller.system());
    byte[] packageBytes = controller.applications().applicationStore().getDev(id, devZone);
    String filename = id.toFullString() + "." + devZone.value() + ".zip";
    return new ZipResponse(filename, packageBytes);
}
/**
 * Downloads a submitted application package as a zip, either for the build number given
 * in the "build" request property or, when absent, for the latest submitted build.
 *
 * @throws NotExistsException when no package has been submitted, or none exists for the requested build
 */
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
var tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
var applicationId = ApplicationId.from(tenantName, applicationName, InstanceName.defaultName().value());
long buildNumber;
// "build" must be a parseable long when present.
var requestedBuild = Optional.ofNullable(request.getProperty("build")).map(build -> {
try {
return Long.parseLong(build);
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Invalid build number", e);
}
});
if (requestedBuild.isEmpty()) {
// No explicit build requested: fall back to the latest submitted version.
var application = controller.applications().requireApplication(tenantAndApplication);
var latestBuild = application.latestVersion().map(ApplicationVersion::buildNumber).orElse(OptionalLong.empty());
if (latestBuild.isEmpty()) {
throw new NotExistsException("No application package has been submitted for '" + tenantAndApplication + "'");
}
buildNumber = latestBuild.getAsLong();
} else {
buildNumber = requestedBuild.get();
}
var applicationPackage = controller.applications().applicationStore().find(tenantAndApplication.tenant(), tenantAndApplication.application(), buildNumber);
var filename = tenantAndApplication + "-build" + buildNumber + ".zip";
if (applicationPackage.isEmpty()) {
throw new NotExistsException("No application package found for '" +
tenantAndApplication +
"' with build number " + buildNumber);
}
return new ZipResponse(filename, applicationPackage.get());
}
/** Serializes a single application; throws NotExistsException (via getApplication) when missing. */
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
    Slime response = new Slime();
    toSlime(response.setObject(), getApplication(tenantName, applicationName), request);
    return new SlimeJsonResponse(response);
}
/** Returns the compile version for the application as {"compileVersion": "..."} . */
private HttpResponse compileVersion(String tenantName, String applicationName) {
    Slime slime = new Slime();
    String version = compileVersion(TenantAndApplicationId.from(tenantName, applicationName)).toFullString();
    slime.setObject().setString("compileVersion", version);
    return new SlimeJsonResponse(slime);
}
/** Serializes a single instance together with its application's deployment status. */
private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Slime response = new Slime();
    Instance instance = getInstance(tenantName, applicationName, instanceName);
    toSlime(response.setObject(), instance,
            controller.jobController().deploymentStatus(getApplication(tenantName, applicationName)), request);
    return new SlimeJsonResponse(response);
}
/**
 * Registers the PEM public key in the request body as a developer key for the calling user,
 * and returns the tenant's full key list. Only supported for cloud tenants.
 *
 * @throws IllegalArgumentException when the tenant is not a cloud tenant
 */
private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
Principal user = request.getJDiscRequest().getUserPrincipal();
String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
Slime root = new Slime();
// The response is built inside the lock so it reflects the stored key set.
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
tenant = tenant.withDeveloperKey(developerKey, user);
toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
controller.tenants().store(tenant);
});
return new SlimeJsonResponse(root);
}
/**
 * Validates a named secret store against a concrete deployment, proxying the validation
 * to the config server and wrapping its JSON reply under "result".
 */
private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) {
    var awsRegion = request.getProperty("aws-region");
    var parameterName = request.getProperty("parameter-name");
    var applicationId = ApplicationId.fromFullString(request.getProperty("application-id"));
    var zoneId = ZoneId.from(request.getProperty("zone"));
    var deploymentId = new DeploymentId(applicationId, zoneId);
    // Check the tenant type BEFORE casting: the previous cast-then-check threw
    // ClassCastException for non-cloud tenants, making the badRequest branch unreachable.
    var rawTenant = controller.tenants().require(applicationId.tenant());
    if (rawTenant.type() != Tenant.Type.cloud) {
        return ErrorResponse.badRequest("Tenant '" + applicationId.tenant() + "' is not a cloud tenant");
    }
    var tenant = (CloudTenant) rawTenant;
    var tenantSecretStore = tenant.tenantSecretStores()
            .stream()
            .filter(secretStore -> secretStore.getName().equals(secretStoreName))
            .findFirst();
    if (tenantSecretStore.isEmpty())
        return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'");
    var response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName);
    try {
        var responseRoot = new Slime();
        var responseCursor = responseRoot.setObject();
        responseCursor.setString("target", deploymentId.toString());
        var responseResultCursor = responseCursor.setObject("result");
        var responseSlime = SlimeUtils.jsonToSlime(response);
        SlimeUtils.copyObject(responseSlime.get(), responseResultCursor);
        return new SlimeJsonResponse(responseRoot);
    } catch (JsonParseException e) {
        // Config server reply was not valid JSON: pass it through verbatim as a 500.
        return ErrorResponse.internalServerError(response);
    }
}
/**
 * Removes the PEM public key in the request body from the tenant's developer keys
 * and returns the remaining key list. Only supported for cloud tenants.
 *
 * @throws IllegalArgumentException when the tenant is not a cloud tenant
 */
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    // Removed an unused local ("Principal user = ...developerKeys().get(developerKey)"):
    // it performed a redundant tenant lookup whose result was never read.
    Slime root = new Slime();
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withoutDeveloperKey(developerKey);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}
/** Serializes a key-to-owner map as an array of {key, user} objects, keys in PEM form. */
private void toSlime(Cursor keysArray, Map<PublicKey, Principal> keys) {
    for (Map.Entry<PublicKey, Principal> entry : keys.entrySet()) {
        Cursor keyObject = keysArray.addObject();
        keyObject.setString("key", KeyUtils.toPem(entry.getKey()));
        keyObject.setString("user", entry.getValue().getName());
    }
}
/** Adds the PEM public key in the request body as a deploy key and returns the resulting key list. */
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
    String pemKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey newKey = KeyUtils.fromPemEncodedPublicKey(pemKey);
    Slime response = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withDeployKey(newKey);
        // Serialize the key set from the locked, updated application so the reply reflects what is stored.
        Cursor keysArray = response.setObject().setArray("keys");
        application.get().deployKeys().stream().map(KeyUtils::toPem).forEach(keysArray::addString);
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(response);
}
/** Removes the PEM public key in the request body from the deploy keys and returns the remaining list. */
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
    String pemKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey keyToRemove = KeyUtils.fromPemEncodedPublicKey(pemKey);
    Slime response = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withoutDeployKey(keyToRemove);
        // Serialize the key set from the locked, updated application so the reply reflects what is stored.
        Cursor keysArray = response.setObject().setArray("keys");
        application.get().deployKeys().stream().map(KeyUtils::toPem).forEach(keysArray::addString);
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(response);
}
/**
 * Registers a new tenant secret store: creates the tenant policy, registers the store with the
 * secret service, persists it on the tenant, and returns the tenant's full store list.
 *
 * @throws IllegalArgumentException when the tenant is not a cloud tenant
 */
private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    var data = toSlime(request.getData()).get();
    var awsId = mandatory("awsId", data).asString();
    var externalId = mandatory("externalId", data).asString();
    var role = mandatory("role", data).asString();
    var tenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
    var tenantSecretStore = new TenantSecretStore(name, awsId, role);
    // Fixed: String.format was wrapped around an already-concatenated message with no format
    // specifiers — pointless, and it would throw if the store's toString ever contained '%'.
    if (!tenantSecretStore.isValid()) {
        return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid");
    }
    if (tenant.tenantSecretStores().contains(tenantSecretStore)) {
        return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured");
    }
    controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role);
    controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId);
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withSecretStore(tenantSecretStore);
        controller.tenants().store(lockedTenant);
    });
    // Re-read the tenant so the response reflects the stored state.
    tenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
    var slime = new Slime();
    toSlime(slime.setObject(), tenant.tenantSecretStores());
    return new SlimeJsonResponse(slime);
}
/**
 * Deletes a named tenant secret store: removes it from the secret service and its tenant policy,
 * persists the removal, and returns the tenant's remaining store list.
 */
private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) {
    var tenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
    var match = tenant.tenantSecretStores().stream()
                      .filter(store -> store.getName().equals(name))
                      .findFirst();
    if (match.isEmpty())
        return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found");
    var storeToDelete = match.get();
    controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), storeToDelete);
    controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), storeToDelete.getName(), storeToDelete.getRole());
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withoutSecretStore(storeToDelete);
        controller.tenants().store(lockedTenant);
    });
    // Re-read the tenant so the response reflects the stored state.
    tenant = (CloudTenant) controller.tenants().require(TenantName.from(tenantName));
    var slime = new Slime();
    toSlime(slime.setObject(), tenant.tenantSecretStores());
    return new SlimeJsonResponse(slime);
}
/**
 * Clears the archive access role for a cloud tenant.
 *
 * @throws IllegalArgumentException when the tenant is not a cloud tenant
 */
private HttpResponse removeArchiveAccess(String tenantName) {
    TenantName name = TenantName.from(tenantName);
    if (controller.tenants().require(name).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    controller.tenants().lockOrThrow(name, LockedTenant.Cloud.class, lockedTenant ->
            controller.tenants().store(lockedTenant.withArchiveAccessRole(Optional.empty())));
    return new MessageResponse("Archive access role removed for tenant " + tenantName + ".");
}
/**
 * Applies a partial update to an application from the JSON request body.
 * Supported fields: "majorVersion" (0 clears the pin) and "pemDeployKey" (adds a deploy key).
 * Returns a message describing what was changed, or "No applicable changes." when nothing matched.
 */
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
Inspector requestObject = toSlime(request.getData()).get();
StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
Inspector majorVersionField = requestObject.field("majorVersion");
if (majorVersionField.valid()) {
// 0 is a sentinel meaning "clear the major-version pin".
Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
application = application.withMajorVersion(majorVersion);
messageBuilder.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
}
Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
if (pemDeployKeyField.valid()) {
String pemDeployKey = pemDeployKeyField.asString();
PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
application = application.withDeployKey(deployKey);
messageBuilder.add("Added deploy key " + pemDeployKey);
}
controller.applications().store(application);
});
return new MessageResponse(messageBuilder.toString());
}
/** Fetches the application, or throws NotExistsException when it is not found. */
private com.yahoo.vespa.hosted.controller.Application getApplication(String tenantName, String applicationName) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    return controller.applications()
                     .getApplication(id)
                     .orElseThrow(() -> new NotExistsException(id + " not found"));
}
/** Fetches the instance, or throws NotExistsException when it is not found. */
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    return controller.applications()
                     .getInstance(id)
                     .orElseThrow(() -> new NotExistsException(id + " not found"));
}
/**
 * Lists the nodes of a deployment, as reported by the node repository via the config server,
 * serializing per-node state, version, flavor, resources and cluster membership.
 */
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, id);
Slime slime = new Slime();
Cursor nodesArray = slime.setObject().setArray("nodes");
for (Node node : nodes) {
Cursor nodeObject = nodesArray.addObject();
nodeObject.setString("hostname", node.hostname().value());
nodeObject.setString("state", valueOf(node.state()));
// Only present when the node is reserved to a tenant.
node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
nodeObject.setString("orchestration", valueOf(node.serviceState()));
nodeObject.setString("version", node.currentVersion().toString());
nodeObject.setString("flavor", node.flavor());
toSlime(node.resources(), nodeObject);
nodeObject.setBool("fastDisk", node.resources().diskSpeed() == NodeResources.DiskSpeed.fast);
nodeObject.setString("clusterId", node.clusterId());
nodeObject.setString("clusterType", valueOf(node.clusterType()));
}
return new SlimeJsonResponse(slime);
}
/** Reports autoscaling state for each cluster of the given deployment, serialized as JSON. */
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);
    Slime slime = new Slime();
    Cursor clustersObject = slime.setObject().setObject("clusters");
    for (Cluster cluster : application.clusters().values()) {
        Cursor clusterObject = clustersObject.setObject(cluster.id().value());
        clusterObject.setString("type", cluster.type().name());
        toSlime(cluster.min(), clusterObject.setObject("min"));
        toSlime(cluster.max(), clusterObject.setObject("max"));
        toSlime(cluster.current(), clusterObject.setObject("current"));
        // A target is only reported when it exists and actually differs from the current resources.
        cluster.target()
               .filter(target -> ! target.justNumbers().equals(cluster.current().justNumbers()))
               .ifPresent(target -> toSlime(target, clusterObject.setObject("target")));
        cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested")));
        utilizationToSlime(cluster.utilization(), clusterObject.setObject("utilization"));
        scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
        clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus());
        clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis());
        clusterObject.setDouble("maxQueryGrowthRate", cluster.maxQueryGrowthRate());
        clusterObject.setDouble("currentQueryFractionOfMax", cluster.currentQueryFractionOfMax());
    }
    return new SlimeJsonResponse(slime);
}
/** Maps a node state to its wire-format name; throws on states this API does not expose. */
private static String valueOf(Node.State state) {
    switch (state) {
        case active:      return "active";
        case dirty:       return "dirty";
        case failed:      return "failed";
        case inactive:    return "inactive";
        case parked:      return "parked";
        case provisioned: return "provisioned";
        case ready:       return "ready";
        case reserved:    return "reserved";
        default: throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
    }
}
/** Maps an orchestration state to its wire-format name; anything unmapped renders as "unknown". */
static String valueOf(Node.ServiceState state) {
    switch (state) {
        case expectedUp:      return "expectedUp";
        case allowedDown:     return "allowedDown";
        case permanentlyDown: return "permanentlyDown";
        case unorchestrated:  return "unorchestrated";
        default:              return "unknown";
    }
}
/** Maps a node cluster type to its wire-format name; throws on unexpected types. */
private static String valueOf(Node.ClusterType type) {
    switch (type) {
        case admin:     return "admin";
        case combined:  return "combined";
        case container: return "container";
        case content:   return "content";
        default: throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
    }
}
/** Maps a disk speed to its wire-format name; throws on unexpected values. */
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
    switch (diskSpeed) {
        case any:  return "any";
        case fast: return "fast";
        case slow: return "slow";
        default: throw new IllegalArgumentException("Unknown disk speed '" + diskSpeed.name() + "'");
    }
}
/** Maps a storage type to its wire-format name; throws on unexpected values. */
private static String valueOf(NodeResources.StorageType storageType) {
    switch (storageType) {
        case any:    return "any";
        case local:  return "local";
        case remote: return "remote";
        default: throw new IllegalArgumentException("Unknown storage type '" + storageType.name() + "'");
    }
}
/**
 * Streams Vespa logs for the given deployment to the client.
 *
 * @param queryParameters filter parameters passed through to the config server log API
 */
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    DeploymentId deployment = new DeploymentId(application, zone);
    InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
    return new HttpResponse(200) {
        @Override
        public void render(OutputStream outputStream) throws IOException {
            // Close the upstream stream when rendering finishes (or fails), so the
            // connection to the config server is always released.
            try (logStream) {
                logStream.transferTo(outputStream);
            }
        }
    };
}
/** Fetches proton metrics for the given deployment and returns them as a JSON response. */
private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    return buildResponseFromProtonMetrics(controller.serviceRegistry().configServer().getProtonMetrics(deployment));
}
/** Wraps the given proton metrics in a {"metrics": [...]} JSON response, or a 500 on serialization failure. */
private JsonResponse buildResponseFromProtonMetrics(List<ProtonMetrics> protonMetrics) {
    try {
        var metricsArray = jsonMapper.createArrayNode();
        for (ProtonMetrics metrics : protonMetrics)
            metricsArray.add(metrics.toJson());
        var root = jsonMapper.createObjectNode();
        root.set("metrics", metricsArray);
        return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(root));
    } catch (JsonProcessingException e) {
        log.log(Level.SEVERE, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
        return new JsonResponse(500, "");
    }
}
/**
 * Triggers (or re-triggers) the given job for the given application, honoring the
 * "skipTests" and "reTrigger" flags of the request body.
 */
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    boolean requireTests = ! requestObject.field("skipTests").asBool();
    String triggered;
    if (requestObject.field("reTrigger").asBool()) {
        triggered = controller.applications().deploymentTrigger().reTrigger(id, type).type().jobName();
    } else {
        triggered = controller.applications().deploymentTrigger()
                              .forceTrigger(id, type, request.getJDiscRequest().getUserPrincipal().getName(), requireTests)
                              .stream().map(job -> job.type().jobName()).collect(joining(", "));
    }
    if (triggered.isEmpty())
        return new MessageResponse("Job " + type.jobName() + " for " + id + " not triggered");
    return new MessageResponse("Triggered " + triggered + " for " + id);
}
/** Pauses the given job for the maximum allowed pause duration. */
private HttpResponse pause(ApplicationId id, JobType type) {
    Instant pauseUntil = controller.clock().instant().plus(DeploymentTrigger.maxPause);
    controller.applications().deploymentTrigger().pauseJob(id, type, pauseUntil);
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
/** Resumes a previously paused job for the given application. */
private HttpResponse resume(ApplicationId id, JobType type) {
    controller.applications().deploymentTrigger().resumeJob(id, type);
    String message = type.jobName() + " for " + id + " resumed";
    return new MessageResponse(message);
}
/**
 * Serializes an overview of the given application — instances, current and outstanding change,
 * deploy keys, service metrics and activity — to the given cursor. Field order defines the
 * wire format of the response.
 */
private void toSlime(Cursor object, com.yahoo.vespa.hosted.controller.Application application, HttpRequest request) {
object.setString("tenant", application.id().tenant().value());
object.setString("application", application.id().application().value());
// URL clients can follow to list this application's jobs.
object.setString("deployments", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value() +
"/job/",
request.getUri()).toString());
DeploymentStatus status = controller.jobController().deploymentStatus(application);
application.latestVersion().ifPresent(version -> toSlime(version, object.setObject("latestVersion")));
application.projectId().ifPresent(id -> object.setLong("projectId", id));
// Change status is reported only for the first instance here.
// NOTE(review): presumably a legacy single-instance view of the application — confirm
// before relying on this for multi-instance applications.
application.instances().values().stream().findFirst().ifPresent(instance -> {
if ( ! instance.change().isEmpty())
toSlime(object.setObject("deploying"), instance.change());
if ( ! status.outstandingChange(instance.name()).isEmpty())
toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
});
application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
Cursor instancesArray = object.setArray("instances");
// Optionally restrict the listing to production instances, as requested by the client.
for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
: application.instances().values())
toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);
application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
Cursor metricsObject = object.setObject("metrics");
metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
Cursor activity = object.setObject("activity");
application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/**
 * Serializes one instance of an application — change status, change blockers, global endpoints
 * and deployments — to the given cursor. Field order defines the wire format.
 *
 * Removed a dead local here: a sorted List&lt;JobStatus&gt; was computed but never used.
 */
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
    object.setString("instance", instance.name().value());
    if (deploymentSpec.instance(instance.name()).isPresent()) {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
        Cursor changeBlockers = object.setArray("changeBlockers");
        deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    globalEndpointsToSlime(object, instance);
    // Deployments follow the deployment spec's order when the instance is declared in it.
    List<Deployment> deployments = deploymentSpec.instance(instance.name())
                                                 .map(spec -> new DeploymentSteps(spec, controller::system))
                                                 .map(steps -> steps.sortedDeployments(instance.deployments().values()))
                                                 .orElse(List.copyOf(instance.deployments().values()));
    Cursor deploymentsArray = object.setArray("deployments");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = deploymentsArray.addObject();
        if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
        if (recurseOverDeployments(request)) // Include full deployment information when recursive.
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/instance/" + instance.name().value() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
}
/** Serializes the instance's non-legacy rotation-backed endpoint URLs, and its first rotation id if any. */
private void globalEndpointsToSlime(Cursor object, Instance instance) {
    Cursor globalRotationsArray = object.setArray("globalRotations");
    controller.routing().endpointsOf(instance.id())
              .requiresRotation()
              .not().legacy()
              .asList().stream()
              .map(endpoint -> endpoint.url().toString())
              .distinct() // keep first occurrence only, preserving encounter order
              .forEach(globalRotationsArray::addString);
    if ( ! instance.rotations().isEmpty())
        object.setString("rotationId", instance.rotations().get(0).rotationId().asString());
}
/**
 * Serializes the given instance in detail — source info, change status, change blockers,
 * endpoints, deployments, deploy keys, metrics and activity — to the given cursor.
 * Field order defines the wire format.
 *
 * Removed a dead local here: a sorted List&lt;JobStatus&gt; was computed but never used.
 */
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
    com.yahoo.vespa.hosted.controller.Application application = status.application();
    object.setString("tenant", instance.id().tenant().value());
    object.setString("application", instance.id().application().value());
    object.setString("instance", instance.id().instance().value());
    // URL clients can follow to list this instance's jobs.
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + instance.id().tenant().value() +
                                             "/application/" + instance.id().application().value() +
                                             "/instance/" + instance.id().instance().value() + "/job/",
                                             request.getUri()).toString());
    application.latestVersion().ifPresent(version -> {
        sourceRevisionToSlime(version.source(), object.setObject("source"));
        version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
        version.commit().ifPresent(commit -> object.setString("commit", commit));
    });
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    if (application.deploymentSpec().instance(instance.name()).isPresent()) {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    globalEndpointsToSlime(object, instance);
    // Deployments follow the deployment spec's order when the instance is declared in it.
    List<Deployment> deployments =
            application.deploymentSpec().instance(instance.name())
                       .map(spec -> new DeploymentSteps(spec, controller::system))
                       .map(steps -> steps.sortedDeployments(instance.deployments().values()))
                       .orElse(List.copyOf(instance.deployments().values()));
    Cursor instancesArray = object.setArray("instances");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = instancesArray.addObject();
        if (deployment.zone().environment() == Environment.prod) {
            if (instance.rotations().size() == 1) {
                toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment),
                        deploymentObject);
            }
            if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
            }
        }
        if (recurseOverDeployments(request)) // Include full deployment information when recursive.
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", instance.id().instance().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
    // Also list production zones the deployment spec expects but which have no deployment yet.
    status.jobSteps().keySet().stream()
          .filter(job -> job.application().instance().equals(instance.name()))
          .filter(job -> job.type().isProduction() && job.type().isDeployment())
          .map(job -> job.type().zone(controller.system()))
          .filter(zone -> ! instance.deployments().containsKey(zone))
          .forEach(zone -> {
              Cursor deploymentObject = instancesArray.addObject();
              deploymentObject.setString("environment", zone.environment().value());
              deploymentObject.setString("region", zone.region().value());
          });
    // Singular "pemDeployKey" is kept alongside the plural array for older clients.
    application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/** Returns detailed information about a single deployment, or throws if the instance is not deployed in the zone. */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment,
                                String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().getInstance(id)
                                  .orElseThrow(() -> new NotExistsException(id + " not found"));
    DeploymentId deploymentId = new DeploymentId(instance.id(), requireZone(environment, region));
    Deployment deployment = instance.deployments().get(deploymentId.zoneId());
    if (deployment == null)
        throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId());
    Slime slime = new Slime();
    toSlime(slime.setObject(), deploymentId, deployment, request);
    return new SlimeJsonResponse(slime);
}
/** Serializes the given change: the target platform version and/or the target application revision. */
private void toSlime(Cursor object, Change change) {
    change.platform().ifPresent(platform -> object.setString("version", platform.toString()));
    change.application().ifPresent(version -> {
        if ( ! version.isUnknown()) // unknown revisions carry no reportable detail
            toSlime(version, object.setObject("revision"));
    });
}
/** Serializes the given endpoint to the given cursor. Field order defines the wire format. */
private void toSlime(Endpoint endpoint, Cursor object) {
object.setString("cluster", endpoint.cluster().value());
object.setBool("tls", endpoint.tls());
object.setString("url", endpoint.url().toString());
object.setString("scope", endpointScopeString(endpoint.scope()));
object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
}
/**
 * Serializes full detail for a single deployment — endpoints, versions, job status, rotation
 * status, activity and service metrics — to the given cursor. Field order defines the wire format.
 */
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
response.setString("tenant", deploymentId.applicationId().tenant().value());
response.setString("application", deploymentId.applicationId().application().value());
response.setString("instance", deploymentId.applicationId().instance().value());
response.setString("environment", deploymentId.zoneId().environment().value());
response.setString("region", deploymentId.zoneId().region().value());
var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));
// Zone-scoped endpoints first, then global endpoints targeting this zone; legacy endpoints excluded.
var endpointArray = response.setArray("endpoints");
EndpointList zoneEndpoints = controller.routing().endpointsOf(deploymentId)
.scope(Endpoint.Scope.zone)
.not().legacy();
for (var endpoint : controller.routing().directEndpoints(zoneEndpoints, deploymentId.applicationId())) {
toSlime(endpoint, endpointArray.addObject());
}
EndpointList globalEndpoints = controller.routing().endpointsOf(application, deploymentId.applicationId().instance())
.not().legacy()
.targets(deploymentId.zoneId());
for (var endpoint : controller.routing().directEndpoints(globalEndpoints, deploymentId.applicationId())) {
toSlime(endpoint, endpointArray.addObject());
}
// Convenience links to the cluster and node APIs for this deployment.
response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
response.setString("version", deployment.version().toFullString());
response.setString("revision", deployment.applicationVersion().id());
response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
// Expiry is only reported for zones with a configured deployment time-to-live.
controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
.ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));
DeploymentStatus status = controller.jobController().deploymentStatus(application);
application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
sourceRevisionToSlime(deployment.applicationVersion().source(), response);
var instance = application.instances().get(deploymentId.applicationId().instance());
if (instance != null) {
if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);
// Report job status for this zone: complete, pending (not yet ready to run), or running.
JobType.from(controller.system(), deployment.zone())
.map(type -> new JobId(instance.id(), type))
.map(status.jobSteps()::get)
.ifPresent(stepStatus -> {
JobControllerApiHandlerHelper.applicationVersionToSlime(
response.setObject("applicationVersion"), deployment.applicationVersion());
if (!status.jobsToRun().containsKey(stepStatus.job().get()))
response.setString("status", "complete");
else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true))
response.setString("status", "pending");
else response.setString("status", "running");
});
}
Cursor activity = response.setObject("activity");
deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
instant.toEpochMilli()));
deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
instant.toEpochMilli()));
deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
DeploymentMetrics metrics = deployment.metrics();
Cursor metricsObject = response.setObject("metrics");
metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
metricsObject.setDouble("documentCount", metrics.documentCount());
metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}
/** Serializes the given application version; unknown versions produce no fields. */
private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
    if (applicationVersion.isUnknown()) return; // nothing to report
    object.setLong("buildNumber", applicationVersion.buildNumber().getAsLong());
    object.setString("hash", applicationVersion.id());
    sourceRevisionToSlime(applicationVersion.source(), object.setObject("source"));
    applicationVersion.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
    applicationVersion.commit().ifPresent(commit -> object.setString("commit", commit));
}
/** Serializes the given source revision, if present; absent revisions produce no fields. */
private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
    revision.ifPresent(source -> {
        object.setString("gitRepository", source.repository());
        object.setString("gitBranch", source.branch());
        object.setString("gitCommit", source.commit());
    });
}
/** Serializes the given rotation state under a "bcpStatus" object. */
private void toSlime(RotationState state, Cursor object) {
    object.setObject("bcpStatus").setString("rotationStatus", rotationStateString(state));
}
/** Serializes the status of each assigned rotation endpoint for the given deployment. */
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
    Cursor statusArray = object.setArray("endpointStatus");
    for (AssignedRotation rotation : rotations) {
        Cursor entry = statusArray.addObject();
        var targets = status.of(rotation.rotationId());
        entry.setString("endpointId", rotation.endpointId().id());
        entry.setString("rotationId", rotation.rotationId().asString());
        entry.setString("clusterId", rotation.clusterId().value());
        entry.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment)));
        entry.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
    }
}
/** Returns the monitoring system URI for the given deployment, as provided by the zone registry. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}
/**
 * Returns a non-broken, released version at least as old as the oldest platform the given application is on.
 *
 * If no known version is applicable, the newest version at least as old as the oldest platform is selected,
 * among all versions released for this system. If no such versions exists, throws an IllegalStateException.
 */
private Version compileVersion(TenantAndApplicationId id) {
    Version oldestPlatform = controller.applications().oldestInstalledPlatform(id);
    VersionStatus versionStatus = controller.readVersionStatus();
    Optional<Version> bestKnown = versionStatus.versions().stream()
                                               .filter(version -> version.confidence().equalOrHigherThan(VespaVersion.Confidence.low))
                                               .filter(VespaVersion::isReleased)
                                               .map(VespaVersion::versionNumber)
                                               .filter(version -> ! version.isAfter(oldestPlatform))
                                               .max(Comparator.naturalOrder());
    if (bestKnown.isPresent()) return bestKnown.get();
    // Fall back to the newest applicable maven release the system does not already know about.
    // Build the known-version set once, instead of collecting it anew for every candidate.
    Set<Version> knownVersions = versionStatus.versions().stream()
                                              .map(VespaVersion::versionNumber)
                                              .collect(Collectors.toSet());
    return controller.mavenRepository().metadata().versions().stream()
                     .filter(version -> ! version.isAfter(oldestPlatform))
                     .filter(version -> ! knownVersions.contains(version))
                     .max(Comparator.naturalOrder())
                     .orElseThrow(() -> new IllegalStateException("No available releases of " +
                                                                  controller.mavenRepository().artifactId()));
}
/** Sets the deployment in or out of service in all its global endpoints, both rotation- and policy-based. */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = requireZone(environment, region);
    if ( ! instance.deployments().containsKey(zone))
        throw new NotExistsException(instance + " has no deployment in " + zone);
    var deploymentId = new DeploymentId(instance.id(), zone);
    setGlobalRotationStatus(deploymentId, inService, request);
    setGlobalEndpointStatus(deploymentId, inService, request);
    return new MessageResponse(String.format("Successfully set %s in %s %s service",
                                             instance.id().toShortString(), zone, inService ? "in" : "out of"));
}
/** Set the global endpoint status for given deployment. This only applies to global endpoints backed by a cloud service */
private void setGlobalEndpointStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
    controller.routing().policies()
              .setGlobalRoutingStatus(deployment,
                                      inService ? GlobalRouting.Status.in : GlobalRouting.Status.out,
                                      isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant);
}
/** Set the global rotation status for given deployment. This only applies to global endpoints backed by a rotation */
private void setGlobalRotationStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
    // A reason is mandatory in the request body, for auditability.
    String reason = mandatory("reason", toSlime(request.getData()).get()).asString();
    GlobalRouting.Agent agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant;
    EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
    controller.routing().setGlobalRotationStatus(deployment,
                                                 new EndpointStatus(status, reason, agent.name(),
                                                                    controller.clock().instant().getEpochSecond()));
}
/** Returns the current global rotation override status of the given deployment's endpoints. */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    Cursor overrideArray = slime.setObject().setArray("globalrotationoverride");
    controller.routing().globalRotationStatus(deploymentId)
              .forEach((endpoint, status) -> {
                  // NOTE(review): the upstream name string and its status object are interleaved
                  // in the same array — looks intentional (legacy format); confirm before changing.
                  overrideArray.addString(endpoint.upstreamIdOf(deploymentId));
                  Cursor statusObject = overrideArray.addObject();
                  statusObject.setString("status", status.getStatus().name());
                  statusObject.setString("reason", status.getReason() == null ? "" : status.getReason());
                  statusObject.setString("agent", status.getAgent() == null ? "" : status.getAgent());
                  statusObject.setLong("timestamp", status.getEpoch());
              });
    return new SlimeJsonResponse(slime);
}
/** Returns the rotation (BCP) status of the given deployment, optionally for a specific endpoint id. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = requireZone(environment, region);
    RotationId rotation = findRotationId(instance, endpointId); // may throw before the deployment check, as before
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " has no deployment in " + zone);
    Slime slime = new Slime();
    toSlime(instance.rotationStatus().of(rotation, deployment), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/**
 * Serializes resource metering data for the given tenant and application: current usage rate,
 * this and last month's aggregates, and a per-instance time series for cpu, memory and disk.
 */
private HttpResponse metering(String tenant, String application, HttpRequest request) {
Slime slime = new Slime();
Cursor root = slime.setObject();
MeteringData meteringData = controller.serviceRegistry()
.meteringService()
.getMeteringData(TenantName.from(tenant), ApplicationName.from(application));
// Instantaneous usage rate.
ResourceAllocation currentSnapshot = meteringData.getCurrentSnapshot();
Cursor currentRate = root.setObject("currentrate");
currentRate.setDouble("cpu", currentSnapshot.getCpuCores());
currentRate.setDouble("mem", currentSnapshot.getMemoryGb());
currentRate.setDouble("disk", currentSnapshot.getDiskGb());
// Aggregate for the current month so far.
ResourceAllocation thisMonth = meteringData.getThisMonth();
Cursor thismonth = root.setObject("thismonth");
thismonth.setDouble("cpu", thisMonth.getCpuCores());
thismonth.setDouble("mem", thisMonth.getMemoryGb());
thismonth.setDouble("disk", thisMonth.getDiskGb());
// Aggregate for the previous month.
ResourceAllocation lastMonth = meteringData.getLastMonth();
Cursor lastmonth = root.setObject("lastmonth");
lastmonth.setDouble("cpu", lastMonth.getCpuCores());
lastmonth.setDouble("mem", lastMonth.getMemoryGb());
lastmonth.setDouble("disk", lastMonth.getDiskGb());
// Per-instance time series: details.{cpu,mem,disk}.<instance>.data = [{unixms, value}, ...].
Map<ApplicationId, List<ResourceSnapshot>> history = meteringData.getSnapshotHistory();
Cursor details = root.setObject("details");
Cursor detailsCpu = details.setObject("cpu");
Cursor detailsMem = details.setObject("mem");
Cursor detailsDisk = details.setObject("disk");
history.forEach((applicationId, resources) -> {
String instanceName = applicationId.instance().value();
Cursor detailsCpuApp = detailsCpu.setObject(instanceName);
Cursor detailsMemApp = detailsMem.setObject(instanceName);
Cursor detailsDiskApp = detailsDisk.setObject(instanceName);
Cursor detailsCpuData = detailsCpuApp.setArray("data");
Cursor detailsMemData = detailsMemApp.setArray("data");
Cursor detailsDiskData = detailsDiskApp.setArray("data");
resources.forEach(resourceSnapshot -> {
Cursor cpu = detailsCpuData.addObject();
cpu.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
cpu.setDouble("value", resourceSnapshot.getCpuCores());
Cursor mem = detailsMemData.addObject();
mem.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
mem.setDouble("value", resourceSnapshot.getMemoryGb());
Cursor disk = detailsDiskData.addObject();
disk.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
disk.setDouble("value", resourceSnapshot.getDiskGb());
});
});
return new SlimeJsonResponse(slime);
}
/** Returns the platform and/or application change currently rolling out for the given instance, if any. */
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().requireInstance(id);
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    Change change = instance.change();
    if ( ! change.isEmpty()) {
        change.platform().ifPresent(version -> root.setString("platform", version.toString()));
        change.application().ifPresent(applicationVersion -> root.setString("application", applicationVersion.id()));
        root.setBool("pinned", change.isPinned());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns whether orchestration of the given deployment is currently suspended. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    Cursor response = slime.setObject();
    response.setBool("suspended", controller.applications().isSuspended(deploymentId));
    return new SlimeJsonResponse(slime);
}
/** Lists the services of the given deployment, wrapped as a service API response. */
private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
    ZoneId zone = requireZone(environment, region);
    ApplicationId id = new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build();
    ServiceApiResponse response = new ServiceApiResponse(zone,
                                                         id,
                                                         controller.zoneRegistry().getConfigServerApiUris(zone),
                                                         request.getUri());
    response.setResponse(applicationView);
    return response;
}
/**
 * Proxies a request for the given service in the given deployment to the config server.
 * Cluster controller status pages are returned as raw HTML; everything else is wrapped
 * as a generic service API JSON response.
 */
private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    // Special case: cluster controller status pages are HTML.
    // restPath is expected to look like "<cluster-controller-name>/status/<page>";
    // splitting on "/status/" yields the controller name and the page path.
    if ("container-clustercontroller".equals((serviceName)) && restPath.contains("/status/")) {
        String[] parts = restPath.split("/status/");
        String result = controller.serviceRegistry().configServer().getClusterControllerStatus(deploymentId, parts[0], parts[1]);
        return new HtmlResponse(result);
    }
    // Generic case: proxy to the config server and wrap the returned map.
    Map<?,?> result = controller.serviceRegistry().configServer().getServiceApiResponse(deploymentId, serviceName, restPath);
    ServiceApiResponse response = new ServiceApiResponse(deploymentId.zoneId(),
                                                         deploymentId.applicationId(),
                                                         controller.zoneRegistry().getConfigServerApiUris(deploymentId.zoneId()),
                                                         request.getUri());
    response.setResponse(result, serviceName, restPath);
    return response;
}
/** Proxies an application package content request for the given deployment to the config server. */
private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, String restPath, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deploymentId = new DeploymentId(id, requireZone(environment, region));
    return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, "/" + restPath, request.getUri());
}
/** Updates an existing tenant from the request body and returns the updated tenant. */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName); // 404 if the tenant does not exist
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    return tenant(controller.tenants().require(tenant), request);
}
/** Creates a tenant with the given name from the request body, and returns it. */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().create(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    return tenant(controller.tenants().require(tenant), request);
}
/** Creates a new application under the given tenant, and returns its identity. */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Inspector requestObject = toSlime(request.getData()).get();
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    controller.applications().createApplication(id, credentials);
    Slime slime = new Slime();
    toSlime(id, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Creates a new instance of an application, first creating the application if it does not exist. */
private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    if (controller.applications().getApplication(applicationId).isEmpty())
        createApplication(tenantName, applicationName, request);

    ApplicationId instanceId = applicationId.instance(instanceName);
    controller.applications().createInstance(instanceId);

    Slime slime = new Slime();
    toSlime(instanceId, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
    request = controller.auditLogger().log(request);
    String versionString = readToString(request.getData());
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Version version = Version.fromString(versionString);
        VersionStatus versionStatus = controller.readVersionStatus();
        // An empty version in the request means "target the current system version".
        if (version.equals(Version.emptyVersion))
            version = controller.systemVersion(versionStatus);
        // Only versions active in this system may be targeted.
        if (!versionStatus.isActive(version))
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + versionStatus.versions()
                                                                                  .stream()
                                                                                  .map(VespaVersion::versionNumber)
                                                                                  .map(Version::toString)
                                                                                  .collect(joining(", ")));
        Change change = Change.of(version);
        if (pin)
            change = change.withPin();
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    controller.auditLogger().log(request);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        // Fail with a helpful message, rather than NoSuchElementException from an unchecked
        // Optional.get(), when no application package has ever been submitted.
        ApplicationVersion latest = application.get().latestVersion()
                .orElseThrow(() -> new IllegalArgumentException("Cannot trigger deployment of " + id +
                                                                ": No application package has been submitted"));
        Change change = Change.of(latest);
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder message = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Change current = application.get().require(id.instance()).change();
        if (current.isEmpty()) {
            message.append("No deployment in progress for ").append(id).append(" at this time");
        }
        else {
            controller.applications().deploymentTrigger().cancelChange(id, ChangesToCancel.valueOf(choice.toUpperCase()));
            Change remaining = controller.applications().requireInstance(id).change();
            message.append("Changed deployment from '").append(current)
                   .append("' to '").append(remaining)
                   .append("' for ").append(id);
        }
    });
    return new MessageResponse(message.toString());
}
/** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */
private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream()
                                        .flatMap(clusters -> Stream.of(clusters.split(",")))
                                        .filter(cluster -> ! cluster.isBlank())
                                        .collect(toUnmodifiableList());
    List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream()
                                         .flatMap(types -> Stream.of(types.split(",")))
                                         .filter(type -> ! type.isBlank())
                                         .collect(toUnmodifiableList());

    controller.applications().reindex(id, zone, clusterNames, documentTypes, request.getBooleanProperty("indexedOnly"));
    // The two optional clauses are appended independently. Previously the document-type clause
    // was nested inside the cluster clause, so types were dropped from the message whenever no
    // clusters were given.
    return new MessageResponse("Requested reindexing of " + id + " in " + zone +
                               (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames)) +
                               (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)));
}
/** Gets reindexing status of an application in a zone. */
private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setBool("enabled", reindexing.enabled());
    // One entry per cluster, sorted by cluster name for stable output.
    Cursor clustersArray = root.setArray("clusters");
    reindexing.clusters().entrySet().stream().sorted(comparingByKey())
              .forEach(cluster -> {
                  Cursor clusterObject = clustersArray.addObject();
                  clusterObject.setString("name", cluster.getKey());
                  // Document types with reindexing not yet started, and the generation each requires.
                  Cursor pendingArray = clusterObject.setArray("pending");
                  cluster.getValue().pending().entrySet().stream().sorted(comparingByKey())
                         .forEach(pending -> {
                             Cursor pendingObject = pendingArray.addObject();
                             pendingObject.setString("type", pending.getKey());
                             pendingObject.setLong("requiredGeneration", pending.getValue());
                         });
                  // Document types with a reindexing status (ready/started/ended etc.).
                  Cursor readyArray = clusterObject.setArray("ready");
                  cluster.getValue().ready().entrySet().stream().sorted(comparingByKey())
                         .forEach(ready -> {
                             Cursor readyObject = readyArray.addObject();
                             readyObject.setString("type", ready.getKey());
                             setStatus(readyObject, ready.getValue());
                         });
              });
    return new SlimeJsonResponse(slime);
}
/** Fills the given object with whichever fields of the given reindexing status are present. */
void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
    status.readyAt().ifPresent(at -> statusObject.setLong("readyAtMillis", at.toEpochMilli()));
    status.startedAt().ifPresent(at -> statusObject.setLong("startedAtMillis", at.toEpochMilli()));
    status.endedAt().ifPresent(at -> statusObject.setLong("endedAtMillis", at.toEpochMilli()));
    status.state().map(ApplicationApiHandler::toString).ifPresent(value -> statusObject.setString("state", value));
    status.message().ifPresent(value -> statusObject.setString("message", value));
    status.progress().ifPresent(value -> statusObject.setDouble("progress", value));
}
/** Returns the wire name of the given reindexing state, or null if the state has no serialized form. */
private static String toString(ApplicationReindexing.State state) {
    switch (state) {
        case PENDING: return "pending";
        case RUNNING: return "running";
        case FAILED: return "failed";
        case SUCCESSFUL: return "successful";
        default: return null; // Unknown states are omitted from the response (see setStatus).
    }
}
/** Enables reindexing of an application in a zone. */
private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    controller.applications().enableReindexing(id, zone);
    return new MessageResponse("Enabled reindexing of " + id + " in " + zone);
}
/** Disables reindexing of an application in a zone. */
private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    controller.applications().disableReindexing(id, zone);
    return new MessageResponse("Disabled reindexing of " + id + " in " + zone);
}
/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deploymentId = new DeploymentId(id, requireZone(environment, region));
    // All filter parts are optional; an empty filter restarts the whole deployment.
    Optional<HostName> hostName = Optional.ofNullable(request.getProperty("hostname")).map(HostName::from);
    Optional<ClusterSpec.Type> clusterType = Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from);
    Optional<ClusterSpec.Id> clusterId = Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from);
    controller.applications().restart(deploymentId, new RestartFilter().withHostName(hostName)
                                                                       .withClusterType(clusterType)
                                                                       .withClusterId(clusterId));
    return new MessageResponse("Requested restart of " + deploymentId);
}
/** Set suspension status of the given deployment. */
private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    controller.applications().setSuspension(deploymentId, suspend);
    String verb = suspend ? "Suspended" : "Resumed";
    return new MessageResponse(verb + " orchestration of " + deploymentId);
}
/** Deploys the uploaded application package directly to the zone of the given job type. */
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
    if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");

    Map<String, byte[]> dataParts = parseDataParts(request);
    // Use the same key constant for both the presence check and the lookup; the original
    // checked the literal "applicationZip" but read EnvironmentResource.APPLICATION_ZIP.
    if ( ! dataParts.containsKey(EnvironmentResource.APPLICATION_ZIP))
        throw new IllegalArgumentException("Missing required form part 'applicationZip'");

    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                     Optional.of(id.instance()),
                                                                     Optional.of(type.zone(controller.system())),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));

    // An explicit platform version may optionally be given in the "deployOptions" part.
    Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions"))
                                        .map(json -> SlimeUtils.jsonToSlime(json).get())
                                        .flatMap(options -> optional("vespaVersion", options))
                                        .map(Version::fromString);

    controller.jobController().deploy(id, type, version, applicationPackage);
    RunId runId = controller.jobController().last(id, type).get().id();
    Slime slime = new Slime();
    Cursor rootObject = slime.setObject();
    rootObject.setString("message", "Deployment started in " + runId +
                                    ". This may take about 15 minutes the first time.");
    rootObject.setLong("run", runId.number());
    return new SlimeJsonResponse(slime);
}
/**
 * Deploys an application package (or an existing application version) to the given zone.
 * Handles three cases: the system proxy application, a package identified by source revision
 * and build number, and redeployment of what is already deployed.
 */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( ! dataParts.containsKey("deployOptions"))
        return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
    Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
    /*
     * Special handling of the proxy application (the only system application with an application package)
     * Setting any other deployOptions here is not supported for now (e.g. specifying version), but
     * this might be handy later to handle emergency downgrades.
     */
    boolean isZoneApplication = SystemApplication.proxy.id().equals(applicationId);
    if (isZoneApplication) {
        // The proxy application always deploys at the determined system version;
        // an explicit version in the request is rejected.
        String versionStr = deployOptions.field("vespaVersion").asString();
        boolean versionPresent = !versionStr.isEmpty() && !versionStr.equals("null");
        if (versionPresent) {
            throw new RuntimeException("Version not supported for system applications");
        }
        VersionStatus versionStatus = controller.readVersionStatus();
        if (versionStatus.isUpgrading()) {
            throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
        }
        Optional<VespaVersion> systemVersion = versionStatus.systemVersion();
        if (systemVersion.isEmpty()) {
            throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
        }
        ActivateResult result = controller.applications()
                .deploySystemApplicationPackage(SystemApplication.proxy, zone, systemVersion.get().versionNumber());
        return new SlimeJsonResponse(toSlime(result));
    }
    /*
     * Normal applications from here
     */
    Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip"))
            .map(ApplicationPackage::new);
    Optional<com.yahoo.vespa.hosted.controller.Application> application = controller.applications().getApplication(TenantAndApplicationId.from(applicationId));
    Inspector sourceRevision = deployOptions.field("sourceRevision");
    Inspector buildNumber = deployOptions.field("buildNumber");
    if (sourceRevision.valid() != buildNumber.valid())
        throw new IllegalArgumentException("Source revision and build number must both be provided, or not");
    // A source revision + build number identifies an already registered application version,
    // whose stored package is fetched instead of using an uploaded one.
    Optional<ApplicationVersion> applicationVersion = Optional.empty();
    if (sourceRevision.valid()) {
        if (applicationPackage.isPresent())
            throw new IllegalArgumentException("Application version and application package can't both be provided.");
        applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision),
                                                                 buildNumber.asLong()));
        applicationPackage = Optional.of(controller.applications().getApplicationPackage(applicationId,
                                                                                         applicationVersion.get()));
    }
    boolean deployDirectly = deployOptions.field("deployDirectly").asBool();
    Optional<Version> vespaVersion = optional("vespaVersion", deployOptions).map(Version::new);
    // A direct deployment without any package, version or platform means
    // "redeploy exactly what is currently deployed in this zone".
    if (deployDirectly && applicationPackage.isEmpty() && applicationVersion.isEmpty() && vespaVersion.isEmpty()) {
        Optional<Deployment> deployment = controller.applications().getInstance(applicationId)
                .map(Instance::deployments)
                .flatMap(deployments -> Optional.ofNullable(deployments.get(zone)));
        if(deployment.isEmpty())
            throw new IllegalArgumentException("Can't redeploy application, no deployment currently exist");
        ApplicationVersion version = deployment.get().applicationVersion();
        if(version.isUnknown())
            throw new IllegalArgumentException("Can't redeploy application, application version is unknown");
        applicationVersion = Optional.of(version);
        vespaVersion = Optional.of(deployment.get().version());
        applicationPackage = Optional.of(controller.applications().getApplicationPackage(applicationId,
                                                                                         applicationVersion.get()));
    }
    DeployOptions deployOptionsJsonClass = new DeployOptions(deployDirectly,
                                                             vespaVersion,
                                                             deployOptions.field("ignoreValidationErrors").asBool(),
                                                             deployOptions.field("deployCurrentVersion").asBool());
    // Verify the declared identity of the package against the deploying user before activation.
    applicationPackage.ifPresent(aPackage -> controller.applications().verifyApplicationIdentityConfiguration(applicationId.tenant(),
                                                                                                              Optional.of(applicationId.instance()),
                                                                                                              Optional.of(zone),
                                                                                                              aPackage,
                                                                                                              Optional.of(requireUserPrincipal(request))));
    ActivateResult result = controller.applications().deploy(applicationId,
                                                             zone,
                                                             applicationPackage,
                                                             applicationVersion,
                                                             deployOptionsJsonClass);
    return new SlimeJsonResponse(toSlime(result));
}
/** Deletes the given tenant, if it exists, and returns its last serialized form. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");

    Credentials credentials = accessControlRequests.credentials(tenant.get().name(),
                                                                toSlime(request.getData()).get(),
                                                                request.getJDiscRequest());
    controller.tenants().delete(tenant.get().name(), credentials);
    return tenant(tenant.get(), request);
}
/** Deletes the given application (all instances) after verifying credentials. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Inspector requestObject = toSlime(request.getData()).get();
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    controller.applications().deleteApplication(id, credentials);
    return new MessageResponse("Deleted application " + id);
}
/** Deletes the given instance, and the application as well when this was its last instance. */
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    ApplicationId instanceId = id.instance(instanceName);
    controller.applications().deleteInstance(instanceId);
    if (controller.applications().requireApplication(id).instances().isEmpty()) {
        Credentials credentials = accessControlRequests.credentials(id.tenant(),
                                                                    toSlime(request.getData()).get(),
                                                                    request.getJDiscRequest());
        controller.applications().deleteApplication(id, credentials);
    }
    return new MessageResponse("Deleted instance " + instanceId.toFullString());
}
/** Deactivates the given deployment. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), zone);
    controller.applications().deactivate(id.applicationId(), id.zoneId());
    return new MessageResponse("Deactivated " + id);
}
/** Returns test config for indicated job, with production deployments of the default instance. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
    // Tests always see the production deployments of the *default* instance ...
    ApplicationId defaultInstanceId = TenantAndApplicationId.from(id).defaultInstance();
    HashSet<DeploymentId> deployments = controller.applications()
            .getInstance(defaultInstanceId).stream()
            .flatMap(instance -> instance.productionDeployments().keySet().stream())
            .map(zone -> new DeploymentId(defaultInstanceId, zone))
            .collect(Collectors.toCollection(HashSet::new));
    var testedZone = type.zone(controller.system());
    // ... plus, for non-production jobs, the zone actually under test for the given instance.
    if ( ! type.isProduction())
        deployments.add(new DeploymentId(id, testedZone));
    return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
                                                                  type,
                                                                  false,
                                                                  controller.routing().zoneEndpointsOf(deployments),
                                                                  controller.applications().reachableContentClustersByZone(deployments)));
}
/** Parses "repository", "branch" and "commit" from the given object into a source revision. */
private static SourceRevision toSourceRevision(Inspector object) {
    Inspector repository = object.field("repository");
    Inspector branch = object.field("branch");
    Inspector commit = object.field("commit");
    if ( ! (repository.valid() && branch.valid() && commit.valid())) {
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    }
    return new SourceRevision(repository.asString(), branch.asString(), commit.asString());
}
/** Returns the tenant with the given name, or throws NotExistsException if it does not exist. */
private Tenant getTenantOrThrow(String tenantName) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty())
        throw new NotExistsException(new TenantId(tenantName));
    return tenant.get();
}
/**
 * Serializes the given tenant with type-specific metadata, all its (visible) application
 * instances, and tenant meta data.
 */
private void toSlime(Cursor object, Tenant tenant, HttpRequest request) {
    object.setString("tenant", tenant.name().value());
    object.setString("type", tenantType(tenant));
    List<com.yahoo.vespa.hosted.controller.Application> applications = controller.applications().asList(tenant.name());
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            object.setString("athensDomain", athenzTenant.domain().getName());
            object.setString("property", athenzTenant.property().id());
            athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
            // Contact info is only present when it has been synced from the property system.
            athenzTenant.contact().ifPresent(c -> {
                object.setString("propertyUrl", c.propertyUrl().toString());
                object.setString("contactsUrl", c.url().toString());
                object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
                Cursor contactsArray = object.setArray("contacts");
                c.persons().forEach(persons -> {
                    Cursor personArray = contactsArray.addArray();
                    persons.forEach(personArray::addString);
                });
            });
            break;
        case cloud: {
            CloudTenant cloudTenant = (CloudTenant) tenant;
            cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName()));
            Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
            cloudTenant.developerKeys().forEach((key, user) -> {
                Cursor keyObject = pemDeveloperKeysArray.addObject();
                keyObject.setString("key", KeyUtils.toPem(key));
                keyObject.setString("user", user.getName());
            });
            toSlime(object, cloudTenant.tenantSecretStores());
            // Quota: the tenant's total, and the sum used by all its applications.
            var tenantQuota = controller.serviceRegistry().billingController().getQuota(tenant.name());
            var usedQuota = applications.stream()
                    .map(com.yahoo.vespa.hosted.controller.Application::quotaUsage)
                    .reduce(QuotaUsage.none, QuotaUsage::add);
            toSlime(tenantQuota, usedQuota, object.setObject("quota"));
            break;
        }
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    // Instances are serialized either fully (recursive) or as id + url only.
    Cursor applicationArray = object.setArray("applications");
    for (com.yahoo.vespa.hosted.controller.Application application : applications) {
        DeploymentStatus status = controller.jobController().deploymentStatus(application);
        for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                      : application.instances().values())
            if (recurseOverApplications(request))
                toSlime(applicationArray.addObject(), instance, status, request);
            else
                toSlime(instance.id(), applicationArray.addObject(), request);
    }
    tenantMetaDataToSlime(tenant, object.setObject("metaData"));
}
/** Serializes the given quota and its current usage. */
private void toSlime(Quota quota, QuotaUsage usage, Cursor object) {
    // A missing budget is serialized as an explicit null.
    quota.budget().ifPresentOrElse(amount -> object.setDouble("budget", amount.doubleValue()),
                                   () -> object.setNix("budget"));
    object.setDouble("budgetUsed", usage.rate());
    quota.maxClusterSize().ifPresent(size -> object.setLong("clusterSize", size));
}
/** Serializes the given cluster resources, including an estimated cost rounded to two decimals. */
private void toSlime(ClusterResources resources, Cursor object) {
    object.setLong("nodes", resources.nodes());
    object.setLong("groups", resources.groups());
    toSlime(resources.nodeResources(), object.setObject("nodeResources"));

    // NOTE(review): non-public systems divide the cost by 3 — presumably because capacity is
    // shared there; confirm before relying on this.
    double costDivisor = controller.zoneRegistry().system().isPublic() ? 1.0 : 3.0;
    double cost = Math.round(resources.nodes() * resources.nodeResources().cost() * 100.0 / costDivisor) / 100.0;
    object.setDouble("cost", cost);
}
/** Serializes current and ideal utilization for cpu, memory and disk. */
private void utilizationToSlime(Cluster.Utilization utilization, Cursor utilizationObject) {
    utilizationObject.setDouble("cpu", utilization.cpu());
    utilizationObject.setDouble("idealCpu", utilization.idealCpu());
    utilizationObject.setDouble("memory", utilization.memory());
    utilizationObject.setDouble("idealMemory", utilization.idealMemory());
    utilizationObject.setDouble("disk", utilization.disk());
    utilizationObject.setDouble("idealDisk", utilization.idealDisk());
}
/** Serializes the given scaling events, each with its from/to resources and timestamp. */
private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) {
    for (Cluster.ScalingEvent event : scalingEvents) {
        Cursor eventObject = scalingEventsArray.addObject();
        toSlime(event.from(), eventObject.setObject("from"));
        toSlime(event.to(), eventObject.setObject("to"));
        eventObject.setLong("at", event.at().toEpochMilli());
    }
}
/** Serializes the given node resources. */
private void toSlime(NodeResources resources, Cursor object) {
    object.setDouble("vcpu", resources.vcpu());
    object.setDouble("memoryGb", resources.memoryGb());
    object.setDouble("diskGb", resources.diskGb());
    object.setDouble("bandwidthGbps", resources.bandwidthGbps());
    object.setString("diskSpeed", valueOf(resources.diskSpeed()));
    object.setString("storageType", valueOf(resources.storageType()));
}
/** Serializes a brief entry for the given tenant, as used in the tenant list. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
    object.setString("tenant", tenant.name().value());
    Cursor metaData = object.setObject("metaData");
    metaData.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            metaData.setString("athensDomain", athenzTenant.domain().getName());
            metaData.setString("property", athenzTenant.property().id());
            break;
        case cloud: break; // no extra metadata in the list view
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/** Serializes activity meta data for the given tenant: creation time, last deployments and logins. */
private void tenantMetaDataToSlime(Tenant tenant, Cursor object) {
    List<com.yahoo.vespa.hosted.controller.Application> applications = controller.applications().asList(tenant.name());
    // Most recent start of any dev-environment job run across all the tenant's instances.
    Optional<Instant> lastDev = applications.stream()
            .flatMap(application -> application.instances().values().stream())
            .flatMap(instance -> controller.jobController().jobs(instance.id()).stream()
                    .filter(jobType -> jobType.environment() == Environment.dev)
                    .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream()))
            .map(Run::start)
            .max(Comparator.naturalOrder());
    // Most recent build time of any submitted application package.
    Optional<Instant> lastSubmission = applications.stream()
            .flatMap(app -> app.latestVersion().flatMap(ApplicationVersion::buildTime).stream())
            .max(Comparator.naturalOrder());
    object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli());
    lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli()));
    lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli()));
    // Last login per user level; absent levels are simply omitted.
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user)
            .ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer)
            .ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator)
            .ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli()));
}
/** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to given query*/
private URI withPathAndQuery(String newPath, String newQuery, URI uri) {
    try {
        // Keep scheme, user info, host and port; replace path and query; drop any fragment.
        URI rewritten = new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null);
        return rewritten;
    }
    catch (URISyntaxException e) {
        throw new RuntimeException("Will not happen", e);
    }
}
/** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
private URI withPath(String newPath, URI uri) {
    // Delegates with no query string.
    return withPathAndQuery(newPath, null, uri);
}
/** Returns the application/v4 API path of the given deployment. */
private String toPath(DeploymentId id) {
    ApplicationId application = id.applicationId();
    ZoneId zone = id.zoneId();
    return path("/application", "v4",
                "tenant", application.tenant(),
                "application", application.application(),
                "instance", application.instance(),
                "environment", zone.environment(),
                "region", zone.region());
}
/**
 * Parses the given value as a long.
 *
 * @param valueOrNull the string to parse, or null to fall back to the default
 * @param defaultWhenNull the value returned when valueOrNull is null
 * @throws IllegalArgumentException if the value is non-null and not a parseable long
 */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null) return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    }
    catch (NumberFormatException e) {
        // Chain the cause so the original parse failure is not lost (was dropped before).
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'", e);
    }
}
/** Serializes the essentials of the given job run. */
private void toSlime(Run run, Cursor object) {
    object.setLong("id", run.id().number());
    object.setString("version", run.versions().targetPlatform().toFullString());
    ApplicationVersion targetApplication = run.versions().targetApplication();
    if ( ! targetApplication.isUnknown())
        toSlime(targetApplication, object.setObject("revision"));
    object.setString("reason", "unknown reason");
    object.setLong("at", run.end().orElse(run.start()).toEpochMilli());
}
/**
 * Reads the given stream (capped at 1 MB) and parses it as JSON.
 *
 * @throws RuntimeException wrapping the IOException if reading the stream fails
 */
private Slime toSlime(InputStream jsonStream) {
try {
byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
return SlimeUtils.jsonToSlime(jsonBytes);
} catch (IOException e) {
// Chain the cause — the original threw a bare RuntimeException, discarding the I/O failure.
throw new RuntimeException(e);
}
}
/** Returns the user principal of the given request, failing with a server error if none is set. */
private static Principal requireUserPrincipal(HttpRequest request) {
return Optional.ofNullable(request.getJDiscRequest().getUserPrincipal())
.orElseThrow(() -> new InternalServerErrorException("Expected a user principal"));
}
// Returns the named field of the given object, failing if it is missing or invalid.
private Inspector mandatory(String key, Inspector object) {
if ( ! object.field(key).valid())
throw new IllegalArgumentException("'" + key + "' is missing");
return object.field(key);
}
// Returns the named string field of the given object, if present.
private Optional<String> optional(String key, Inspector object) {
return SlimeUtils.optionalString(object.field(key));
}
// Joins the given path elements with '/'; no leading or trailing separator is added here.
private static String path(Object... elements) {
return Joiner.on("/").join(elements);
}
// Writes tenant and application names, plus a self-referential API URL, to the given cursor.
private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
object.setString("tenant", id.tenant().value());
object.setString("application", id.application().value());
object.setString("url", withPath("/application/v4" +
"/tenant/" + id.tenant().value() +
"/application/" + id.application().value(),
request.getUri()).toString());
}
// As above, but for a full application id: also includes the instance in both the fields and the URL.
private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
object.setString("tenant", id.tenant().value());
object.setString("application", id.application().value());
object.setString("instance", id.instance().value());
object.setString("url", withPath("/application/v4" +
"/tenant/" + id.tenant().value() +
"/application/" + id.application().value() +
"/instance/" + id.instance().value(),
request.getUri()).toString());
}
// Serializes the result of an application activation: revision id, package size, prepare log
// messages, and the config change actions (restart and refeed) reported by the config server.
private Slime toSlime(ActivateResult result) {
Slime slime = new Slime();
Cursor object = slime.setObject();
object.setString("revisionId", result.revisionId().id());
object.setLong("applicationZipSize", result.applicationZipSizeBytes());
Cursor logArray = object.setArray("prepareMessages");
// The prepare log may be absent; emit an empty array in that case.
if (result.prepareResponse().log != null) {
for (Log logMessage : result.prepareResponse().log) {
Cursor logObject = logArray.addObject();
logObject.setLong("time", logMessage.time);
logObject.setString("level", logMessage.level);
logObject.setString("message", logMessage.message);
}
}
Cursor changeObject = object.setObject("configChangeActions");
// Services that must be restarted for the new config to take effect.
Cursor restartActionsArray = changeObject.setArray("restart");
for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
Cursor restartActionObject = restartActionsArray.addObject();
restartActionObject.setString("clusterName", restartAction.clusterName);
restartActionObject.setString("clusterType", restartAction.clusterType);
restartActionObject.setString("serviceType", restartAction.serviceType);
serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
}
// Document types that must be re-fed after the change.
Cursor refeedActionsArray = changeObject.setArray("refeed");
for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
Cursor refeedActionObject = refeedActionsArray.addObject();
refeedActionObject.setString("name", refeedAction.name);
refeedActionObject.setString("documentType", refeedAction.documentType);
refeedActionObject.setString("clusterName", refeedAction.clusterName);
serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
}
return slime;
}
// Serializes each service info (name, type, config id, host) as an object in the given array.
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
for (ServiceInfo serviceInfo : serviceInfoList) {
Cursor serviceInfoObject = array.addObject();
serviceInfoObject.setString("serviceName", serviceInfo.serviceName);
serviceInfoObject.setString("serviceType", serviceInfo.serviceType);
serviceInfoObject.setString("configId", serviceInfo.configId);
serviceInfoObject.setString("hostName", serviceInfo.hostName);
}
}
// Appends each string to the given slime array.
private void stringsToSlime(List<String> strings, Cursor array) {
for (String string : strings)
array.addString(string);
}
// Serializes the tenant's secret stores (name, AWS id, role) under a "secretStores" array.
private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) {
Cursor secretStore = object.setArray("secretStores");
tenantSecretStores.forEach(store -> {
Cursor storeObject = secretStore.addObject();
storeObject.setString("name", store.getName());
storeObject.setString("awsId", store.getAwsId());
storeObject.setString("role", store.getRole());
});
}
/**
 * Reads the entire stream into a single string, or returns null if the stream is empty.
 * The "\\A" delimiter makes the scanner return the whole remaining input as one token.
 */
private String readToString(InputStream stream) {
// Use an explicit charset: the no-arg Scanner constructor decodes with the platform
// default, which makes request parsing depend on the JVM's locale/encoding settings.
Scanner scanner = new Scanner(stream, "UTF-8").useDelimiter("\\A");
if ( ! scanner.hasNext()) return null;
return scanner.next();
}
// Recursion depth helpers for the "recursive" query parameter: each level implies the one below it.
private static boolean recurseOverTenants(HttpRequest request) {
return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive"));
}
private static boolean recurseOverApplications(HttpRequest request) {
return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive"));
}
private static boolean recurseOverDeployments(HttpRequest request) {
return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive"));
}
// Whether the "production" query parameter asks for production instances only.
private static boolean showOnlyProductionInstances(HttpRequest request) {
return "true".equals(request.getProperty("production"));
}
// Maps the tenant type to the string constants used in the API responses.
private static String tenantType(Tenant tenant) {
switch (tenant.type()) {
case athenz: return "ATHENS";
case cloud: return "CLOUD";
default: throw new IllegalArgumentException("Unknown tenant type: " + tenant.getClass().getSimpleName());
}
}
// Extracts an application id from the tenant/application/instance path segments.
private static ApplicationId appIdFromPath(Path path) {
return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
}
// Extracts the job type from the "jobtype" path segment.
private static JobType jobTypeFromPath(Path path) {
return JobType.fromJobName(path.get("jobtype"));
}
// Extracts a run id (application, job type, run number) from the request path.
private static RunId runIdFromPath(Path path) {
long number = Long.parseLong(path.get("number"));
return new RunId(appIdFromPath(path), jobTypeFromPath(path), number);
}
// Handles submission of a new application package: parses the multipart request, validates
// the submit options and source URL, verifies identity configuration, and hands off to the
// job controller for deployment orchestration.
private HttpResponse submit(String tenant, String application, HttpRequest request) {
Map<String, byte[]> dataParts = parseDataParts(request);
Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
// Project id must be positive; missing/zero is coerced to 1.
long projectId = Math.max(1, submitOptions.field("projectId").asLong());
Optional<String> repository = optional("repository", submitOptions);
Optional<String> branch = optional("branch", submitOptions);
Optional<String> commit = optional("commit", submitOptions);
// A source revision is only meaningful when all three parts are present.
Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
: Optional.empty();
Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
Optional<String> authorEmail = optional("authorEmail", submitOptions);
sourceUrl.map(URI::create).ifPresent(url -> {
if (url.getHost() == null || url.getScheme() == null)
throw new IllegalArgumentException("Source URL must include scheme and host");
});
ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
Optional.empty(),
Optional.empty(),
applicationPackage,
Optional.of(requireUserPrincipal(request)));
return JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
tenant,
application,
sourceRevision,
authorEmail,
sourceUrl,
projectId,
applicationPackage,
dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP));
}
// Removes all production deployments by submitting a synthetic "deployment removal" package.
private HttpResponse removeAllProdDeployments(String tenant, String application) {
JobControllerApiHandlerHelper.submitResponse(controller.jobController(), tenant, application,
Optional.empty(), Optional.empty(), Optional.empty(), 1,
ApplicationPackage.deploymentRemoval(), new byte[0]);
return new MessageResponse("All deployments removed");
}
// Parses and validates a zone from environment and region path segments.
private ZoneId requireZone(String environment, String region) {
ZoneId zone = ZoneId.from(environment, region);
// The synthetic prod "controller" zone is accepted even though it is not in the zone registry.
if (zone.environment() == Environment.prod && zone.region().value().equals("controller")) {
return zone;
}
if (!controller.zoneRegistry().hasZone(zone)) {
throw new IllegalArgumentException("Zone " + zone + " does not exist in this system");
}
return zone;
}
// Parses the multipart request body; when an X-Content-Hash header is present, the body is
// digested while parsing and verified against the client-supplied base64 SHA-256.
private static Map<String, byte[]> parseDataParts(HttpRequest request) {
String contentHash = request.getHeader("X-Content-Hash");
if (contentHash == null)
return new MultipartParser().parse(request);
DigestInputStream digester = Signatures.sha256Digester(request.getData());
var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
// NOTE(review): Arrays.equals is not constant-time; if this hash ever serves as an
// authenticator rather than a pure integrity check, consider MessageDigest.isEqual — verify.
if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
return dataParts;
}
// Resolves which global rotation a request refers to: requires the instance to have rotations,
// matches an explicit endpointId when given, and otherwise demands the instance has exactly one.
private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
if (instance.rotations().isEmpty()) {
throw new NotExistsException("global rotation does not exist for " + instance);
}
if (endpointId.isPresent()) {
return instance.rotations().stream()
.filter(r -> r.endpointId().id().equals(endpointId.get()))
.map(AssignedRotation::rotationId)
.findFirst()
.orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() +
" does not exist for " + instance));
} else if (instance.rotations().size() > 1) {
// Ambiguous without an endpointId when multiple rotations are assigned.
throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
}
return instance.rotations().get(0).rotationId();
}
// Maps a rotation state to its API string. NOTE(review): unlike the two mappers below,
// this one returns "UNKNOWN" for unhandled values instead of throwing — confirm intended.
private static String rotationStateString(RotationState state) {
switch (state) {
case in: return "IN";
case out: return "OUT";
}
return "UNKNOWN";
}
// Maps an endpoint scope to its API string; throws on unknown values.
private static String endpointScopeString(Endpoint.Scope scope) {
switch (scope) {
case region: return "region";
case global: return "global";
case zone: return "zone";
}
throw new IllegalArgumentException("Unknown endpoint scope " + scope);
}
// Maps a routing method to its API string; throws on unknown values.
private static String routingMethodString(RoutingMethod method) {
switch (method) {
case exclusive: return "exclusive";
case shared: return "shared";
case sharedLayer4: return "sharedLayer4";
}
throw new IllegalArgumentException("Unknown routing method " + method);
}
// Returns the named request-context attribute cast to the given type, failing if it is
// missing or of the wrong type.
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName))
.filter(cls::isInstance)
.map(cls::cast)
.orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request"));
}
/** Returns whether given request is by an operator */
private static boolean isOperator(HttpRequest request) {
var securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
return securityContext.roles().stream()
.map(Role::definition)
.anyMatch(definition -> definition == RoleDefinition.hostedOperator);
}
} |
Consider changing control flow to change state from `FAILED` to `READY` instead of jumping directly to RUNNING. | private void progress(DocumentType type, AtomicReference<Reindexing> reindexing, AtomicReference<Status> status) {
// Decide whether to (re)start reindexing for this type. The switch intentionally falls
// through: FAILED past its grace period falls into READY, which transitions to RUNNING.
switch (status.get().state()) {
default:
log.log(WARNING, "Unknown reindexing state '" + status.get().state() + "'—not continuing reindexing of " + type);
case SUCCESSFUL:
return;
case RUNNING:
log.log(WARNING, "Unexpected state 'RUNNING' of reindexing of " + type);
break;
case FAILED:
// Stay failed until the grace period has passed, then fall through to READY.
if (clock.instant().isBefore(status.get().endedAt().get().plus(failureGrace)))
return;
case READY:
status.updateAndGet(Status::running);
}
AtomicReference<Instant> progressLastStored = new AtomicReference<>(clock.instant());
VisitorControlHandler control = new VisitorControlHandler() {
@Override
public void onProgress(ProgressToken token) {
super.onProgress(token);
status.updateAndGet(value -> value.progressed(token));
// Persist progress at most every 10 seconds to limit database writes.
if (progressLastStored.get().isBefore(clock.instant().minusSeconds(10))) {
progressLastStored.set(clock.instant());
database.writeReindexing(reindexing.updateAndGet(value -> value.with(type, status.get())), cluster.name());
metrics.dump(reindexing.get());
}
}
@Override
public void onDone(CompletionCode code, String message) {
super.onDone(code, message);
// Releases the reindexer thread waiting on the phaser below.
phaser.arriveAndAwaitAdvance();
}
};
VisitorParameters parameters = createParameters(type, status.get().progress().orElse(null));
parameters.setControlHandler(control);
Runnable sessionShutdown = visitorSessions.apply(parameters);
log.log(FINE, () -> "Running reindexing of " + type);
// Wait for the visit to complete (onDone) or for shutdown() to force-terminate the phaser.
phaser.arriveAndAwaitAdvance();
sessionShutdown.run();
// A null result means the session was torn down before completing; treat as aborted.
CompletionCode result = control.getResult() != null ? control.getResult().getCode()
: CompletionCode.ABORTED;
switch (result) {
default:
log.log(WARNING, "Unexpected visitor result '" + control.getResult().getCode() + "'");
case FAILURE:
log.log(WARNING, "Visiting failed: " + control.getResult().getMessage());
status.updateAndGet(value -> value.failed(clock.instant(), control.getResult().getMessage()));
break;
case ABORTED:
log.log(FINE, () -> "Halting reindexing of " + type + " due to shutdown — will continue later")
status.updateAndGet(Status::halted);
break;
case SUCCESS:
log.log(INFO, "Completed reindexing of " + type + " after " + Duration.between(status.get().startedAt(), clock.instant()));
status.updateAndGet(value -> value.successful(clock.instant()));
}
database.writeReindexing(reindexing.updateAndGet(value -> value.with(type, status.get())), cluster.name());
metrics.dump(reindexing.get());
} | case FAILED: | private void progress(DocumentType type, AtomicReference<Reindexing> reindexing, AtomicReference<Status> status) {
// Decide whether to (re)start reindexing for this type. The switch intentionally falls
// through: FAILED past its grace period falls into READY, which transitions to RUNNING.
switch (status.get().state()) {
default:
log.log(WARNING, "Unknown reindexing state '" + status.get().state() + "'—not continuing reindexing of " + type);
case SUCCESSFUL:
return;
case RUNNING:
log.log(WARNING, "Unexpected state 'RUNNING' of reindexing of " + type);
break;
case FAILED:
// Stay failed until the grace period has passed, then fall through to READY.
if (clock.instant().isBefore(status.get().endedAt().get().plus(failureGrace)))
return;
case READY:
status.updateAndGet(Status::running);
}
AtomicReference<Instant> progressLastStored = new AtomicReference<>(clock.instant());
VisitorControlHandler control = new VisitorControlHandler() {
@Override
public void onProgress(ProgressToken token) {
super.onProgress(token);
status.updateAndGet(value -> value.progressed(token));
// Persist progress at most every 10 seconds to limit database writes.
if (progressLastStored.get().isBefore(clock.instant().minusSeconds(10))) {
progressLastStored.set(clock.instant());
database.writeReindexing(reindexing.updateAndGet(value -> value.with(type, status.get())), cluster.name());
metrics.dump(reindexing.get());
}
}
@Override
public void onDone(CompletionCode code, String message) {
super.onDone(code, message);
// Releases the reindexer thread waiting on the phaser below.
phaser.arriveAndAwaitAdvance();
}
};
VisitorParameters parameters = createParameters(type, status.get().progress().orElse(null));
parameters.setControlHandler(control);
Runnable sessionShutdown = visitorSessions.apply(parameters);
log.log(FINE, () -> "Running reindexing of " + type);
// Wait for the visit to complete (onDone) or for shutdown() to force-terminate the phaser.
phaser.arriveAndAwaitAdvance();
sessionShutdown.run();
// A null result means the session was torn down before completing; treat as aborted.
CompletionCode result = control.getResult() != null ? control.getResult().getCode()
: CompletionCode.ABORTED;
switch (result) {
default:
log.log(WARNING, "Unexpected visitor result '" + control.getResult().getCode() + "'");
case FAILURE:
log.log(WARNING, "Visiting failed: " + control.getResult().getMessage());
status.updateAndGet(value -> value.failed(clock.instant(), control.getResult().getMessage()));
break;
case ABORTED:
log.log(FINE, () -> "Halting reindexing of " + type + " due to shutdown — will continue later");
status.updateAndGet(Status::halted);
break;
case SUCCESS:
log.log(INFO, "Completed reindexing of " + type + " after " + Duration.between(status.get().startedAt(), clock.instant()));
status.updateAndGet(value -> value.successful(clock.instant()));
}
database.writeReindexing(reindexing.updateAndGet(value -> value.with(type, status.get())), cluster.name());
metrics.dump(reindexing.get());
} | class Reindexer {
private static final Logger log = Logger.getLogger(Reindexer.class.getName());
// How long a FAILED type is left alone before reindexing may be retried.
static final Duration failureGrace = Duration.ofMinutes(10);
private final Cluster cluster;
// Per document type: the instant from which reindexing of that type is wanted.
private final Map<DocumentType, Instant> ready;
private final ReindexingCurator database;
// Factory creating a visitor session from parameters; returns a shutdown hook for the session.
private final Function<VisitorParameters, Runnable> visitorSessions;
private final ReindexingMetrics metrics;
private final Clock clock;
// Two parties: the reindexer thread and the visit-done callback (see progress()).
private final Phaser phaser = new Phaser(2);
// Production constructor: wraps the document access in a visitor-session factory.
public Reindexer(Cluster cluster, Map<DocumentType, Instant> ready, ReindexingCurator database,
DocumentAccess access, Metric metric, Clock clock) {
this(cluster,
ready,
database,
parameters -> {
try {
return access.createVisitorSession(parameters)::destroy;
}
catch (ParseException e) {
throw new IllegalStateException(e);
}
},
metric,
clock
);
}
// Testable constructor taking the session factory directly.
Reindexer(Cluster cluster, Map<DocumentType, Instant> ready, ReindexingCurator database,
Function<VisitorParameters, Runnable> visitorSessions, Metric metric, Clock clock) {
// Fails fast if any ready type has no known bucket space in the cluster.
for (DocumentType type : ready.keySet())
cluster.bucketSpaceOf(type);
this.cluster = cluster;
this.ready = new TreeMap<>(ready);
this.database = database;
this.visitorSessions = visitorSessions;
this.metrics = new ReindexingMetrics(metric, cluster.name);
this.clock = clock;
database.initializeIfEmpty(cluster.name, ready, clock.instant());
}
/** Lets the reindexer abort any ongoing visit session, wait for it to complete normally, then exit. */
public void shutdown() {
phaser.forceTermination();
}
/** Starts and tracks reprocessing of ready document types until done, or interrupted. */
public void reindex() throws ReindexingLockException {
if (phaser.isTerminated())
throw new IllegalStateException("Already shut down");
AtomicReference<Reindexing> reindexing = new AtomicReference<>(database.readReindexing(cluster.name()));
database.writeReindexing(reindexing.get(), cluster.name());
metrics.dump(reindexing.get());
// Only one reindexer may drive progress for a cluster at a time.
try (Lock lock = database.lockReindexing(cluster.name())) {
reindexing.set(updateWithReady(ready, reindexing.get(), clock.instant()));
database.writeReindexing(reindexing.get(), cluster.name());
metrics.dump(reindexing.get());
for (DocumentType type : ready.keySet()) {
if (ready.get(type).isAfter(clock.instant()))
log.log(INFO, "Received config for reindexing which is ready in the future — will process later " +
"(" + ready.get(type) + " is after " + clock.instant() + ")");
else
progress(type, reindexing, new AtomicReference<>(reindexing.get().status().get(type)));
// Stop iterating if shutdown() was called while progressing a type.
if (phaser.isTerminated())
break;
}
}
}
// Merges the configured ready-instants into the stored reindexing state: a type gets a fresh
// READY status when it has none, or when its stored run started before its ready-instant.
static Reindexing updateWithReady(Map<DocumentType, Instant> ready, Reindexing reindexing, Instant now) {
for (DocumentType type : ready.keySet()) {
if ( ! ready.get(type).isAfter(now)) {
Status status = reindexing.status().getOrDefault(type, Status.ready(now));
if (status.startedAt().isBefore(ready.get(type)))
status = Status.ready(now);
reindexing = reindexing.with(type, status);
}
}
return reindexing;
}
@SuppressWarnings("fallthrough")
// Builds visitor parameters for reindexing the given type, resuming from the given progress token.
VisitorParameters createParameters(DocumentType type, ProgressToken progress) {
VisitorParameters parameters = new VisitorParameters(type.getName());
parameters.setThrottlePolicy(new DynamicThrottlePolicy().setWindowSizeIncrement(0.2)
.setWindowSizeDecrementFactor(5)
.setResizeRate(10)
.setMinWindowSize(1));
parameters.setRemoteDataHandler(cluster.name());
parameters.setMaxPending(32);
parameters.setResumeToken(progress);
parameters.setFieldSet(type.getName() + ":[document]");
parameters.setPriority(DocumentProtocol.Priority.NORMAL_3);
parameters.setRoute(cluster.route());
parameters.setBucketSpace(cluster.bucketSpaceOf(type));
parameters.setMaxBucketsPerVisitor(1);
parameters.setVisitorLibrary("ReindexingVisitor");
return parameters;
}
// Immutable value describing a content cluster: its name and bucket space per document type.
static class Cluster {
private final String name;
private final Map<DocumentType, String> documentBuckets;
Cluster(String name, Map<DocumentType, String> documentBuckets) {
this.name = requireNonNull(name);
this.documentBuckets = Map.copyOf(documentBuckets);
}
String name() {
return name;
}
String route() {
return "[Content:cluster=" + name + "]";
}
String bucketSpaceOf(DocumentType documentType) {
return requireNonNull(documentBuckets.get(documentType), "Unknown bucket space for " + documentType);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Cluster cluster = (Cluster) o;
return name.equals(cluster.name) &&
documentBuckets.equals(cluster.documentBuckets);
}
@Override
public int hashCode() {
return Objects.hash(name, documentBuckets);
}
@Override
public String toString() {
return "Cluster{" +
"name='" + name + '\'' +
", documentBuckets=" + documentBuckets +
'}';
}
}
} | class Reindexer {
private static final Logger log = Logger.getLogger(Reindexer.class.getName());
// How long a FAILED type is left alone before reindexing may be retried.
static final Duration failureGrace = Duration.ofMinutes(10);
private final Cluster cluster;
// Per document type: the instant from which reindexing of that type is wanted.
private final Map<DocumentType, Instant> ready;
private final ReindexingCurator database;
// Factory creating a visitor session from parameters; returns a shutdown hook for the session.
private final Function<VisitorParameters, Runnable> visitorSessions;
private final ReindexingMetrics metrics;
private final Clock clock;
// Two parties: the reindexer thread and the visit-done callback (see progress()).
private final Phaser phaser = new Phaser(2);
// Production constructor: wraps the document access in a visitor-session factory.
public Reindexer(Cluster cluster, Map<DocumentType, Instant> ready, ReindexingCurator database,
DocumentAccess access, Metric metric, Clock clock) {
this(cluster,
ready,
database,
parameters -> {
try {
return access.createVisitorSession(parameters)::destroy;
}
catch (ParseException e) {
throw new IllegalStateException(e);
}
},
metric,
clock
);
}
// Testable constructor taking the session factory directly.
Reindexer(Cluster cluster, Map<DocumentType, Instant> ready, ReindexingCurator database,
Function<VisitorParameters, Runnable> visitorSessions, Metric metric, Clock clock) {
// Fails fast if any ready type has no known bucket space in the cluster.
for (DocumentType type : ready.keySet())
cluster.bucketSpaceOf(type);
this.cluster = cluster;
this.ready = new TreeMap<>(ready);
this.database = database;
this.visitorSessions = visitorSessions;
this.metrics = new ReindexingMetrics(metric, cluster.name);
this.clock = clock;
database.initializeIfEmpty(cluster.name, ready, clock.instant());
}
/** Lets the reindexer abort any ongoing visit session, wait for it to complete normally, then exit. */
public void shutdown() {
phaser.forceTermination();
}
/** Starts and tracks reprocessing of ready document types until done, or interrupted. */
public void reindex() throws ReindexingLockException {
if (phaser.isTerminated())
throw new IllegalStateException("Already shut down");
AtomicReference<Reindexing> reindexing = new AtomicReference<>(database.readReindexing(cluster.name()));
database.writeReindexing(reindexing.get(), cluster.name());
metrics.dump(reindexing.get());
// Only one reindexer may drive progress for a cluster at a time.
try (Lock lock = database.lockReindexing(cluster.name())) {
reindexing.set(updateWithReady(ready, reindexing.get(), clock.instant()));
database.writeReindexing(reindexing.get(), cluster.name());
metrics.dump(reindexing.get());
for (DocumentType type : ready.keySet()) {
if (ready.get(type).isAfter(clock.instant()))
log.log(INFO, "Received config for reindexing which is ready in the future — will process later " +
"(" + ready.get(type) + " is after " + clock.instant() + ")");
else
progress(type, reindexing, new AtomicReference<>(reindexing.get().status().get(type)));
// Stop iterating if shutdown() was called while progressing a type.
if (phaser.isTerminated())
break;
}
}
}
// Merges the configured ready-instants into the stored reindexing state: a type gets a fresh
// READY status when it has none, or when its stored run started before its ready-instant.
static Reindexing updateWithReady(Map<DocumentType, Instant> ready, Reindexing reindexing, Instant now) {
for (DocumentType type : ready.keySet()) {
if ( ! ready.get(type).isAfter(now)) {
Status status = reindexing.status().getOrDefault(type, Status.ready(now));
if (status.startedAt().isBefore(ready.get(type)))
status = Status.ready(now);
reindexing = reindexing.with(type, status);
}
}
return reindexing;
}
@SuppressWarnings("fallthrough")
// Builds visitor parameters for reindexing the given type, resuming from the given progress token.
VisitorParameters createParameters(DocumentType type, ProgressToken progress) {
VisitorParameters parameters = new VisitorParameters(type.getName());
parameters.setThrottlePolicy(new DynamicThrottlePolicy().setWindowSizeIncrement(0.2)
.setWindowSizeDecrementFactor(5)
.setResizeRate(10)
.setMinWindowSize(1));
parameters.setRemoteDataHandler(cluster.name());
parameters.setMaxPending(32);
parameters.setResumeToken(progress);
parameters.setFieldSet(type.getName() + ":[document]");
parameters.setPriority(DocumentProtocol.Priority.NORMAL_3);
parameters.setRoute(cluster.route());
parameters.setBucketSpace(cluster.bucketSpaceOf(type));
parameters.setMaxBucketsPerVisitor(1);
parameters.setVisitorLibrary("ReindexingVisitor");
return parameters;
}
// Immutable value describing a content cluster: its name and bucket space per document type.
static class Cluster {
private final String name;
private final Map<DocumentType, String> documentBuckets;
Cluster(String name, Map<DocumentType, String> documentBuckets) {
this.name = requireNonNull(name);
this.documentBuckets = Map.copyOf(documentBuckets);
}
String name() {
return name;
}
String route() {
return "[Content:cluster=" + name + "]";
}
String bucketSpaceOf(DocumentType documentType) {
return requireNonNull(documentBuckets.get(documentType), "Unknown bucket space for " + documentType);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Cluster cluster = (Cluster) o;
return name.equals(cluster.name) &&
documentBuckets.equals(cluster.documentBuckets);
}
@Override
public int hashCode() {
return Objects.hash(name, documentBuckets);
}
@Override
public String toString() {
return "Cluster{" +
"name='" + name + '\'' +
", documentBuckets=" + documentBuckets +
'}';
}
}
} |
Consider something like ```suggestion return find(tenant, zoneBuckets).map(ArchiveBucket::bucketArn).orElseGet(() -> assignToBucket(zoneId, tenant)); ``` even better if `find()` does the mapping since both users just want the bucket ARN anyway... | private String findOrAssignBucket(ZoneId zoneId, TenantName tenant) {
var zoneBuckets = curatorDb.readArchiveBuckets(zoneId);
// Single lookup instead of calling find() twice: map an existing assignment to its ARN,
// otherwise assign the tenant to a bucket under lock.
return find(tenant, zoneBuckets).map(ArchiveBucket::bucketArn).orElseGet(() -> assignToBucket(zoneId, tenant));
} | else return assignToBucket(zoneId, tenant); | private String findOrAssignBucket(ZoneId zoneId, TenantName tenant) {
var zoneBuckets = curatorDb.readArchiveBuckets(zoneId);
// find() yields the bucket name directly here; fall back to assigning a bucket under lock.
return find(tenant, zoneBuckets).orElseGet(() -> assignToBucket(zoneId, tenant));
} | class CuratorArchiveBucketDb extends AbstractComponent implements ArchiveBucketDb {
/**
 * Due to policy limits, we can't put data for more than this many tenants in a bucket.
 * Policy size limit is 20kb, with approx. 500 bytes of policy required per tenant = 40 tenants.
 * We set the maximum a bit lower to have a solid margin of error.
 */
private final static int TENANTS_PER_BUCKET = 30;
private final ArchiveService archiveService;
// Stores the zone -> bucket assignments, guarded by a per-zone lock.
private final CuratorDb curatorDb;
// Feature flag controlling the bucket name: blank = disabled, "auto" = assign automatically.
private final StringFlag bucketNameFlag;
@Inject
public CuratorArchiveBucketDb(Controller controller) {
this.archiveService = controller.serviceRegistry().archiveService();
this.curatorDb = controller.curator();
this.bucketNameFlag = Flags.SYNC_HOST_LOGS_TO_S3_BUCKET.bindTo(controller.flagSource());
}
// Returns the S3 archive URI for the given tenant in the given zone, or empty when the
// feature flag is blank; "auto" triggers automatic bucket assignment.
@Override
public Optional<URI> archiveUriFor(ZoneId zoneId, TenantName tenant) {
String bucketArn = bucketNameFlag
.with(FetchVector.Dimension.ZONE_ID, zoneId.value())
.with(FetchVector.Dimension.TENANT_ID, tenant.value())
.value();
if (bucketArn.isBlank()) return Optional.empty();
if ("auto".equals(bucketArn)) bucketArn = findOrAssignBucket(zoneId, tenant);
// NOTE(review): the format-string line below appears truncated in this view — verify the full "s3://…" template.
return Optional.of(URI.create(String.format("s3:
}
/**
 * Assigns the tenant to an archive bucket in the given zone, under the zone's bucket lock:
 * reuses an existing assignment, tops up a bucket with room under the policy-size limit,
 * or creates a new bucket. Returns the assigned bucket's ARN.
 */
private String assignToBucket(ZoneId zoneId, TenantName tenant) {
try (var lock = curatorDb.lockArchiveBuckets(zoneId)) {
Set<ArchiveBucket> zoneBuckets = new HashSet<>(curatorDb.readArchiveBuckets(zoneId));
// Re-check under the lock — with a single lookup instead of the original find()-twice.
Optional<ArchiveBucket> existing = find(tenant, zoneBuckets);
if (existing.isPresent()) return existing.get().bucketArn();
// Prefer a bucket that still has room under the policy-size limit.
Optional<ArchiveBucket> unfilledBucket = zoneBuckets.stream()
.filter(bucket -> bucket.tenants().size() < TENANTS_PER_BUCKET)
.findAny();
if (unfilledBucket.isPresent()) {
var unfilled = unfilledBucket.get();
var tenants = new HashSet<>(unfilled.tenants());
tenants.add(tenant);
var updatedBucket = new ArchiveBucket(unfilled.bucketArn(), unfilled.keyArn(), tenants);
zoneBuckets.remove(unfilled);
zoneBuckets.add(updatedBucket);
curatorDb.writeArchiveBuckets(zoneId, zoneBuckets);
return updatedBucket.bucketArn();
}
// No bucket has room: create a fresh one for this tenant.
var newBucket = archiveService.createArchiveBucketFor(zoneId, Set.of(tenant));
zoneBuckets.add(newBucket);
curatorDb.writeArchiveBuckets(zoneId, zoneBuckets);
return newBucket.bucketArn();
}
}
// Returns the bucket the tenant is already assigned to in this zone, if any.
@NotNull
private Optional<ArchiveBucket> find(TenantName tenant, Set<ArchiveBucket> zoneBuckets) {
return zoneBuckets.stream().filter(bucket -> bucket.tenants().contains(tenant)).findAny();
}
// Returns all archive buckets registered for the given zone.
@Override
public Set<ArchiveBucket> buckets(ZoneId zoneId) {
return curatorDb.readArchiveBuckets(zoneId);
}
}
/**
 * Due to policy limits, we can't put data for more than this many tenants in a bucket.
 * Policy size limit is 20kb, with approx. 500 bytes of policy required per tenant = 40 tenants.
 * We set the maximum a bit lower to have a solid margin of error.
 */
private final static int TENANTS_PER_BUCKET = 30;
private final ArchiveService archiveService;
// Stores the zone -> bucket assignments, guarded by a per-zone lock.
private final CuratorDb curatorDb;
// Feature flag controlling the bucket name: blank = disabled, "auto" = assign automatically.
private final StringFlag bucketNameFlag;
@Inject
public CuratorArchiveBucketDb(Controller controller) {
this.archiveService = controller.serviceRegistry().archiveService();
this.curatorDb = controller.curator();
this.bucketNameFlag = Flags.SYNC_HOST_LOGS_TO_S3_BUCKET.bindTo(controller.flagSource());
}
// Returns the S3 archive URI for the tenant in the zone, or empty when the flag is blank.
@Override
public Optional<URI> archiveUriFor(ZoneId zoneId, TenantName tenant) {
String bucketName = bucketNameFlag
.with(FetchVector.Dimension.ZONE_ID, zoneId.value())
.with(FetchVector.Dimension.TENANT_ID, tenant.value())
.value();
if (bucketName.isBlank()) return Optional.empty();
if ("auto".equals(bucketName)) bucketName = findOrAssignBucket(zoneId, tenant);
// NOTE(review): the format-string line below appears truncated in this view — verify the full "s3://…" template.
return Optional.of(URI.create(String.format("s3:
}
// Assigns the tenant to a bucket under the zone lock: reuse, top up, or create anew.
private String assignToBucket(ZoneId zoneId, TenantName tenant) {
try (var lock = curatorDb.lockArchiveBuckets(zoneId)) {
Set<ArchiveBucket> zoneBuckets = new HashSet<>(curatorDb.readArchiveBuckets(zoneId));
return find(tenant, zoneBuckets)
.orElseGet(() -> {
// Prefer a bucket that still has room under the policy-size limit.
Optional<ArchiveBucket> unfilledBucket = zoneBuckets.stream()
.filter(bucket -> bucket.tenants().size() < TENANTS_PER_BUCKET)
.findAny();
if (unfilledBucket.isPresent()) {
var unfilled = unfilledBucket.get();
zoneBuckets.remove(unfilled);
zoneBuckets.add(unfilled.withTenant(tenant));
curatorDb.writeArchiveBuckets(zoneId, zoneBuckets);
return unfilled.bucketName();
}
var newBucket = archiveService.createArchiveBucketFor(zoneId).withTenant(tenant);
zoneBuckets.add(newBucket);
curatorDb.writeArchiveBuckets(zoneId, zoneBuckets);
return newBucket.bucketName();
});
}
}
// Returns the name of the bucket the tenant is assigned to in this zone, if any.
@NotNull
private Optional<String> find(TenantName tenant, Set<ArchiveBucket> zoneBuckets) {
return zoneBuckets.stream()
.filter(bucket -> bucket.tenants().contains(tenant))
.findAny()
.map(ArchiveBucket::bucketName);
}
// Returns all archive buckets registered for the given zone.
@Override
public Set<ArchiveBucket> buckets(ZoneId zoneId) {
return curatorDb.readArchiveBuckets(zoneId);
}
}
Consider adding a `withTenant()` method to `ArchiveBucket` that does all this | private String assignToBucket(ZoneId zoneId, TenantName tenant) {
try (var lock = curatorDb.lockArchiveBuckets(zoneId)) {
Set<ArchiveBucket> zoneBuckets = new HashSet<>(curatorDb.readArchiveBuckets(zoneId));
// Re-check under the lock — with a single lookup instead of calling find() twice.
Optional<ArchiveBucket> existing = find(tenant, zoneBuckets);
if (existing.isPresent()) return existing.get().bucketArn();
// Prefer a bucket that still has room under the policy-size limit.
Optional<ArchiveBucket> unfilledBucket = zoneBuckets.stream()
.filter(bucket -> bucket.tenants().size() < TENANTS_PER_BUCKET)
.findAny();
if (unfilledBucket.isPresent()) {
var unfilled = unfilledBucket.get();
var tenants = new HashSet<>(unfilled.tenants());
tenants.add(tenant);
var updatedBucket = new ArchiveBucket(unfilled.bucketArn(), unfilled.keyArn(), tenants);
zoneBuckets.remove(unfilled);
zoneBuckets.add(updatedBucket);
curatorDb.writeArchiveBuckets(zoneId, zoneBuckets);
return updatedBucket.bucketArn();
}
// No bucket has room: create a fresh one for this tenant.
var newBucket = archiveService.createArchiveBucketFor(zoneId, Set.of(tenant));
zoneBuckets.add(newBucket);
curatorDb.writeArchiveBuckets(zoneId, zoneBuckets);
return newBucket.bucketArn();
}
} | var updatedBucket = new ArchiveBucket(unfilled.bucketArn(), unfilled.keyArn(), tenants); | private String assignToBucket(ZoneId zoneId, TenantName tenant) {
try (var lock = curatorDb.lockArchiveBuckets(zoneId)) {
Set<ArchiveBucket> zoneBuckets = new HashSet<>(curatorDb.readArchiveBuckets(zoneId));
return find(tenant, zoneBuckets)
.orElseGet(() -> {
Optional<ArchiveBucket> unfilledBucket = zoneBuckets.stream()
.filter(bucket -> bucket.tenants().size() < TENANTS_PER_BUCKET)
.findAny();
if (unfilledBucket.isPresent()) {
var unfilled = unfilledBucket.get();
zoneBuckets.remove(unfilled);
zoneBuckets.add(unfilled.withTenant(tenant));
curatorDb.writeArchiveBuckets(zoneId, zoneBuckets);
return unfilled.bucketName();
}
var newBucket = archiveService.createArchiveBucketFor(zoneId).withTenant(tenant);
zoneBuckets.add(newBucket);
curatorDb.writeArchiveBuckets(zoneId, zoneBuckets);
return newBucket.bucketName();
});
}
} | class CuratorArchiveBucketDb extends AbstractComponent implements ArchiveBucketDb {
/**
* Due to policy limits, we can't put data for more than this many tenants in a bucket.
* Policy size limit is 20kb, with approx. 500 bytes of policy required per tenant = 40 tenants.
* We set the maximum a bit lower to have a solid margin of error.
*/
private final static int TENANTS_PER_BUCKET = 30;
private final ArchiveService archiveService;
private final CuratorDb curatorDb;
private final StringFlag bucketNameFlag;
@Inject
public CuratorArchiveBucketDb(Controller controller) {
this.archiveService = controller.serviceRegistry().archiveService();
this.curatorDb = controller.curator();
this.bucketNameFlag = Flags.SYNC_HOST_LOGS_TO_S3_BUCKET.bindTo(controller.flagSource());
}
@Override
public Optional<URI> archiveUriFor(ZoneId zoneId, TenantName tenant) {
String bucketArn = bucketNameFlag
.with(FetchVector.Dimension.ZONE_ID, zoneId.value())
.with(FetchVector.Dimension.TENANT_ID, tenant.value())
.value();
if (bucketArn.isBlank()) return Optional.empty();
if ("auto".equals(bucketArn)) bucketArn = findOrAssignBucket(zoneId, tenant);
return Optional.of(URI.create(String.format("s3:
}
private String findOrAssignBucket(ZoneId zoneId, TenantName tenant) {
var zoneBuckets = curatorDb.readArchiveBuckets(zoneId);
if (find(tenant, zoneBuckets).isPresent()) return find(tenant, zoneBuckets).get().bucketArn();
else return assignToBucket(zoneId, tenant);
}
@NotNull
private Optional<ArchiveBucket> find(TenantName tenant, Set<ArchiveBucket> zoneBuckets) {
return zoneBuckets.stream().filter(bucket -> bucket.tenants().contains(tenant)).findAny();
}
@Override
public Set<ArchiveBucket> buckets(ZoneId zoneId) {
return curatorDb.readArchiveBuckets(zoneId);
}
} | class CuratorArchiveBucketDb implements ArchiveBucketDb {
/**
* Due to policy limits, we can't put data for more than this many tenants in a bucket.
* Policy size limit is 20kb, with approx. 500 bytes of policy required per tenant = 40 tenants.
* We set the maximum a bit lower to have a solid margin of error.
*/
private final static int TENANTS_PER_BUCKET = 30;
private final ArchiveService archiveService;
private final CuratorDb curatorDb;
private final StringFlag bucketNameFlag;
@Inject
public CuratorArchiveBucketDb(Controller controller) {
this.archiveService = controller.serviceRegistry().archiveService();
this.curatorDb = controller.curator();
this.bucketNameFlag = Flags.SYNC_HOST_LOGS_TO_S3_BUCKET.bindTo(controller.flagSource());
}
@Override
public Optional<URI> archiveUriFor(ZoneId zoneId, TenantName tenant) {
String bucketName = bucketNameFlag
.with(FetchVector.Dimension.ZONE_ID, zoneId.value())
.with(FetchVector.Dimension.TENANT_ID, tenant.value())
.value();
if (bucketName.isBlank()) return Optional.empty();
if ("auto".equals(bucketName)) bucketName = findOrAssignBucket(zoneId, tenant);
return Optional.of(URI.create(String.format("s3:
}
private String findOrAssignBucket(ZoneId zoneId, TenantName tenant) {
var zoneBuckets = curatorDb.readArchiveBuckets(zoneId);
return find(tenant, zoneBuckets).orElseGet(() -> assignToBucket(zoneId, tenant));
}
@NotNull
private Optional<String> find(TenantName tenant, Set<ArchiveBucket> zoneBuckets) {
return zoneBuckets.stream()
.filter(bucket -> bucket.tenants().contains(tenant))
.findAny()
.map(ArchiveBucket::bucketName);
}
@Override
public Set<ArchiveBucket> buckets(ZoneId zoneId) {
return curatorDb.readArchiveBuckets(zoneId);
}
} |
Consider creating a new method on tester that allows creating a tenant with a given access role or helper method to update access role so we dont test REST API here, plus it'll be much faster. | public void grantsRoleAccess() {
var containerTester = new ContainerTester(container, "");
((InMemoryFlagSource) containerTester.controller().flagSource())
.withBooleanFlag(PermanentFlags.ENABLE_PUBLIC_SIGNUP_FLOW.id(), true);
var tester = new ControllerTester(containerTester);
var tenantName = tester.createTenant("tenant1", Tenant.Type.cloud);
containerTester.assertResponse(request("/application/v4/tenant/tenant1/archive-access", PUT)
.data("{\"role\":\"arn:aws:iam::123456789012:role/my-role\"}").roles(Role.administrator(tenantName)),
"{\"message\":\"Archive access role set to 'arn:aws:iam::123456789012:role/my-role' for tenant tenant1.\"}", 200);
var archiveBucketDb = (MockArchiveBucketDb) tester.controller().serviceRegistry().archiveBucketDb();
var testBucket = new ArchiveBucket("foo", "bar", Set.of(tenantName));
archiveBucketDb.addBucket(ZoneId.from("prod.us-east-3"), testBucket);
MockArchiveService archiveService = (MockArchiveService) tester.controller().serviceRegistry().archiveService();
assertNull(archiveService.authorizedIamRoles.get(testBucket));
new ArchiveAccessMaintainer(containerTester.controller(), Duration.ofMinutes(10)).maintain();
assertEquals("arn:aws:iam::123456789012:role/my-role", archiveService.authorizedIamRoles.get(testBucket).get(tenantName));
} | containerTester.assertResponse(request("/application/v4/tenant/tenant1/archive-access", PUT) | public void grantsRoleAccess() {
var containerTester = new ContainerTester(container, "");
((InMemoryFlagSource) containerTester.controller().flagSource())
.withBooleanFlag(PermanentFlags.ENABLE_PUBLIC_SIGNUP_FLOW.id(), true)
.withStringFlag(Flags.SYNC_HOST_LOGS_TO_S3_BUCKET.id(), "auto");
var tester = new ControllerTester(containerTester);
String tenant1role = "arn:aws:iam::123456789012:role/my-role";
String tenant2role = "arn:aws:iam::210987654321:role/my-role";
var tenant1 = createTenantWithAccessRole(tester, "tenant1", tenant1role);
createTenantWithAccessRole(tester, "tenant2", tenant2role);
tester.controller().archiveBucketDb().archiveUriFor(ZoneId.from("prod.us-east-3"), tenant1);
var testBucket = new ArchiveBucket("bucketName", "keyArn").withTenant(tenant1);
MockArchiveService archiveService = (MockArchiveService) tester.controller().serviceRegistry().archiveService();
assertNull(archiveService.authorizedIamRoles.get(testBucket));
new ArchiveAccessMaintainer(containerTester.controller(), Duration.ofMinutes(10)).maintain();
assertEquals(Map.of(tenant1, tenant1role), archiveService.authorizedIamRoles.get(testBucket));
} | class ArchiveAccessMaintainerTest extends ControllerContainerCloudTest {
@Test
} | class ArchiveAccessMaintainerTest extends ControllerContainerCloudTest {
@Test
// Test helper: creates a cloud tenant and sets its archive access role directly through the
// tenant repository (bypasses the REST API, which keeps the test fast).
private TenantName createTenantWithAccessRole(ControllerTester tester, String tenantName, String role) {
var tenant = tester.createTenant(tenantName, Tenant.Type.cloud);
tester.controller().tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withArchiveAccessRole(Optional.of(role));
tester.controller().tenants().store(lockedTenant);
});
return tenant;
}
} |
The URI should have the bucket name, not the bucket ARN. | public Optional<URI> archiveUriFor(ZoneId zoneId, TenantName tenant) {
String bucketArn = bucketNameFlag
.with(FetchVector.Dimension.ZONE_ID, zoneId.value())
.with(FetchVector.Dimension.TENANT_ID, tenant.value())
.value();
if (bucketArn.isBlank()) return Optional.empty();
if ("auto".equals(bucketArn)) bucketArn = findOrAssignBucket(zoneId, tenant);
return Optional.of(URI.create(String.format("s3:
} | return Optional.of(URI.create(String.format("s3: | public Optional<URI> archiveUriFor(ZoneId zoneId, TenantName tenant) {
String bucketName = bucketNameFlag
.with(FetchVector.Dimension.ZONE_ID, zoneId.value())
.with(FetchVector.Dimension.TENANT_ID, tenant.value())
.value();
if (bucketName.isBlank()) return Optional.empty();
if ("auto".equals(bucketName)) bucketName = findOrAssignBucket(zoneId, tenant);
return Optional.of(URI.create(String.format("s3:
} | class CuratorArchiveBucketDb extends AbstractComponent implements ArchiveBucketDb {
/**
* Due to policy limits, we can't put data for more than this many tenants in a bucket.
* Policy size limit is 20kb, with approx. 500 bytes of policy required per tenant = 40 tenants.
* We set the maximum a bit lower to have a solid margin of error.
*/
private final static int TENANTS_PER_BUCKET = 30;
private final ArchiveService archiveService;
private final CuratorDb curatorDb;
private final StringFlag bucketNameFlag;
// Wires the bucket DB against the controller's archive service, curator store and feature flags.
@Inject
public CuratorArchiveBucketDb(Controller controller) {
this.archiveService = controller.serviceRegistry().archiveService();
this.curatorDb = controller.curator();
this.bucketNameFlag = Flags.SYNC_HOST_LOGS_TO_S3_BUCKET.bindTo(controller.flagSource());
}
@Override
/**
 * Returns the ARN of the bucket the tenant is assigned to in the given zone,
 * assigning the tenant to a bucket first if it has none.
 */
private String findOrAssignBucket(ZoneId zoneId, TenantName tenant) {
    var zoneBuckets = curatorDb.readArchiveBuckets(zoneId);
    // Perform the lookup once instead of twice; fall back to assignment only on a miss.
    return find(tenant, zoneBuckets)
            .map(ArchiveBucket::bucketArn)
            .orElseGet(() -> assignToBucket(zoneId, tenant));
}
private String assignToBucket(ZoneId zoneId, TenantName tenant) {
try (var lock = curatorDb.lockArchiveBuckets(zoneId)) {
Set<ArchiveBucket> zoneBuckets = new HashSet<>(curatorDb.readArchiveBuckets(zoneId));
if (find(tenant, zoneBuckets).isPresent()) return find(tenant, zoneBuckets).get().bucketArn();
Optional<ArchiveBucket> unfilledBucket = zoneBuckets.stream()
.filter(bucket -> bucket.tenants().size() < TENANTS_PER_BUCKET)
.findAny();
if (unfilledBucket.isPresent()) {
var unfilled = unfilledBucket.get();
var tenants = new HashSet<>(unfilled.tenants());
tenants.add(tenant);
var updatedBucket = new ArchiveBucket(unfilled.bucketArn(), unfilled.keyArn(), tenants);
zoneBuckets.remove(unfilled);
zoneBuckets.add(updatedBucket);
curatorDb.writeArchiveBuckets(zoneId, zoneBuckets);
return updatedBucket.bucketArn();
}
var newBucket = archiveService.createArchiveBucketFor(zoneId, Set.of(tenant));
zoneBuckets.add(newBucket);
curatorDb.writeArchiveBuckets(zoneId, zoneBuckets);
return newBucket.bucketArn();
}
}
@NotNull
private Optional<ArchiveBucket> find(TenantName tenant, Set<ArchiveBucket> zoneBuckets) {
return zoneBuckets.stream().filter(bucket -> bucket.tenants().contains(tenant)).findAny();
}
@Override
public Set<ArchiveBucket> buckets(ZoneId zoneId) {
return curatorDb.readArchiveBuckets(zoneId);
}
} | class CuratorArchiveBucketDb implements ArchiveBucketDb {
/**
* Due to policy limits, we can't put data for more than this many tenants in a bucket.
* Policy size limit is 20kb, with approx. 500 bytes of policy required per tenant = 40 tenants.
* We set the maximum a bit lower to have a solid margin of error.
*/
private final static int TENANTS_PER_BUCKET = 30;
private final ArchiveService archiveService;
private final CuratorDb curatorDb;
private final StringFlag bucketNameFlag;
@Inject
public CuratorArchiveBucketDb(Controller controller) {
this.archiveService = controller.serviceRegistry().archiveService();
this.curatorDb = controller.curator();
this.bucketNameFlag = Flags.SYNC_HOST_LOGS_TO_S3_BUCKET.bindTo(controller.flagSource());
}
@Override
// Resolves the bucket name for the tenant in this zone, assigning a bucket on first use.
private String findOrAssignBucket(ZoneId zoneId, TenantName tenant) {
    Set<ArchiveBucket> bucketsInZone = curatorDb.readArchiveBuckets(zoneId);
    Optional<String> existing = find(tenant, bucketsInZone);
    if (existing.isPresent()) {
        return existing.get();
    }
    return assignToBucket(zoneId, tenant);
}
private String assignToBucket(ZoneId zoneId, TenantName tenant) {
try (var lock = curatorDb.lockArchiveBuckets(zoneId)) {
Set<ArchiveBucket> zoneBuckets = new HashSet<>(curatorDb.readArchiveBuckets(zoneId));
return find(tenant, zoneBuckets)
.orElseGet(() -> {
Optional<ArchiveBucket> unfilledBucket = zoneBuckets.stream()
.filter(bucket -> bucket.tenants().size() < TENANTS_PER_BUCKET)
.findAny();
if (unfilledBucket.isPresent()) {
var unfilled = unfilledBucket.get();
zoneBuckets.remove(unfilled);
zoneBuckets.add(unfilled.withTenant(tenant));
curatorDb.writeArchiveBuckets(zoneId, zoneBuckets);
return unfilled.bucketName();
}
var newBucket = archiveService.createArchiveBucketFor(zoneId).withTenant(tenant);
zoneBuckets.add(newBucket);
curatorDb.writeArchiveBuckets(zoneId, zoneBuckets);
return newBucket.bucketName();
});
}
}
@NotNull
private Optional<String> find(TenantName tenant, Set<ArchiveBucket> zoneBuckets) {
return zoneBuckets.stream()
.filter(bucket -> bucket.tenants().contains(tenant))
.findAny()
.map(ArchiveBucket::bucketName);
}
@Override
public Set<ArchiveBucket> buckets(ZoneId zoneId) {
return curatorDb.readArchiveBuckets(zoneId);
}
} |
Could consider a fallback to the deployment's `at` when there is no record of a last job, or when the job-type is unknown. Otherwise LGTM :) | private boolean isExpired(Deployment deployment, ApplicationId instance) {
if (deployment.zone().environment().isProduction()) return false;
Optional<Duration> ttl = controller().zoneRegistry().getDeploymentTimeToLive(deployment.zone());
if (ttl.isEmpty()) return false;
Optional<JobId> jobId = JobType.from(controller().system(), deployment.zone())
.map(type -> new JobId(instance, type));
if (jobId.isEmpty()) return false;
return controller().jobController().last(jobId.get())
.flatMap(Run::end)
.map(end -> end.plus(ttl.get()).isBefore(controller().clock().instant()))
.orElse(false);
} | return controller().jobController().last(jobId.get()) | private boolean isExpired(Deployment deployment, ApplicationId instance) {
if (deployment.zone().environment().isProduction()) return false;
Optional<Duration> ttl = controller().zoneRegistry().getDeploymentTimeToLive(deployment.zone());
if (ttl.isEmpty()) return false;
Optional<JobId> jobId = JobType.from(controller().system(), deployment.zone())
.map(type -> new JobId(instance, type));
if (jobId.isEmpty()) return false;
return controller().jobController().last(jobId.get())
.flatMap(Run::end)
.map(end -> end.plus(ttl.get()).isBefore(controller().clock().instant()))
.orElse(false);
} | class DeploymentExpirer extends ControllerMaintainer {
/** Creates the expirer, scheduled to run at the given interval. */
public DeploymentExpirer(Controller controller, Duration interval) {
super(controller, interval);
}
// Deactivates non-production deployments whose TTL has passed; returns whether every
// attempted expiration succeeded.
@Override
protected boolean maintain() {
boolean success = true;
// Walk every deployment of every instance of every readable application.
for (Application application : controller().applications().readable()) {
for (Instance instance : application.instances().values())
for (Deployment deployment : instance.deployments().values()) {
if (!isExpired(deployment, instance.id())) continue;
try {
log.log(Level.INFO, "Expiring deployment of " + instance.id() + " in " + deployment.zone());
controller().applications().deactivate(instance.id(), deployment.zone());
} catch (Exception e) {
// Best-effort: one failed deactivation must not stop expiry of the others;
// flag the failure so the maintainer reports it and retries on the next run.
success = false;
log.log(Level.WARNING, "Could not expire " + deployment + " of " + instance +
": " + Exceptions.toMessageString(e) + ". Retrying in " +
interval());
}
}
}
return success;
}
/** Returns whether given deployment has expired according to its TTL */
} | class DeploymentExpirer extends ControllerMaintainer {
public DeploymentExpirer(Controller controller, Duration interval) {
super(controller, interval);
}
@Override
protected boolean maintain() {
boolean success = true;
for (Application application : controller().applications().readable()) {
for (Instance instance : application.instances().values())
for (Deployment deployment : instance.deployments().values()) {
if (!isExpired(deployment, instance.id())) continue;
try {
log.log(Level.INFO, "Expiring deployment of " + instance.id() + " in " + deployment.zone());
controller().applications().deactivate(instance.id(), deployment.zone());
} catch (Exception e) {
success = false;
log.log(Level.WARNING, "Could not expire " + deployment + " of " + instance +
": " + Exceptions.toMessageString(e) + ". Retrying in " +
interval());
}
}
}
return success;
}
/** Returns whether given deployment has expired according to its TTL */
} |
I prefer to keep the errors separate so it is clear what is the problem. That is more clear. It is also easier to handle changes to the rules later on. | public void process(boolean validate, boolean documentsOnly) {
for (SDField field : search.allConcreteFields()) {
Dictionary dictionary = field.getDictionary();
if (dictionary == null) continue;
Attribute attribute = field.getAttribute();
if (attribute.getDataType() instanceof NumericDataType ) {
if (attribute.isFastSearch()) {
attribute.setDictionary(dictionary);
} else {
fail(search, field, "You must specify attribute:fast-search to allow dictionary control");
}
} else {
fail(search, field, "You can only specify 'dictionary:' for numeric fields");
}
}
} | fail(search, field, "You can only specify 'dictionary:' for numeric fields"); | public void process(boolean validate, boolean documentsOnly) {
for (SDField field : search.allConcreteFields()) {
Dictionary dictionary = field.getDictionary();
if (dictionary == null) continue;
Attribute attribute = field.getAttribute();
if (attribute.getDataType() instanceof NumericDataType ) {
if (attribute.isFastSearch()) {
attribute.setDictionary(dictionary);
} else {
fail(search, field, "You must specify 'attribute:fast-search' to allow dictionary control");
}
} else {
fail(search, field, "You can only specify 'dictionary:' for numeric fields");
}
}
} | class DictionaryProcessor extends Processor {
public DictionaryProcessor(Search search, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, QueryProfiles queryProfiles) {
super(search, deployLogger, rankProfileRegistry, queryProfiles);
}
@Override
} | class DictionaryProcessor extends Processor {
public DictionaryProcessor(Search search, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, QueryProfiles queryProfiles) {
super(search, deployLogger, rankProfileRegistry, queryProfiles);
}
@Override
} |
Sure, but can that happen? | private boolean isExpired(Deployment deployment, ApplicationId instance) {
if (deployment.zone().environment().isProduction()) return false;
Optional<Duration> ttl = controller().zoneRegistry().getDeploymentTimeToLive(deployment.zone());
if (ttl.isEmpty()) return false;
Optional<JobId> jobId = JobType.from(controller().system(), deployment.zone())
.map(type -> new JobId(instance, type));
if (jobId.isEmpty()) return false;
return controller().jobController().last(jobId.get())
.flatMap(Run::end)
.map(end -> end.plus(ttl.get()).isBefore(controller().clock().instant()))
.orElse(false);
} | return controller().jobController().last(jobId.get()) | private boolean isExpired(Deployment deployment, ApplicationId instance) {
if (deployment.zone().environment().isProduction()) return false;
Optional<Duration> ttl = controller().zoneRegistry().getDeploymentTimeToLive(deployment.zone());
if (ttl.isEmpty()) return false;
Optional<JobId> jobId = JobType.from(controller().system(), deployment.zone())
.map(type -> new JobId(instance, type));
if (jobId.isEmpty()) return false;
return controller().jobController().last(jobId.get())
.flatMap(Run::end)
.map(end -> end.plus(ttl.get()).isBefore(controller().clock().instant()))
.orElse(false);
} | class DeploymentExpirer extends ControllerMaintainer {
public DeploymentExpirer(Controller controller, Duration interval) {
super(controller, interval);
}
@Override
protected boolean maintain() {
boolean success = true;
for (Application application : controller().applications().readable()) {
for (Instance instance : application.instances().values())
for (Deployment deployment : instance.deployments().values()) {
if (!isExpired(deployment, instance.id())) continue;
try {
log.log(Level.INFO, "Expiring deployment of " + instance.id() + " in " + deployment.zone());
controller().applications().deactivate(instance.id(), deployment.zone());
} catch (Exception e) {
success = false;
log.log(Level.WARNING, "Could not expire " + deployment + " of " + instance +
": " + Exceptions.toMessageString(e) + ". Retrying in " +
interval());
}
}
}
return success;
}
/** Returns whether given deployment has expired according to its TTL */
} | class DeploymentExpirer extends ControllerMaintainer {
public DeploymentExpirer(Controller controller, Duration interval) {
super(controller, interval);
}
@Override
protected boolean maintain() {
boolean success = true;
for (Application application : controller().applications().readable()) {
for (Instance instance : application.instances().values())
for (Deployment deployment : instance.deployments().values()) {
if (!isExpired(deployment, instance.id())) continue;
try {
log.log(Level.INFO, "Expiring deployment of " + instance.id() + " in " + deployment.zone());
controller().applications().deactivate(instance.id(), deployment.zone());
} catch (Exception e) {
success = false;
log.log(Level.WARNING, "Could not expire " + deployment + " of " + instance +
": " + Exceptions.toMessageString(e) + ". Retrying in " +
interval());
}
}
}
return success;
}
/** Returns whether given deployment has expired according to its TTL */
} |
Not now. | private boolean isExpired(Deployment deployment, ApplicationId instance) {
if (deployment.zone().environment().isProduction()) return false;
Optional<Duration> ttl = controller().zoneRegistry().getDeploymentTimeToLive(deployment.zone());
if (ttl.isEmpty()) return false;
Optional<JobId> jobId = JobType.from(controller().system(), deployment.zone())
.map(type -> new JobId(instance, type));
if (jobId.isEmpty()) return false;
return controller().jobController().last(jobId.get())
.flatMap(Run::end)
.map(end -> end.plus(ttl.get()).isBefore(controller().clock().instant()))
.orElse(false);
} | return controller().jobController().last(jobId.get()) | private boolean isExpired(Deployment deployment, ApplicationId instance) {
if (deployment.zone().environment().isProduction()) return false;
Optional<Duration> ttl = controller().zoneRegistry().getDeploymentTimeToLive(deployment.zone());
if (ttl.isEmpty()) return false;
Optional<JobId> jobId = JobType.from(controller().system(), deployment.zone())
.map(type -> new JobId(instance, type));
if (jobId.isEmpty()) return false;
return controller().jobController().last(jobId.get())
.flatMap(Run::end)
.map(end -> end.plus(ttl.get()).isBefore(controller().clock().instant()))
.orElse(false);
} | class DeploymentExpirer extends ControllerMaintainer {
public DeploymentExpirer(Controller controller, Duration interval) {
super(controller, interval);
}
@Override
protected boolean maintain() {
boolean success = true;
for (Application application : controller().applications().readable()) {
for (Instance instance : application.instances().values())
for (Deployment deployment : instance.deployments().values()) {
if (!isExpired(deployment, instance.id())) continue;
try {
log.log(Level.INFO, "Expiring deployment of " + instance.id() + " in " + deployment.zone());
controller().applications().deactivate(instance.id(), deployment.zone());
} catch (Exception e) {
success = false;
log.log(Level.WARNING, "Could not expire " + deployment + " of " + instance +
": " + Exceptions.toMessageString(e) + ". Retrying in " +
interval());
}
}
}
return success;
}
/** Returns whether given deployment has expired according to its TTL */
} | class DeploymentExpirer extends ControllerMaintainer {
public DeploymentExpirer(Controller controller, Duration interval) {
super(controller, interval);
}
@Override
protected boolean maintain() {
boolean success = true;
for (Application application : controller().applications().readable()) {
for (Instance instance : application.instances().values())
for (Deployment deployment : instance.deployments().values()) {
if (!isExpired(deployment, instance.id())) continue;
try {
log.log(Level.INFO, "Expiring deployment of " + instance.id() + " in " + deployment.zone());
controller().applications().deactivate(instance.id(), deployment.zone());
} catch (Exception e) {
success = false;
log.log(Level.WARNING, "Could not expire " + deployment + " of " + instance +
": " + Exceptions.toMessageString(e) + ". Retrying in " +
interval());
}
}
}
return success;
}
/** Returns whether given deployment has expired according to its TTL */
} |
Ok, if it happens it sounds like a bug somewhere else and I don't think we should work around that here. | private boolean isExpired(Deployment deployment, ApplicationId instance) {
if (deployment.zone().environment().isProduction()) return false;
Optional<Duration> ttl = controller().zoneRegistry().getDeploymentTimeToLive(deployment.zone());
if (ttl.isEmpty()) return false;
Optional<JobId> jobId = JobType.from(controller().system(), deployment.zone())
.map(type -> new JobId(instance, type));
if (jobId.isEmpty()) return false;
return controller().jobController().last(jobId.get())
.flatMap(Run::end)
.map(end -> end.plus(ttl.get()).isBefore(controller().clock().instant()))
.orElse(false);
} | return controller().jobController().last(jobId.get()) | private boolean isExpired(Deployment deployment, ApplicationId instance) {
if (deployment.zone().environment().isProduction()) return false;
Optional<Duration> ttl = controller().zoneRegistry().getDeploymentTimeToLive(deployment.zone());
if (ttl.isEmpty()) return false;
Optional<JobId> jobId = JobType.from(controller().system(), deployment.zone())
.map(type -> new JobId(instance, type));
if (jobId.isEmpty()) return false;
return controller().jobController().last(jobId.get())
.flatMap(Run::end)
.map(end -> end.plus(ttl.get()).isBefore(controller().clock().instant()))
.orElse(false);
} | class DeploymentExpirer extends ControllerMaintainer {
public DeploymentExpirer(Controller controller, Duration interval) {
super(controller, interval);
}
@Override
protected boolean maintain() {
boolean success = true;
for (Application application : controller().applications().readable()) {
for (Instance instance : application.instances().values())
for (Deployment deployment : instance.deployments().values()) {
if (!isExpired(deployment, instance.id())) continue;
try {
log.log(Level.INFO, "Expiring deployment of " + instance.id() + " in " + deployment.zone());
controller().applications().deactivate(instance.id(), deployment.zone());
} catch (Exception e) {
success = false;
log.log(Level.WARNING, "Could not expire " + deployment + " of " + instance +
": " + Exceptions.toMessageString(e) + ". Retrying in " +
interval());
}
}
}
return success;
}
/** Returns whether given deployment has expired according to its TTL */
} | class DeploymentExpirer extends ControllerMaintainer {
public DeploymentExpirer(Controller controller, Duration interval) {
super(controller, interval);
}
@Override
protected boolean maintain() {
boolean success = true;
for (Application application : controller().applications().readable()) {
for (Instance instance : application.instances().values())
for (Deployment deployment : instance.deployments().values()) {
if (!isExpired(deployment, instance.id())) continue;
try {
log.log(Level.INFO, "Expiring deployment of " + instance.id() + " in " + deployment.zone());
controller().applications().deactivate(instance.id(), deployment.zone());
} catch (Exception e) {
success = false;
log.log(Level.WARNING, "Could not expire " + deployment + " of " + instance +
": " + Exceptions.toMessageString(e) + ". Retrying in " +
interval());
}
}
}
return success;
}
/** Returns whether given deployment has expired according to its TTL */
} |
Perhaps we should re-phrase to "At least one is required"? | static public void validateThatReadyCopiesIsCompatibleWithRedundancy(String clusterName, int totalRedundancy, int totalReadyCopies, int groupCount) {
if (totalRedundancy % groupCount != 0) {
throw new IllegalArgumentException(getErrorMsgPrefix(clusterName) + "Expected equal redundancy per group.");
}
if (totalReadyCopies % groupCount != 0) {
throw new IllegalArgumentException(getErrorMsgPrefix(clusterName) + "Expected equal amount of ready copies per group, but " +
totalReadyCopies + " ready copies is specified with " + groupCount + " groups");
}
if (totalReadyCopies == 0) {
throw new IllegalArgumentException(getErrorMsgPrefix(clusterName) + "Warning. No ready copies configured. At least one is recommended.");
}
} | throw new IllegalArgumentException(getErrorMsgPrefix(clusterName) + "Warning. No ready copies configured. At least one is recommended."); | static public void validateThatReadyCopiesIsCompatibleWithRedundancy(String clusterName, int totalRedundancy, int totalReadyCopies, int groupCount) {
if (totalRedundancy % groupCount != 0) {
throw new IllegalArgumentException(getErrorMsgPrefix(clusterName) + "Expected equal redundancy per group.");
}
if (totalReadyCopies % groupCount != 0) {
throw new IllegalArgumentException(getErrorMsgPrefix(clusterName) + "Expected equal amount of ready copies per group, but " +
totalReadyCopies + " ready copies is specified with " + groupCount + " groups");
}
if (totalReadyCopies == 0) {
throw new IllegalArgumentException(getErrorMsgPrefix(clusterName) + "Warning. No ready copies configured. At least one is required.");
}
} | class IndexedHierarchicDistributionValidator {
private final String clusterName;
private final StorageGroup rootGroup;
private final Redundancy redundancy;
private final DispatchTuning.DispatchPolicy dispatchPolicy;
/**
 * Creates a validator for the given content cluster's hierarchic distribution setup.
 * Validation itself happens in {@link #validate()}.
 */
public IndexedHierarchicDistributionValidator(String clusterName,
StorageGroup rootGroup,
Redundancy redundancy,
DispatchTuning.DispatchPolicy dispatchPolicy) {
this.clusterName = clusterName;
this.rootGroup = rootGroup;
this.redundancy = redundancy;
this.dispatchPolicy = dispatchPolicy;
}
/** Runs all hierarchic-distribution checks; each throws IllegalArgumentException on violation. */
public void validate() {
validateThatWeHaveOneGroupLevel();
validateThatLeafGroupsHasEqualNumberOfNodes();
validateThatLeafGroupsCountIsAFactorOfRedundancy(clusterName, redundancy.effectiveFinalRedundancy(), rootGroup.getSubgroups().size());
validateThatRedundancyPerGroupIsEqual();
validateThatReadyCopiesIsCompatibleWithRedundancy(clusterName, redundancy.effectiveFinalRedundancy(), redundancy.effectiveReadyCopies(), rootGroup.getSubgroups().size());
}
private void validateThatWeHaveOneGroupLevel() {
for (StorageGroup group : rootGroup.getSubgroups()) {
if (group.getSubgroups().size() > 0) {
throw new IllegalArgumentException(getErrorMsgPrefix(clusterName) + "Expected all groups under root group '" +
rootGroup.getName() + "' to be leaf groups only containing nodes, but sub group '" + group.getName() + "' contains " +
group.getSubgroups().size() + " sub groups.");
}
}
}
private void validateThatLeafGroupsHasEqualNumberOfNodes() {
if (dispatchPolicy != DispatchTuning.DispatchPolicy.ROUNDROBIN) return;
StorageGroup previousGroup = null;
for (StorageGroup group : rootGroup.getSubgroups()) {
if (previousGroup == null) {
previousGroup = group;
continue;
}
if (group.getNodes().size() != previousGroup.getNodes().size())
throw new IllegalArgumentException(getErrorMsgPrefix(clusterName) + "Expected leaf groups to contain an equal number of nodes, but leaf group '" +
previousGroup.getName() + "' contains " + previousGroup.getNodes().size() + " node(s) while leaf group '" +
group.getName() + "' contains " + group.getNodes().size() + " node(s).");
previousGroup = group;
}
}
// Verifies that the total redundancy divides evenly across the leaf groups.
static public void validateThatLeafGroupsCountIsAFactorOfRedundancy(String clusterName, int totalRedundancy, int subGroups) {
    if (totalRedundancy % subGroups == 0) {
        return;
    }
    String message = getErrorMsgPrefix(clusterName)
            + "Expected number of leaf groups (" + subGroups
            + ") to be a factor of redundancy (" + totalRedundancy + "), but it is not.";
    throw new IllegalArgumentException(message);
}
private void validateThatRedundancyPerGroupIsEqual() {
int redundancyPerGroup = redundancy.effectiveFinalRedundancy() / rootGroup.getSubgroups().size();
String expPartitions = createDistributionPartitions(redundancyPerGroup, rootGroup.getSubgroups().size());
if (!rootGroup.getPartitions().get().equals(expPartitions)) {
throw new IllegalArgumentException(getErrorMsgPrefix(clusterName) + "Expected redundancy per leaf group to be " +
redundancyPerGroup + ", but it is not according to distribution partitions '" +
rootGroup.getPartitions().get() + "'. Expected distribution partitions should be '" + expPartitions + "'.");
}
}
private String createDistributionPartitions(int redundancyPerGroup, int numGroups) {
StringBuilder sb = new StringBuilder();
for (int i = 0; i < numGroups - 1; ++i) {
sb.append(redundancyPerGroup);
sb.append("|");
}
sb.append("*");
return sb.toString();
}
static private String getErrorMsgPrefix(String clusterName) {
return "In indexed content cluster '" + clusterName + "' using hierarchic distribution: ";
}
} | class IndexedHierarchicDistributionValidator {
private final String clusterName;
private final StorageGroup rootGroup;
private final Redundancy redundancy;
private final DispatchTuning.DispatchPolicy dispatchPolicy;
public IndexedHierarchicDistributionValidator(String clusterName,
StorageGroup rootGroup,
Redundancy redundancy,
DispatchTuning.DispatchPolicy dispatchPolicy) {
this.clusterName = clusterName;
this.rootGroup = rootGroup;
this.redundancy = redundancy;
this.dispatchPolicy = dispatchPolicy;
}
public void validate() {
validateThatWeHaveOneGroupLevel();
validateThatLeafGroupsHasEqualNumberOfNodes();
validateThatLeafGroupsCountIsAFactorOfRedundancy(clusterName, redundancy.effectiveFinalRedundancy(), rootGroup.getSubgroups().size());
validateThatRedundancyPerGroupIsEqual();
validateThatReadyCopiesIsCompatibleWithRedundancy(clusterName, redundancy.effectiveFinalRedundancy(), redundancy.effectiveReadyCopies(), rootGroup.getSubgroups().size());
}
private void validateThatWeHaveOneGroupLevel() {
for (StorageGroup group : rootGroup.getSubgroups()) {
if (group.getSubgroups().size() > 0) {
throw new IllegalArgumentException(getErrorMsgPrefix(clusterName) + "Expected all groups under root group '" +
rootGroup.getName() + "' to be leaf groups only containing nodes, but sub group '" + group.getName() + "' contains " +
group.getSubgroups().size() + " sub groups.");
}
}
}
private void validateThatLeafGroupsHasEqualNumberOfNodes() {
if (dispatchPolicy != DispatchTuning.DispatchPolicy.ROUNDROBIN) return;
StorageGroup previousGroup = null;
for (StorageGroup group : rootGroup.getSubgroups()) {
if (previousGroup == null) {
previousGroup = group;
continue;
}
if (group.getNodes().size() != previousGroup.getNodes().size())
throw new IllegalArgumentException(getErrorMsgPrefix(clusterName) + "Expected leaf groups to contain an equal number of nodes, but leaf group '" +
previousGroup.getName() + "' contains " + previousGroup.getNodes().size() + " node(s) while leaf group '" +
group.getName() + "' contains " + group.getNodes().size() + " node(s).");
previousGroup = group;
}
}
static public void validateThatLeafGroupsCountIsAFactorOfRedundancy(String clusterName, int totalRedundancy, int subGroups) {
if (totalRedundancy % subGroups != 0) {
throw new IllegalArgumentException(getErrorMsgPrefix(clusterName) + "Expected number of leaf groups (" +
subGroups + ") to be a factor of redundancy (" +
totalRedundancy + "), but it is not.");
}
}
private void validateThatRedundancyPerGroupIsEqual() {
int redundancyPerGroup = redundancy.effectiveFinalRedundancy() / rootGroup.getSubgroups().size();
String expPartitions = createDistributionPartitions(redundancyPerGroup, rootGroup.getSubgroups().size());
if (!rootGroup.getPartitions().get().equals(expPartitions)) {
throw new IllegalArgumentException(getErrorMsgPrefix(clusterName) + "Expected redundancy per leaf group to be " +
redundancyPerGroup + ", but it is not according to distribution partitions '" +
rootGroup.getPartitions().get() + "'. Expected distribution partitions should be '" + expPartitions + "'.");
}
}
private String createDistributionPartitions(int redundancyPerGroup, int numGroups) {
StringBuilder sb = new StringBuilder();
for (int i = 0; i < numGroups - 1; ++i) {
sb.append(redundancyPerGroup);
sb.append("|");
}
sb.append("*");
return sb.toString();
}
static private String getErrorMsgPrefix(String clusterName) {
return "In indexed content cluster '" + clusterName + "' using hierarchic distribution: ";
}
} |
why not remove src group? | private void mergeGroup(Group srcGroup, Group dstGroup) {
if (srcGroup == rootGroup) {
rootGroup = dstGroup;
}
List<GroupExpression> needReinsertedExpressions = Lists.newArrayList();
for (Iterator<Map.Entry<GroupExpression, GroupExpression>>
iterator = groupExpressions.entrySet().iterator(); iterator.hasNext(); ) {
GroupExpression groupExpr = iterator.next().getKey();
int referSrcGroupIndex = -1;
for (int i = 0; i < groupExpr.getInputs().size(); i++) {
if (groupExpr.getInputs().get(i) == srcGroup) {
referSrcGroupIndex = i;
break;
}
}
if (referSrcGroupIndex >= 0) {
iterator.remove();
groupExpr.getInputs().set(referSrcGroupIndex, dstGroup);
needReinsertedExpressions.add(groupExpr);
}
if (groupExpr.getGroup() == srcGroup) {
groupExpr.setGroup(dstGroup);
}
}
for (GroupExpression groupExpression : needReinsertedExpressions) {
if (!groupExpressions.containsKey(groupExpression)) {
groupExpressions.put(groupExpression, groupExpression);
} else {
groupExpression.getGroup().removeGroupExpression(groupExpression);
groupExpression.setUnused(true);
GroupExpression existGroupExpression = groupExpressions.get(groupExpression);
if (!needMerge(groupExpression.getGroup(), existGroupExpression.getGroup())) {
groupExpression.getGroup().replaceBestExpression(groupExpression, existGroupExpression);
}
}
}
dstGroup.mergeGroup(srcGroup);
List<Group> groups = getAllEmptyGroups();
for (Group group : groups) {
removeOneGroup(group);
}
} | private void mergeGroup(Group srcGroup, Group dstGroup) {
groups.remove(srcGroup);
if (srcGroup == rootGroup) {
rootGroup = dstGroup;
}
List<GroupExpression> needReinsertedExpressions = Lists.newArrayList();
for (Iterator<Map.Entry<GroupExpression, GroupExpression>>
iterator = groupExpressions.entrySet().iterator(); iterator.hasNext(); ) {
GroupExpression groupExpr = iterator.next().getKey();
int referSrcGroupIndex = -1;
for (int i = 0; i < groupExpr.getInputs().size(); i++) {
if (groupExpr.getInputs().get(i) == srcGroup) {
referSrcGroupIndex = i;
break;
}
}
if (referSrcGroupIndex >= 0) {
iterator.remove();
groupExpr.getInputs().set(referSrcGroupIndex, dstGroup);
needReinsertedExpressions.add(groupExpr);
}
if (groupExpr.getGroup() == srcGroup) {
groupExpr.setGroup(dstGroup);
}
}
for (GroupExpression groupExpression : needReinsertedExpressions) {
if (!groupExpressions.containsKey(groupExpression)) {
groupExpressions.put(groupExpression, groupExpression);
} else {
groupExpression.getGroup().removeGroupExpression(groupExpression);
groupExpression.setUnused(true);
GroupExpression existGroupExpression = groupExpressions.get(groupExpression);
if (!needMerge(groupExpression.getGroup(), existGroupExpression.getGroup())) {
groupExpression.getGroup().replaceBestExpression(groupExpression, existGroupExpression);
}
}
}
dstGroup.mergeGroup(srcGroup);
List<Group> groups = getAllEmptyGroups();
for (Group group : groups) {
removeOneGroup(group);
}
} | class Memo {
private static final Logger LOG = LogManager.getLogger(Memo.class);
private int nextGroupId = 0;
private final List<Group> groups;
private Group rootGroup;
/**
* The map value is root group id for the GroupExpression.
* We need to store group id because when {@see insertGroupExpression}
* we need to get existed group id for tmp GroupExpression,
* which doesn't have group id info
*/
private final Map<GroupExpression, GroupExpression> groupExpressions;
public List<Group> getGroups() {
return groups;
}
public Map<GroupExpression, GroupExpression> getGroupExpressions() {
return groupExpressions;
}
public Memo() {
groups = Lists.newLinkedList();
groupExpressions = Maps.newHashMap();
}
public Group getRootGroup() {
return rootGroup;
}
/**
* Copy an expression into search space, this function will add an GroupExpression for
* this Expression. If this Expression has children, this function will be called
* recursively to create GroupExpression and Group for every single Expression
* For example, Join(Scan(A), Scan(B)) will create 3 Groups and GroupExpressions for Join,
* Scan(A) and Scan(B).
* We return GroupExpression rather than Group because we can get Group from GroupExpression
*/
public GroupExpression init(OptExpression originExpression) {
Preconditions.checkState(groups.size() == 0);
Preconditions.checkState(groupExpressions.size() == 0);
GroupExpression rootGroupExpression = copyIn(null, originExpression).second;
rootGroup = rootGroupExpression.getGroup();
return rootGroupExpression;
}
public Pair<Boolean, GroupExpression> insertGroupExpression(GroupExpression groupExpression, Group targetGroup) {
if (groupExpressions.get(groupExpression) != null) {
GroupExpression existedGroupExpression = groupExpressions.get(groupExpression);
Group existedGroup = existedGroupExpression.getGroup();
if (needMerge(targetGroup, existedGroup)) {
mergeGroup(existedGroup, targetGroup);
}
return new Pair<>(false, existedGroupExpression);
}
if (targetGroup == null) {
targetGroup = newGroup();
groups.add(targetGroup);
}
groupExpressions.put(groupExpression, groupExpression);
targetGroup.addExpression(groupExpression);
return new Pair<>(true, groupExpression);
}
/**
* Insert an enforce expression into the target group.
*/
public void insertEnforceExpression(GroupExpression groupExpression, Group targetGroup) {
groupExpression.setGroup(targetGroup);
}
private Group newGroup() {
return new Group(nextGroupId++);
}
public Pair<Boolean, GroupExpression> copyIn(Group targetGroup, OptExpression expression) {
List<Group> inputs = Lists.newArrayList();
for (OptExpression input : expression.getInputs()) {
Group group;
if (input.getGroupExpression() != null) {
group = input.getGroupExpression().getGroup();
} else {
group = copyIn(null, input).second.getGroup();
}
Preconditions.checkState(group != null);
Preconditions.checkState(group != targetGroup);
inputs.add(group);
}
GroupExpression groupExpression = new GroupExpression(expression.getOp(), inputs);
Pair<Boolean, GroupExpression> result = insertGroupExpression(groupExpression, targetGroup);
if (result.first && targetGroup == null) {
Preconditions.checkState(result.second.getOp().isLogical());
result.second.deriveLogicalPropertyItself();
result.second.getGroup().setStatistics(expression.getStatistics());
}
return result;
}
private boolean needMerge(Group targetGroup, Group existedGroup) {
return targetGroup != null && targetGroup != existedGroup;
}
private List<Group> getAllEmptyGroups() {
List<Group> groups = Lists.newArrayList();
for (Group group : getGroups()) {
if (group.isEmpty()) {
groups.add(group);
continue;
}
for (Group childGroup : group.getFirstLogicalExpression().getInputs()) {
if (childGroup.isEmpty()) {
groups.add(childGroup);
break;
}
}
}
return groups;
}
public void removeAllEmptyGroup() {
List<Group> groups = getAllEmptyGroups();
while (!groups.isEmpty()) {
for (Group group : groups) {
removeOneGroup(group);
}
groups = getAllEmptyGroups();
}
}
private void removeOneGroup(Group group) {
groups.remove(group);
for (Iterator<Map.Entry<GroupExpression, GroupExpression>>
iterator = groupExpressions.entrySet().iterator(); iterator.hasNext(); ) {
GroupExpression groupExpr = iterator.next().getKey();
if (groupExpr.getGroup() == group) {
iterator.remove();
continue;
}
for (int i = 0; i < groupExpr.getInputs().size(); i++) {
if (groupExpr.getInputs().get(i) == group) {
groupExpr.getGroup().removeGroupExpression(groupExpr);
iterator.remove();
break;
}
}
}
}
private void deepSearchGroup(Group root, LinkedList<Integer> touch) {
for (Group group : root.getFirstLogicalExpression().getInputs()) {
touch.add(group.getId());
deepSearchGroup(group, touch);
}
}
/**
* When performing replaceRewriteExpression, some groups may not be reachable by rootGroup.
* These groups should be replaced.
* In order to reduce the number of groups entering Memo,
* we will delete inaccessible groups in this function.
*/
public void removeUnreachableGroup() {
LinkedList<Integer> touch = new LinkedList<>();
touch.add(rootGroup.getId());
deepSearchGroup(rootGroup, touch);
List<Group> groupsCopy = new ArrayList<>(groups);
for (Group group : groupsCopy) {
if (!touch.contains(group.getId())) {
removeOneGroup(group);
}
}
}
public void replaceRewriteExpression(Group targetGroup, OptExpression expression) {
removeGroupInitLogicExpression(targetGroup);
GroupExpression groupExpression = copyIn(targetGroup, expression).second;
groupExpression.deriveLogicalPropertyItself();
}
private void removeGroupInitLogicExpression(Group group) {
GroupExpression initGroupExpression = group.getFirstLogicalExpression();
groupExpressions.remove(initGroupExpression);
Preconditions.checkState(group.isValidInitState());
group.getLogicalExpressions().clear();
}
public void deriveAllGroupLogicalProperty() {
getRootGroup().getFirstLogicalExpression().deriveLogicalPropertyRecursively();
}
} | class Memo {
private static final Logger LOG = LogManager.getLogger(Memo.class);
private int nextGroupId = 0;
private final List<Group> groups;
private Group rootGroup;
/**
* The map value is root group id for the GroupExpression.
* We need to store group id because when {@see insertGroupExpression}
* we need to get existed group id for tmp GroupExpression,
* which doesn't have group id info
*/
private final Map<GroupExpression, GroupExpression> groupExpressions;
public List<Group> getGroups() {
return groups;
}
public Map<GroupExpression, GroupExpression> getGroupExpressions() {
return groupExpressions;
}
public Memo() {
groups = Lists.newLinkedList();
groupExpressions = Maps.newHashMap();
}
public Group getRootGroup() {
return rootGroup;
}
/**
* Copy an expression into search space, this function will add an GroupExpression for
* this Expression. If this Expression has children, this function will be called
* recursively to create GroupExpression and Group for every single Expression
* For example, Join(Scan(A), Scan(B)) will create 3 Groups and GroupExpressions for Join,
* Scan(A) and Scan(B).
* We return GroupExpression rather than Group because we can get Group from GroupExpression
*/
public GroupExpression init(OptExpression originExpression) {
Preconditions.checkState(groups.size() == 0);
Preconditions.checkState(groupExpressions.size() == 0);
GroupExpression rootGroupExpression = copyIn(null, originExpression).second;
rootGroup = rootGroupExpression.getGroup();
return rootGroupExpression;
}
public Pair<Boolean, GroupExpression> insertGroupExpression(GroupExpression groupExpression, Group targetGroup) {
if (groupExpressions.get(groupExpression) != null) {
GroupExpression existedGroupExpression = groupExpressions.get(groupExpression);
Group existedGroup = existedGroupExpression.getGroup();
if (needMerge(targetGroup, existedGroup)) {
mergeGroup(existedGroup, targetGroup);
}
return new Pair<>(false, existedGroupExpression);
}
if (targetGroup == null) {
targetGroup = newGroup();
groups.add(targetGroup);
}
groupExpressions.put(groupExpression, groupExpression);
targetGroup.addExpression(groupExpression);
return new Pair<>(true, groupExpression);
}
/**
* Insert an enforce expression into the target group.
*/
public void insertEnforceExpression(GroupExpression groupExpression, Group targetGroup) {
groupExpression.setGroup(targetGroup);
}
private Group newGroup() {
return new Group(nextGroupId++);
}
public Pair<Boolean, GroupExpression> copyIn(Group targetGroup, OptExpression expression) {
List<Group> inputs = Lists.newArrayList();
for (OptExpression input : expression.getInputs()) {
Group group;
if (input.getGroupExpression() != null) {
group = input.getGroupExpression().getGroup();
} else {
group = copyIn(null, input).second.getGroup();
}
Preconditions.checkState(group != null);
Preconditions.checkState(group != targetGroup);
inputs.add(group);
}
GroupExpression groupExpression = new GroupExpression(expression.getOp(), inputs);
Pair<Boolean, GroupExpression> result = insertGroupExpression(groupExpression, targetGroup);
if (result.first && targetGroup == null) {
Preconditions.checkState(result.second.getOp().isLogical());
result.second.deriveLogicalPropertyItself();
result.second.getGroup().setStatistics(expression.getStatistics());
}
return result;
}
private boolean needMerge(Group targetGroup, Group existedGroup) {
return targetGroup != null && targetGroup != existedGroup;
}
private List<Group> getAllEmptyGroups() {
List<Group> groups = Lists.newArrayList();
for (Group group : getGroups()) {
if (group.isEmpty()) {
groups.add(group);
continue;
}
for (Group childGroup : group.getFirstLogicalExpression().getInputs()) {
if (childGroup.isEmpty()) {
groups.add(childGroup);
break;
}
}
}
return groups;
}
public void removeAllEmptyGroup() {
List<Group> groups = getAllEmptyGroups();
while (!groups.isEmpty()) {
for (Group group : groups) {
removeOneGroup(group);
}
groups = getAllEmptyGroups();
}
}
private void removeOneGroup(Group group) {
groups.remove(group);
for (Iterator<Map.Entry<GroupExpression, GroupExpression>>
iterator = groupExpressions.entrySet().iterator(); iterator.hasNext(); ) {
GroupExpression groupExpr = iterator.next().getKey();
if (groupExpr.getGroup() == group) {
iterator.remove();
continue;
}
for (int i = 0; i < groupExpr.getInputs().size(); i++) {
if (groupExpr.getInputs().get(i) == group) {
groupExpr.getGroup().removeGroupExpression(groupExpr);
iterator.remove();
break;
}
}
}
}
private void deepSearchGroup(Group root, LinkedList<Integer> touch) {
for (Group group : root.getFirstLogicalExpression().getInputs()) {
touch.add(group.getId());
deepSearchGroup(group, touch);
}
}
/**
* When performing replaceRewriteExpression, some groups may not be reachable by rootGroup.
* These groups should be replaced.
* In order to reduce the number of groups entering Memo,
* we will delete inaccessible groups in this function.
*/
public void removeUnreachableGroup() {
LinkedList<Integer> touch = new LinkedList<>();
touch.add(rootGroup.getId());
deepSearchGroup(rootGroup, touch);
List<Group> groupsCopy = new ArrayList<>(groups);
for (Group group : groupsCopy) {
if (!touch.contains(group.getId())) {
removeOneGroup(group);
}
}
}
public void replaceRewriteExpression(Group targetGroup, OptExpression expression) {
removeGroupInitLogicExpression(targetGroup);
GroupExpression groupExpression = copyIn(targetGroup, expression).second;
groupExpression.deriveLogicalPropertyItself();
}
private void removeGroupInitLogicExpression(Group group) {
GroupExpression initGroupExpression = group.getFirstLogicalExpression();
groupExpressions.remove(initGroupExpression);
Preconditions.checkState(group.isValidInitState());
group.getLogicalExpressions().clear();
}
public void deriveAllGroupLogicalProperty() {
getRootGroup().getFirstLogicalExpression().deriveLogicalPropertyRecursively();
}
} | |
done | static public void validateThatReadyCopiesIsCompatibleWithRedundancy(String clusterName, int totalRedundancy, int totalReadyCopies, int groupCount) {
if (totalRedundancy % groupCount != 0) {
throw new IllegalArgumentException(getErrorMsgPrefix(clusterName) + "Expected equal redundancy per group.");
}
if (totalReadyCopies % groupCount != 0) {
throw new IllegalArgumentException(getErrorMsgPrefix(clusterName) + "Expected equal amount of ready copies per group, but " +
totalReadyCopies + " ready copies is specified with " + groupCount + " groups");
}
if (totalReadyCopies == 0) {
throw new IllegalArgumentException(getErrorMsgPrefix(clusterName) + "Warning. No ready copies configured. At least one is recommended.");
}
} | throw new IllegalArgumentException(getErrorMsgPrefix(clusterName) + "Warning. No ready copies configured. At least one is recommended."); | static public void validateThatReadyCopiesIsCompatibleWithRedundancy(String clusterName, int totalRedundancy, int totalReadyCopies, int groupCount) {
if (totalRedundancy % groupCount != 0) {
throw new IllegalArgumentException(getErrorMsgPrefix(clusterName) + "Expected equal redundancy per group.");
}
if (totalReadyCopies % groupCount != 0) {
throw new IllegalArgumentException(getErrorMsgPrefix(clusterName) + "Expected equal amount of ready copies per group, but " +
totalReadyCopies + " ready copies is specified with " + groupCount + " groups");
}
if (totalReadyCopies == 0) {
throw new IllegalArgumentException(getErrorMsgPrefix(clusterName) + "Warning. No ready copies configured. At least one is required.");
}
} | class IndexedHierarchicDistributionValidator {
private final String clusterName;
private final StorageGroup rootGroup;
private final Redundancy redundancy;
private final DispatchTuning.DispatchPolicy dispatchPolicy;
public IndexedHierarchicDistributionValidator(String clusterName,
StorageGroup rootGroup,
Redundancy redundancy,
DispatchTuning.DispatchPolicy dispatchPolicy) {
this.clusterName = clusterName;
this.rootGroup = rootGroup;
this.redundancy = redundancy;
this.dispatchPolicy = dispatchPolicy;
}
public void validate() {
validateThatWeHaveOneGroupLevel();
validateThatLeafGroupsHasEqualNumberOfNodes();
validateThatLeafGroupsCountIsAFactorOfRedundancy(clusterName, redundancy.effectiveFinalRedundancy(), rootGroup.getSubgroups().size());
validateThatRedundancyPerGroupIsEqual();
validateThatReadyCopiesIsCompatibleWithRedundancy(clusterName, redundancy.effectiveFinalRedundancy(), redundancy.effectiveReadyCopies(), rootGroup.getSubgroups().size());
}
private void validateThatWeHaveOneGroupLevel() {
for (StorageGroup group : rootGroup.getSubgroups()) {
if (group.getSubgroups().size() > 0) {
throw new IllegalArgumentException(getErrorMsgPrefix(clusterName) + "Expected all groups under root group '" +
rootGroup.getName() + "' to be leaf groups only containing nodes, but sub group '" + group.getName() + "' contains " +
group.getSubgroups().size() + " sub groups.");
}
}
}
private void validateThatLeafGroupsHasEqualNumberOfNodes() {
if (dispatchPolicy != DispatchTuning.DispatchPolicy.ROUNDROBIN) return;
StorageGroup previousGroup = null;
for (StorageGroup group : rootGroup.getSubgroups()) {
if (previousGroup == null) {
previousGroup = group;
continue;
}
if (group.getNodes().size() != previousGroup.getNodes().size())
throw new IllegalArgumentException(getErrorMsgPrefix(clusterName) + "Expected leaf groups to contain an equal number of nodes, but leaf group '" +
previousGroup.getName() + "' contains " + previousGroup.getNodes().size() + " node(s) while leaf group '" +
group.getName() + "' contains " + group.getNodes().size() + " node(s).");
previousGroup = group;
}
}
static public void validateThatLeafGroupsCountIsAFactorOfRedundancy(String clusterName, int totalRedundancy, int subGroups) {
if (totalRedundancy % subGroups != 0) {
throw new IllegalArgumentException(getErrorMsgPrefix(clusterName) + "Expected number of leaf groups (" +
subGroups + ") to be a factor of redundancy (" +
totalRedundancy + "), but it is not.");
}
}
private void validateThatRedundancyPerGroupIsEqual() {
int redundancyPerGroup = redundancy.effectiveFinalRedundancy() / rootGroup.getSubgroups().size();
String expPartitions = createDistributionPartitions(redundancyPerGroup, rootGroup.getSubgroups().size());
if (!rootGroup.getPartitions().get().equals(expPartitions)) {
throw new IllegalArgumentException(getErrorMsgPrefix(clusterName) + "Expected redundancy per leaf group to be " +
redundancyPerGroup + ", but it is not according to distribution partitions '" +
rootGroup.getPartitions().get() + "'. Expected distribution partitions should be '" + expPartitions + "'.");
}
}
private String createDistributionPartitions(int redundancyPerGroup, int numGroups) {
StringBuilder sb = new StringBuilder();
for (int i = 0; i < numGroups - 1; ++i) {
sb.append(redundancyPerGroup);
sb.append("|");
}
sb.append("*");
return sb.toString();
}
static private String getErrorMsgPrefix(String clusterName) {
return "In indexed content cluster '" + clusterName + "' using hierarchic distribution: ";
}
} | class IndexedHierarchicDistributionValidator {
private final String clusterName;
private final StorageGroup rootGroup;
private final Redundancy redundancy;
private final DispatchTuning.DispatchPolicy dispatchPolicy;
public IndexedHierarchicDistributionValidator(String clusterName,
StorageGroup rootGroup,
Redundancy redundancy,
DispatchTuning.DispatchPolicy dispatchPolicy) {
this.clusterName = clusterName;
this.rootGroup = rootGroup;
this.redundancy = redundancy;
this.dispatchPolicy = dispatchPolicy;
}
public void validate() {
validateThatWeHaveOneGroupLevel();
validateThatLeafGroupsHasEqualNumberOfNodes();
validateThatLeafGroupsCountIsAFactorOfRedundancy(clusterName, redundancy.effectiveFinalRedundancy(), rootGroup.getSubgroups().size());
validateThatRedundancyPerGroupIsEqual();
validateThatReadyCopiesIsCompatibleWithRedundancy(clusterName, redundancy.effectiveFinalRedundancy(), redundancy.effectiveReadyCopies(), rootGroup.getSubgroups().size());
}
private void validateThatWeHaveOneGroupLevel() {
for (StorageGroup group : rootGroup.getSubgroups()) {
if (group.getSubgroups().size() > 0) {
throw new IllegalArgumentException(getErrorMsgPrefix(clusterName) + "Expected all groups under root group '" +
rootGroup.getName() + "' to be leaf groups only containing nodes, but sub group '" + group.getName() + "' contains " +
group.getSubgroups().size() + " sub groups.");
}
}
}
private void validateThatLeafGroupsHasEqualNumberOfNodes() {
if (dispatchPolicy != DispatchTuning.DispatchPolicy.ROUNDROBIN) return;
StorageGroup previousGroup = null;
for (StorageGroup group : rootGroup.getSubgroups()) {
if (previousGroup == null) {
previousGroup = group;
continue;
}
if (group.getNodes().size() != previousGroup.getNodes().size())
throw new IllegalArgumentException(getErrorMsgPrefix(clusterName) + "Expected leaf groups to contain an equal number of nodes, but leaf group '" +
previousGroup.getName() + "' contains " + previousGroup.getNodes().size() + " node(s) while leaf group '" +
group.getName() + "' contains " + group.getNodes().size() + " node(s).");
previousGroup = group;
}
}
static public void validateThatLeafGroupsCountIsAFactorOfRedundancy(String clusterName, int totalRedundancy, int subGroups) {
if (totalRedundancy % subGroups != 0) {
throw new IllegalArgumentException(getErrorMsgPrefix(clusterName) + "Expected number of leaf groups (" +
subGroups + ") to be a factor of redundancy (" +
totalRedundancy + "), but it is not.");
}
}
private void validateThatRedundancyPerGroupIsEqual() {
int redundancyPerGroup = redundancy.effectiveFinalRedundancy() / rootGroup.getSubgroups().size();
String expPartitions = createDistributionPartitions(redundancyPerGroup, rootGroup.getSubgroups().size());
if (!rootGroup.getPartitions().get().equals(expPartitions)) {
throw new IllegalArgumentException(getErrorMsgPrefix(clusterName) + "Expected redundancy per leaf group to be " +
redundancyPerGroup + ", but it is not according to distribution partitions '" +
rootGroup.getPartitions().get() + "'. Expected distribution partitions should be '" + expPartitions + "'.");
}
}
private String createDistributionPartitions(int redundancyPerGroup, int numGroups) {
StringBuilder sb = new StringBuilder();
for (int i = 0; i < numGroups - 1; ++i) {
sb.append(redundancyPerGroup);
sb.append("|");
}
sb.append("*");
return sb.toString();
}
static private String getErrorMsgPrefix(String clusterName) {
return "In indexed content cluster '" + clusterName + "' using hierarchic distribution: ";
}
} |
A bit spammy, but OK for now :) | private void waitForApplicationRemoved(TenantApplications applications, ApplicationId applicationId) {
log.log(Level.INFO, "Waiting for " + applicationId + " to be deleted");
Duration duration = Duration.ofSeconds(5);
Instant end = Instant.now().plus(duration);
do {
if ( ! (applications.hasApplication(applicationId)))
return;
log.log(Level.INFO, "Application " + applicationId + " not deleted yet, will retry");
try {
Thread.sleep(100);
} catch (InterruptedException interruptedException) {/* ignore */}
} while (Instant.now().isBefore(end));
log.log(Level.INFO, "Application " + applicationId + " not deleted after " + duration + ", giving up");
} | log.log(Level.INFO, "Application " + applicationId + " not deleted yet, will retry"); | private void waitForApplicationRemoved(TenantApplications applications, ApplicationId applicationId) {
log.log(Level.INFO, "Waiting for " + applicationId + " to be deleted");
Duration duration = Duration.ofSeconds(5);
Instant end = Instant.now().plus(duration);
do {
if ( ! (applications.hasApplication(applicationId)))
return;
log.log(Level.INFO, "Application " + applicationId + " not deleted yet, will retry");
try {
Thread.sleep(100);
} catch (InterruptedException interruptedException) {/* ignore */}
} while (Instant.now().isBefore(end));
log.log(Level.INFO, "Application " + applicationId + " not deleted after " + duration + ", giving up");
} | class Builder {
private TenantRepository tenantRepository;
private Optional<Provisioner> hostProvisioner;
private HttpProxy httpProxy = new HttpProxy(new SimpleHttpFetcher());
private Clock clock = Clock.systemUTC();
private ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder().build();
private Orchestrator orchestrator;
private LogRetriever logRetriever = new LogRetriever();
private TesterClient testerClient = new TesterClient();
private Metric metric = new NullMetric();
private SecretStoreValidator secretStoreValidator = new SecretStoreValidator(new SecretStoreProvider().get());
private FlagSource flagSource = new InMemoryFlagSource();
public Builder withTenantRepository(TenantRepository tenantRepository) {
this.tenantRepository = tenantRepository;
return this;
}
public Builder withClock(Clock clock) {
this.clock = clock;
return this;
}
public Builder withProvisioner(Provisioner provisioner) {
if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder");
this.hostProvisioner = Optional.ofNullable(provisioner);
return this;
}
public Builder withHostProvisionerProvider(HostProvisionerProvider hostProvisionerProvider) {
if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder");
this.hostProvisioner = hostProvisionerProvider.getHostProvisioner();
return this;
}
public Builder withHttpProxy(HttpProxy httpProxy) {
this.httpProxy = httpProxy;
return this;
}
public Builder withConfigserverConfig(ConfigserverConfig configserverConfig) {
this.configserverConfig = configserverConfig;
return this;
}
public Builder withOrchestrator(Orchestrator orchestrator) {
this.orchestrator = orchestrator;
return this;
}
public Builder withLogRetriever(LogRetriever logRetriever) {
this.logRetriever = logRetriever;
return this;
}
public Builder withTesterClient(TesterClient testerClient) {
this.testerClient = testerClient;
return this;
}
public Builder withFlagSource(FlagSource flagSource) {
this.flagSource = flagSource;
return this;
}
public Builder withMetric(Metric metric) {
this.metric = metric;
return this;
}
public Builder withSecretStoreValidator(SecretStoreValidator secretStoreValidator) {
this.secretStoreValidator = secretStoreValidator;
return this;
}
public ApplicationRepository build() {
return new ApplicationRepository(tenantRepository,
hostProvisioner,
InfraDeployerProvider.empty().getInfraDeployer(),
new ConfigConvergenceChecker(),
httpProxy,
configserverConfig,
orchestrator,
logRetriever,
clock,
testerClient,
metric,
secretStoreValidator,
ClusterReindexingStatusClient.DUMMY_INSTANCE);
}
} | class Builder {
private TenantRepository tenantRepository;
private Optional<Provisioner> hostProvisioner;
private HttpProxy httpProxy = new HttpProxy(new SimpleHttpFetcher());
private Clock clock = Clock.systemUTC();
private ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder().build();
private Orchestrator orchestrator;
private LogRetriever logRetriever = new LogRetriever();
private TesterClient testerClient = new TesterClient();
private Metric metric = new NullMetric();
private SecretStoreValidator secretStoreValidator = new SecretStoreValidator(new SecretStoreProvider().get());
private FlagSource flagSource = new InMemoryFlagSource();
public Builder withTenantRepository(TenantRepository tenantRepository) {
this.tenantRepository = tenantRepository;
return this;
}
public Builder withClock(Clock clock) {
this.clock = clock;
return this;
}
public Builder withProvisioner(Provisioner provisioner) {
if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder");
this.hostProvisioner = Optional.ofNullable(provisioner);
return this;
}
public Builder withHostProvisionerProvider(HostProvisionerProvider hostProvisionerProvider) {
if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder");
this.hostProvisioner = hostProvisionerProvider.getHostProvisioner();
return this;
}
public Builder withHttpProxy(HttpProxy httpProxy) {
this.httpProxy = httpProxy;
return this;
}
public Builder withConfigserverConfig(ConfigserverConfig configserverConfig) {
this.configserverConfig = configserverConfig;
return this;
}
public Builder withOrchestrator(Orchestrator orchestrator) {
this.orchestrator = orchestrator;
return this;
}
public Builder withLogRetriever(LogRetriever logRetriever) {
this.logRetriever = logRetriever;
return this;
}
public Builder withTesterClient(TesterClient testerClient) {
this.testerClient = testerClient;
return this;
}
public Builder withFlagSource(FlagSource flagSource) {
this.flagSource = flagSource;
return this;
}
public Builder withMetric(Metric metric) {
this.metric = metric;
return this;
}
public Builder withSecretStoreValidator(SecretStoreValidator secretStoreValidator) {
this.secretStoreValidator = secretStoreValidator;
return this;
}
public ApplicationRepository build() {
return new ApplicationRepository(tenantRepository,
hostProvisioner,
InfraDeployerProvider.empty().getInfraDeployer(),
new ConfigConvergenceChecker(),
httpProxy,
configserverConfig,
orchestrator,
logRetriever,
clock,
testerClient,
metric,
secretStoreValidator,
ClusterReindexingStatusClient.DUMMY_INSTANCE);
}
} |
Right, thanks, I'll add a TODO or just remove it tomorrow. | private void waitForApplicationRemoved(TenantApplications applications, ApplicationId applicationId) {
log.log(Level.INFO, "Waiting for " + applicationId + " to be deleted");
Duration duration = Duration.ofSeconds(5);
Instant end = Instant.now().plus(duration);
do {
if ( ! (applications.hasApplication(applicationId)))
return;
log.log(Level.INFO, "Application " + applicationId + " not deleted yet, will retry");
try {
Thread.sleep(100);
} catch (InterruptedException interruptedException) {/* ignore */}
} while (Instant.now().isBefore(end));
log.log(Level.INFO, "Application " + applicationId + " not deleted after " + duration + ", giving up");
} | log.log(Level.INFO, "Application " + applicationId + " not deleted yet, will retry"); | private void waitForApplicationRemoved(TenantApplications applications, ApplicationId applicationId) {
log.log(Level.INFO, "Waiting for " + applicationId + " to be deleted");
Duration duration = Duration.ofSeconds(5);
Instant end = Instant.now().plus(duration);
do {
if ( ! (applications.hasApplication(applicationId)))
return;
log.log(Level.INFO, "Application " + applicationId + " not deleted yet, will retry");
try {
Thread.sleep(100);
} catch (InterruptedException interruptedException) {/* ignore */}
} while (Instant.now().isBefore(end));
log.log(Level.INFO, "Application " + applicationId + " not deleted after " + duration + ", giving up");
} | class Builder {
private TenantRepository tenantRepository;
private Optional<Provisioner> hostProvisioner;
private HttpProxy httpProxy = new HttpProxy(new SimpleHttpFetcher());
private Clock clock = Clock.systemUTC();
private ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder().build();
private Orchestrator orchestrator;
private LogRetriever logRetriever = new LogRetriever();
private TesterClient testerClient = new TesterClient();
private Metric metric = new NullMetric();
private SecretStoreValidator secretStoreValidator = new SecretStoreValidator(new SecretStoreProvider().get());
private FlagSource flagSource = new InMemoryFlagSource();
public Builder withTenantRepository(TenantRepository tenantRepository) {
this.tenantRepository = tenantRepository;
return this;
}
public Builder withClock(Clock clock) {
this.clock = clock;
return this;
}
public Builder withProvisioner(Provisioner provisioner) {
if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder");
this.hostProvisioner = Optional.ofNullable(provisioner);
return this;
}
public Builder withHostProvisionerProvider(HostProvisionerProvider hostProvisionerProvider) {
if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder");
this.hostProvisioner = hostProvisionerProvider.getHostProvisioner();
return this;
}
public Builder withHttpProxy(HttpProxy httpProxy) {
this.httpProxy = httpProxy;
return this;
}
public Builder withConfigserverConfig(ConfigserverConfig configserverConfig) {
this.configserverConfig = configserverConfig;
return this;
}
public Builder withOrchestrator(Orchestrator orchestrator) {
this.orchestrator = orchestrator;
return this;
}
public Builder withLogRetriever(LogRetriever logRetriever) {
this.logRetriever = logRetriever;
return this;
}
public Builder withTesterClient(TesterClient testerClient) {
this.testerClient = testerClient;
return this;
}
public Builder withFlagSource(FlagSource flagSource) {
this.flagSource = flagSource;
return this;
}
public Builder withMetric(Metric metric) {
this.metric = metric;
return this;
}
public Builder withSecretStoreValidator(SecretStoreValidator secretStoreValidator) {
this.secretStoreValidator = secretStoreValidator;
return this;
}
public ApplicationRepository build() {
return new ApplicationRepository(tenantRepository,
hostProvisioner,
InfraDeployerProvider.empty().getInfraDeployer(),
new ConfigConvergenceChecker(),
httpProxy,
configserverConfig,
orchestrator,
logRetriever,
clock,
testerClient,
metric,
secretStoreValidator,
ClusterReindexingStatusClient.DUMMY_INSTANCE);
}
} | class Builder {
private TenantRepository tenantRepository;
private Optional<Provisioner> hostProvisioner;
private HttpProxy httpProxy = new HttpProxy(new SimpleHttpFetcher());
private Clock clock = Clock.systemUTC();
private ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder().build();
private Orchestrator orchestrator;
private LogRetriever logRetriever = new LogRetriever();
private TesterClient testerClient = new TesterClient();
private Metric metric = new NullMetric();
private SecretStoreValidator secretStoreValidator = new SecretStoreValidator(new SecretStoreProvider().get());
private FlagSource flagSource = new InMemoryFlagSource();
public Builder withTenantRepository(TenantRepository tenantRepository) {
this.tenantRepository = tenantRepository;
return this;
}
public Builder withClock(Clock clock) {
this.clock = clock;
return this;
}
public Builder withProvisioner(Provisioner provisioner) {
if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder");
this.hostProvisioner = Optional.ofNullable(provisioner);
return this;
}
public Builder withHostProvisionerProvider(HostProvisionerProvider hostProvisionerProvider) {
if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder");
this.hostProvisioner = hostProvisionerProvider.getHostProvisioner();
return this;
}
public Builder withHttpProxy(HttpProxy httpProxy) {
this.httpProxy = httpProxy;
return this;
}
public Builder withConfigserverConfig(ConfigserverConfig configserverConfig) {
this.configserverConfig = configserverConfig;
return this;
}
public Builder withOrchestrator(Orchestrator orchestrator) {
this.orchestrator = orchestrator;
return this;
}
public Builder withLogRetriever(LogRetriever logRetriever) {
this.logRetriever = logRetriever;
return this;
}
public Builder withTesterClient(TesterClient testerClient) {
this.testerClient = testerClient;
return this;
}
public Builder withFlagSource(FlagSource flagSource) {
this.flagSource = flagSource;
return this;
}
public Builder withMetric(Metric metric) {
this.metric = metric;
return this;
}
public Builder withSecretStoreValidator(SecretStoreValidator secretStoreValidator) {
this.secretStoreValidator = secretStoreValidator;
return this;
}
public ApplicationRepository build() {
return new ApplicationRepository(tenantRepository,
hostProvisioner,
InfraDeployerProvider.empty().getInfraDeployer(),
new ConfigConvergenceChecker(),
httpProxy,
configserverConfig,
orchestrator,
logRetriever,
clock,
testerClient,
metric,
secretStoreValidator,
ClusterReindexingStatusClient.DUMMY_INSTANCE);
}
} |
Consider adding "for numeric fields with attribute:fast-search". | public void process(boolean validate, boolean documentsOnly) {
for (SDField field : search.allConcreteFields()) {
Dictionary dictionary = field.getDictionary();
if (dictionary == null) continue;
Attribute attribute = field.getAttribute();
if (attribute.getDataType() instanceof NumericDataType ) {
if (attribute.isFastSearch()) {
attribute.setDictionary(dictionary);
} else {
fail(search, field, "You must specify attribute:fast-search to allow dictionary control");
}
} else {
fail(search, field, "You can only specify 'dictionary:' for numeric fields");
}
}
} | fail(search, field, "You can only specify 'dictionary:' for numeric fields"); | public void process(boolean validate, boolean documentsOnly) {
for (SDField field : search.allConcreteFields()) {
Dictionary dictionary = field.getDictionary();
if (dictionary == null) continue;
Attribute attribute = field.getAttribute();
if (attribute.getDataType() instanceof NumericDataType ) {
if (attribute.isFastSearch()) {
attribute.setDictionary(dictionary);
} else {
fail(search, field, "You must specify 'attribute:fast-search' to allow dictionary control");
}
} else {
fail(search, field, "You can only specify 'dictionary:' for numeric fields");
}
}
} | class DictionaryProcessor extends Processor {
public DictionaryProcessor(Search search, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, QueryProfiles queryProfiles) {
super(search, deployLogger, rankProfileRegistry, queryProfiles);
}
@Override
} | class DictionaryProcessor extends Processor {
public DictionaryProcessor(Search search, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, QueryProfiles queryProfiles) {
super(search, deployLogger, rankProfileRegistry, queryProfiles);
}
@Override
} |
Consider using TestUtil.joinLines() to avoid explicit newline characters. | public void testDefaultDictionarySettings() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field s1 type string {\n" +
" indexing: attribute | summary\n" +
" }\n" +
"\n" +
" field n1 type int {\n" +
" indexing: summary | attribute\n" +
" }\n" +
" }\n" +
"}\n";
SearchBuilder sb = SearchBuilder.createFromString(def);
Search search = sb.getSearch();
assertEquals(Dictionary.Type.BTREE, search.getAttribute("s1").getDictionary().getType());
assertEquals(Dictionary.Type.BTREE, search.getAttribute("n1").getDictionary().getType());
} | "search test {\n" + | public void testDefaultDictionarySettings() throws ParseException {
String def = TestUtil.joinLines(
"search test {",
" document test {",
" field s1 type string {",
" indexing: attribute | summary",
" }",
" field n1 type int {",
" indexing: summary | attribute",
" }",
" }",
"}");
Search search = createSearch(def);
assertEquals(Dictionary.Type.BTREE, search.getAttribute("s1").getDictionary().getType());
assertEquals(Dictionary.Type.BTREE, search.getAttribute("n1").getDictionary().getType());
} | class DictionaryTestCase {
private static AttributesConfig getConfig(Search search) {
AttributeFields attributes = new AttributeFields(search);
AttributesConfig.Builder builder = new AttributesConfig.Builder();
attributes.getConfig(builder);
return builder.build();
}
@Test
@Test
public void testNumericBtreeSettings() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type int {\n" +
" indexing: summary | attribute\n" +
" attribute:fast-search\n" +
" dictionary:btree\n" +
" }\n" +
" }\n" +
"}\n";
SearchBuilder sb = SearchBuilder.createFromString(def);
Search search = sb.getSearch();
assertEquals(Dictionary.Type.BTREE, search.getAttribute("n1").getDictionary().getType());
assertEquals(AttributesConfig.Attribute.Dictionary.Type.BTREE,
getConfig(search).attribute().get(0).dictionary().type());
}
@Test
public void testNumericHashSettings() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type int {\n" +
" indexing: summary | attribute\n" +
" attribute:fast-search\n" +
" dictionary:hash\n" +
" }\n" +
" }\n" +
"}\n";
SearchBuilder sb = SearchBuilder.createFromString(def);
Search search = sb.getSearch();
assertEquals(Dictionary.Type.HASH, search.getAttribute("n1").getDictionary().getType());
assertEquals(AttributesConfig.Attribute.Dictionary.Type.HASH,
getConfig(search).attribute().get(0).dictionary().type());
}
@Test
public void testNumericBtreeAndHashSettings() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type int {\n" +
" indexing: summary | attribute\n" +
" attribute:fast-search\n" +
" dictionary:hash\n" +
" dictionary:btree\n" +
" }\n" +
" }\n" +
"}\n";
SearchBuilder sb = SearchBuilder.createFromString(def);
Search search = sb.getSearch();
assertEquals(Dictionary.Type.BTREE_AND_HASH, search.getAttribute("n1").getDictionary().getType());
assertEquals(AttributesConfig.Attribute.Dictionary.Type.BTREE_AND_HASH,
getConfig(search).attribute().get(0).dictionary().type());
}
@Test
public void testNonNumericFieldsFailsDictionaryControl() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type string {\n" +
" indexing: summary | attribute\n" +
" dictionary:btree\n" +
" }\n" +
" }\n" +
"}\n";
try {
SearchBuilder sb = SearchBuilder.createFromString(def);
fail("Controlling dictionary for non-numeric fields are not yet supported.");
} catch (IllegalArgumentException e) {
assertEquals("For search 'test', field 'n1': You can only specify 'dictionary:' for numeric fields", e.getMessage());
}
}
@Test
public void testNonFastSearchFieldsFailsDictionaryControl() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type int {\n" +
" indexing: summary | attribute\n" +
" dictionary:btree\n" +
" }\n" +
" }\n" +
"}\n";
try {
SearchBuilder sb = SearchBuilder.createFromString(def);
fail("Controlling dictionary for non-fast-search fields are not allowed.");
} catch (IllegalArgumentException e) {
assertEquals("For search 'test', field 'n1': You must specify attribute:fast-search to allow dictionary control", e.getMessage());
}
}
} | class DictionaryTestCase {
private static AttributesConfig getConfig(Search search) {
AttributeFields attributes = new AttributeFields(search);
AttributesConfig.Builder builder = new AttributesConfig.Builder();
attributes.getConfig(builder);
return builder.build();
}
private Search createSearch(String def) throws ParseException {
SearchBuilder sb = SearchBuilder.createFromString(def);
return sb.getSearch();
}
@Test
void verifyNumericDictionaryControl(Dictionary.Type expected,
AttributesConfig.Attribute.Dictionary.Type.Enum expectedConfig,
String ... cfg) throws ParseException
{
String def = TestUtil.joinLines(
"search test {",
" document test {",
" field n1 type int {",
" indexing: summary | attribute",
" attribute:fast-search",
TestUtil.joinLines(cfg),
" }",
" }",
"}");
Search search = createSearch(def);
assertEquals(expected, search.getAttribute("n1").getDictionary().getType());
assertEquals(expectedConfig,
getConfig(search).attribute().get(0).dictionary().type());
}
@Test
public void testNumericBtreeSettings() throws ParseException {
verifyNumericDictionaryControl(Dictionary.Type.BTREE,
AttributesConfig.Attribute.Dictionary.Type.BTREE,
"dictionary:btree");
}
@Test
public void testNumericHashSettings() throws ParseException {
verifyNumericDictionaryControl(Dictionary.Type.HASH,
AttributesConfig.Attribute.Dictionary.Type.HASH,
"dictionary:hash");
}
@Test
public void testNumericBtreeAndHashSettings() throws ParseException {
verifyNumericDictionaryControl(Dictionary.Type.BTREE_AND_HASH,
AttributesConfig.Attribute.Dictionary.Type.BTREE_AND_HASH,
"dictionary:btree", "dictionary:hash");
}
@Test
public void testNonNumericFieldsFailsDictionaryControl() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type string {\n" +
" indexing: summary | attribute\n" +
" dictionary:btree\n" +
" }\n" +
" }\n" +
"}\n";
try {
SearchBuilder sb = SearchBuilder.createFromString(def);
fail("Controlling dictionary for non-numeric fields are not yet supported.");
} catch (IllegalArgumentException e) {
assertEquals("For search 'test', field 'n1': You can only specify 'dictionary:' for numeric fields", e.getMessage());
}
}
@Test
public void testNonFastSearchFieldsFailsDictionaryControl() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type int {\n" +
" indexing: summary | attribute\n" +
" dictionary:btree\n" +
" }\n" +
" }\n" +
"}\n";
try {
SearchBuilder sb = SearchBuilder.createFromString(def);
fail("Controlling dictionary for non-fast-search fields are not allowed.");
} catch (IllegalArgumentException e) {
assertEquals("For search 'test', field 'n1': You must specify 'attribute:fast-search' to allow dictionary control", e.getMessage());
}
}
} |
Consider adding a helper function to create the search object and perform the asserts as the same is duplicated in the next two tests. | public void testNumericBtreeSettings() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type int {\n" +
" indexing: summary | attribute\n" +
" attribute:fast-search\n" +
" dictionary:btree\n" +
" }\n" +
" }\n" +
"}\n";
SearchBuilder sb = SearchBuilder.createFromString(def);
Search search = sb.getSearch();
assertEquals(Dictionary.Type.BTREE, search.getAttribute("n1").getDictionary().getType());
assertEquals(AttributesConfig.Attribute.Dictionary.Type.BTREE,
getConfig(search).attribute().get(0).dictionary().type());
} | SearchBuilder sb = SearchBuilder.createFromString(def); | public void testNumericBtreeSettings() throws ParseException {
verifyNumericDictionaryControl(Dictionary.Type.BTREE,
AttributesConfig.Attribute.Dictionary.Type.BTREE,
"dictionary:btree");
} | class DictionaryTestCase {
private static AttributesConfig getConfig(Search search) {
AttributeFields attributes = new AttributeFields(search);
AttributesConfig.Builder builder = new AttributesConfig.Builder();
attributes.getConfig(builder);
return builder.build();
}
@Test
public void testDefaultDictionarySettings() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field s1 type string {\n" +
" indexing: attribute | summary\n" +
" }\n" +
"\n" +
" field n1 type int {\n" +
" indexing: summary | attribute\n" +
" }\n" +
" }\n" +
"}\n";
SearchBuilder sb = SearchBuilder.createFromString(def);
Search search = sb.getSearch();
assertEquals(Dictionary.Type.BTREE, search.getAttribute("s1").getDictionary().getType());
assertEquals(Dictionary.Type.BTREE, search.getAttribute("n1").getDictionary().getType());
}
@Test
@Test
public void testNumericHashSettings() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type int {\n" +
" indexing: summary | attribute\n" +
" attribute:fast-search\n" +
" dictionary:hash\n" +
" }\n" +
" }\n" +
"}\n";
SearchBuilder sb = SearchBuilder.createFromString(def);
Search search = sb.getSearch();
assertEquals(Dictionary.Type.HASH, search.getAttribute("n1").getDictionary().getType());
assertEquals(AttributesConfig.Attribute.Dictionary.Type.HASH,
getConfig(search).attribute().get(0).dictionary().type());
}
@Test
public void testNumericBtreeAndHashSettings() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type int {\n" +
" indexing: summary | attribute\n" +
" attribute:fast-search\n" +
" dictionary:hash\n" +
" dictionary:btree\n" +
" }\n" +
" }\n" +
"}\n";
SearchBuilder sb = SearchBuilder.createFromString(def);
Search search = sb.getSearch();
assertEquals(Dictionary.Type.BTREE_AND_HASH, search.getAttribute("n1").getDictionary().getType());
assertEquals(AttributesConfig.Attribute.Dictionary.Type.BTREE_AND_HASH,
getConfig(search).attribute().get(0).dictionary().type());
}
@Test
public void testNonNumericFieldsFailsDictionaryControl() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type string {\n" +
" indexing: summary | attribute\n" +
" dictionary:btree\n" +
" }\n" +
" }\n" +
"}\n";
try {
SearchBuilder sb = SearchBuilder.createFromString(def);
fail("Controlling dictionary for non-numeric fields are not yet supported.");
} catch (IllegalArgumentException e) {
assertEquals("For search 'test', field 'n1': You can only specify 'dictionary:' for numeric fields", e.getMessage());
}
}
@Test
public void testNonFastSearchFieldsFailsDictionaryControl() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type int {\n" +
" indexing: summary | attribute\n" +
" dictionary:btree\n" +
" }\n" +
" }\n" +
"}\n";
try {
SearchBuilder sb = SearchBuilder.createFromString(def);
fail("Controlling dictionary for non-fast-search fields are not allowed.");
} catch (IllegalArgumentException e) {
assertEquals("For search 'test', field 'n1': You must specify attribute:fast-search to allow dictionary control", e.getMessage());
}
}
} | class DictionaryTestCase {
private static AttributesConfig getConfig(Search search) {
AttributeFields attributes = new AttributeFields(search);
AttributesConfig.Builder builder = new AttributesConfig.Builder();
attributes.getConfig(builder);
return builder.build();
}
private Search createSearch(String def) throws ParseException {
SearchBuilder sb = SearchBuilder.createFromString(def);
return sb.getSearch();
}
@Test
public void testDefaultDictionarySettings() throws ParseException {
String def = TestUtil.joinLines(
"search test {",
" document test {",
" field s1 type string {",
" indexing: attribute | summary",
" }",
" field n1 type int {",
" indexing: summary | attribute",
" }",
" }",
"}");
Search search = createSearch(def);
assertEquals(Dictionary.Type.BTREE, search.getAttribute("s1").getDictionary().getType());
assertEquals(Dictionary.Type.BTREE, search.getAttribute("n1").getDictionary().getType());
}
void verifyNumericDictionaryControl(Dictionary.Type expected,
AttributesConfig.Attribute.Dictionary.Type.Enum expectedConfig,
String ... cfg) throws ParseException
{
String def = TestUtil.joinLines(
"search test {",
" document test {",
" field n1 type int {",
" indexing: summary | attribute",
" attribute:fast-search",
TestUtil.joinLines(cfg),
" }",
" }",
"}");
Search search = createSearch(def);
assertEquals(expected, search.getAttribute("n1").getDictionary().getType());
assertEquals(expectedConfig,
getConfig(search).attribute().get(0).dictionary().type());
}
@Test
@Test
public void testNumericHashSettings() throws ParseException {
verifyNumericDictionaryControl(Dictionary.Type.HASH,
AttributesConfig.Attribute.Dictionary.Type.HASH,
"dictionary:hash");
}
@Test
public void testNumericBtreeAndHashSettings() throws ParseException {
verifyNumericDictionaryControl(Dictionary.Type.BTREE_AND_HASH,
AttributesConfig.Attribute.Dictionary.Type.BTREE_AND_HASH,
"dictionary:btree", "dictionary:hash");
}
@Test
public void testNonNumericFieldsFailsDictionaryControl() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type string {\n" +
" indexing: summary | attribute\n" +
" dictionary:btree\n" +
" }\n" +
" }\n" +
"}\n";
try {
SearchBuilder sb = SearchBuilder.createFromString(def);
fail("Controlling dictionary for non-numeric fields are not yet supported.");
} catch (IllegalArgumentException e) {
assertEquals("For search 'test', field 'n1': You can only specify 'dictionary:' for numeric fields", e.getMessage());
}
}
@Test
public void testNonFastSearchFieldsFailsDictionaryControl() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type int {\n" +
" indexing: summary | attribute\n" +
" dictionary:btree\n" +
" }\n" +
" }\n" +
"}\n";
try {
SearchBuilder sb = SearchBuilder.createFromString(def);
fail("Controlling dictionary for non-fast-search fields are not allowed.");
} catch (IllegalArgumentException e) {
assertEquals("For search 'test', field 'n1': You must specify 'attribute:fast-search' to allow dictionary control", e.getMessage());
}
}
} |
Consider adding a helper function that creates the definition string, taking the content of _n1_ as parameter(s). | public void testNumericBtreeSettings() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type int {\n" +
" indexing: summary | attribute\n" +
" attribute:fast-search\n" +
" dictionary:btree\n" +
" }\n" +
" }\n" +
"}\n";
SearchBuilder sb = SearchBuilder.createFromString(def);
Search search = sb.getSearch();
assertEquals(Dictionary.Type.BTREE, search.getAttribute("n1").getDictionary().getType());
assertEquals(AttributesConfig.Attribute.Dictionary.Type.BTREE,
getConfig(search).attribute().get(0).dictionary().type());
} | "search test {\n" + | public void testNumericBtreeSettings() throws ParseException {
verifyNumericDictionaryControl(Dictionary.Type.BTREE,
AttributesConfig.Attribute.Dictionary.Type.BTREE,
"dictionary:btree");
} | class DictionaryTestCase {
private static AttributesConfig getConfig(Search search) {
AttributeFields attributes = new AttributeFields(search);
AttributesConfig.Builder builder = new AttributesConfig.Builder();
attributes.getConfig(builder);
return builder.build();
}
@Test
public void testDefaultDictionarySettings() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field s1 type string {\n" +
" indexing: attribute | summary\n" +
" }\n" +
"\n" +
" field n1 type int {\n" +
" indexing: summary | attribute\n" +
" }\n" +
" }\n" +
"}\n";
SearchBuilder sb = SearchBuilder.createFromString(def);
Search search = sb.getSearch();
assertEquals(Dictionary.Type.BTREE, search.getAttribute("s1").getDictionary().getType());
assertEquals(Dictionary.Type.BTREE, search.getAttribute("n1").getDictionary().getType());
}
@Test
@Test
public void testNumericHashSettings() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type int {\n" +
" indexing: summary | attribute\n" +
" attribute:fast-search\n" +
" dictionary:hash\n" +
" }\n" +
" }\n" +
"}\n";
SearchBuilder sb = SearchBuilder.createFromString(def);
Search search = sb.getSearch();
assertEquals(Dictionary.Type.HASH, search.getAttribute("n1").getDictionary().getType());
assertEquals(AttributesConfig.Attribute.Dictionary.Type.HASH,
getConfig(search).attribute().get(0).dictionary().type());
}
@Test
public void testNumericBtreeAndHashSettings() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type int {\n" +
" indexing: summary | attribute\n" +
" attribute:fast-search\n" +
" dictionary:hash\n" +
" dictionary:btree\n" +
" }\n" +
" }\n" +
"}\n";
SearchBuilder sb = SearchBuilder.createFromString(def);
Search search = sb.getSearch();
assertEquals(Dictionary.Type.BTREE_AND_HASH, search.getAttribute("n1").getDictionary().getType());
assertEquals(AttributesConfig.Attribute.Dictionary.Type.BTREE_AND_HASH,
getConfig(search).attribute().get(0).dictionary().type());
}
@Test
public void testNonNumericFieldsFailsDictionaryControl() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type string {\n" +
" indexing: summary | attribute\n" +
" dictionary:btree\n" +
" }\n" +
" }\n" +
"}\n";
try {
SearchBuilder sb = SearchBuilder.createFromString(def);
fail("Controlling dictionary for non-numeric fields are not yet supported.");
} catch (IllegalArgumentException e) {
assertEquals("For search 'test', field 'n1': You can only specify 'dictionary:' for numeric fields", e.getMessage());
}
}
@Test
public void testNonFastSearchFieldsFailsDictionaryControl() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type int {\n" +
" indexing: summary | attribute\n" +
" dictionary:btree\n" +
" }\n" +
" }\n" +
"}\n";
try {
SearchBuilder sb = SearchBuilder.createFromString(def);
fail("Controlling dictionary for non-fast-search fields are not allowed.");
} catch (IllegalArgumentException e) {
assertEquals("For search 'test', field 'n1': You must specify attribute:fast-search to allow dictionary control", e.getMessage());
}
}
} | class DictionaryTestCase {
private static AttributesConfig getConfig(Search search) {
AttributeFields attributes = new AttributeFields(search);
AttributesConfig.Builder builder = new AttributesConfig.Builder();
attributes.getConfig(builder);
return builder.build();
}
private Search createSearch(String def) throws ParseException {
SearchBuilder sb = SearchBuilder.createFromString(def);
return sb.getSearch();
}
@Test
public void testDefaultDictionarySettings() throws ParseException {
String def = TestUtil.joinLines(
"search test {",
" document test {",
" field s1 type string {",
" indexing: attribute | summary",
" }",
" field n1 type int {",
" indexing: summary | attribute",
" }",
" }",
"}");
Search search = createSearch(def);
assertEquals(Dictionary.Type.BTREE, search.getAttribute("s1").getDictionary().getType());
assertEquals(Dictionary.Type.BTREE, search.getAttribute("n1").getDictionary().getType());
}
void verifyNumericDictionaryControl(Dictionary.Type expected,
AttributesConfig.Attribute.Dictionary.Type.Enum expectedConfig,
String ... cfg) throws ParseException
{
String def = TestUtil.joinLines(
"search test {",
" document test {",
" field n1 type int {",
" indexing: summary | attribute",
" attribute:fast-search",
TestUtil.joinLines(cfg),
" }",
" }",
"}");
Search search = createSearch(def);
assertEquals(expected, search.getAttribute("n1").getDictionary().getType());
assertEquals(expectedConfig,
getConfig(search).attribute().get(0).dictionary().type());
}
@Test
@Test
public void testNumericHashSettings() throws ParseException {
verifyNumericDictionaryControl(Dictionary.Type.HASH,
AttributesConfig.Attribute.Dictionary.Type.HASH,
"dictionary:hash");
}
@Test
public void testNumericBtreeAndHashSettings() throws ParseException {
verifyNumericDictionaryControl(Dictionary.Type.BTREE_AND_HASH,
AttributesConfig.Attribute.Dictionary.Type.BTREE_AND_HASH,
"dictionary:btree", "dictionary:hash");
}
@Test
public void testNonNumericFieldsFailsDictionaryControl() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type string {\n" +
" indexing: summary | attribute\n" +
" dictionary:btree\n" +
" }\n" +
" }\n" +
"}\n";
try {
SearchBuilder sb = SearchBuilder.createFromString(def);
fail("Controlling dictionary for non-numeric fields are not yet supported.");
} catch (IllegalArgumentException e) {
assertEquals("For search 'test', field 'n1': You can only specify 'dictionary:' for numeric fields", e.getMessage());
}
}
@Test
public void testNonFastSearchFieldsFailsDictionaryControl() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type int {\n" +
" indexing: summary | attribute\n" +
" dictionary:btree\n" +
" }\n" +
" }\n" +
"}\n";
try {
SearchBuilder sb = SearchBuilder.createFromString(def);
fail("Controlling dictionary for non-fast-search fields are not allowed.");
} catch (IllegalArgumentException e) {
assertEquals("For search 'test', field 'n1': You must specify 'attribute:fast-search' to allow dictionary control", e.getMessage());
}
}
} |
Fixed | public void testDefaultDictionarySettings() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field s1 type string {\n" +
" indexing: attribute | summary\n" +
" }\n" +
"\n" +
" field n1 type int {\n" +
" indexing: summary | attribute\n" +
" }\n" +
" }\n" +
"}\n";
SearchBuilder sb = SearchBuilder.createFromString(def);
Search search = sb.getSearch();
assertEquals(Dictionary.Type.BTREE, search.getAttribute("s1").getDictionary().getType());
assertEquals(Dictionary.Type.BTREE, search.getAttribute("n1").getDictionary().getType());
} | "search test {\n" + | public void testDefaultDictionarySettings() throws ParseException {
String def = TestUtil.joinLines(
"search test {",
" document test {",
" field s1 type string {",
" indexing: attribute | summary",
" }",
" field n1 type int {",
" indexing: summary | attribute",
" }",
" }",
"}");
Search search = createSearch(def);
assertEquals(Dictionary.Type.BTREE, search.getAttribute("s1").getDictionary().getType());
assertEquals(Dictionary.Type.BTREE, search.getAttribute("n1").getDictionary().getType());
} | class DictionaryTestCase {
private static AttributesConfig getConfig(Search search) {
AttributeFields attributes = new AttributeFields(search);
AttributesConfig.Builder builder = new AttributesConfig.Builder();
attributes.getConfig(builder);
return builder.build();
}
@Test
@Test
public void testNumericBtreeSettings() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type int {\n" +
" indexing: summary | attribute\n" +
" attribute:fast-search\n" +
" dictionary:btree\n" +
" }\n" +
" }\n" +
"}\n";
SearchBuilder sb = SearchBuilder.createFromString(def);
Search search = sb.getSearch();
assertEquals(Dictionary.Type.BTREE, search.getAttribute("n1").getDictionary().getType());
assertEquals(AttributesConfig.Attribute.Dictionary.Type.BTREE,
getConfig(search).attribute().get(0).dictionary().type());
}
@Test
public void testNumericHashSettings() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type int {\n" +
" indexing: summary | attribute\n" +
" attribute:fast-search\n" +
" dictionary:hash\n" +
" }\n" +
" }\n" +
"}\n";
SearchBuilder sb = SearchBuilder.createFromString(def);
Search search = sb.getSearch();
assertEquals(Dictionary.Type.HASH, search.getAttribute("n1").getDictionary().getType());
assertEquals(AttributesConfig.Attribute.Dictionary.Type.HASH,
getConfig(search).attribute().get(0).dictionary().type());
}
@Test
public void testNumericBtreeAndHashSettings() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type int {\n" +
" indexing: summary | attribute\n" +
" attribute:fast-search\n" +
" dictionary:hash\n" +
" dictionary:btree\n" +
" }\n" +
" }\n" +
"}\n";
SearchBuilder sb = SearchBuilder.createFromString(def);
Search search = sb.getSearch();
assertEquals(Dictionary.Type.BTREE_AND_HASH, search.getAttribute("n1").getDictionary().getType());
assertEquals(AttributesConfig.Attribute.Dictionary.Type.BTREE_AND_HASH,
getConfig(search).attribute().get(0).dictionary().type());
}
@Test
public void testNonNumericFieldsFailsDictionaryControl() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type string {\n" +
" indexing: summary | attribute\n" +
" dictionary:btree\n" +
" }\n" +
" }\n" +
"}\n";
try {
SearchBuilder sb = SearchBuilder.createFromString(def);
fail("Controlling dictionary for non-numeric fields are not yet supported.");
} catch (IllegalArgumentException e) {
assertEquals("For search 'test', field 'n1': You can only specify 'dictionary:' for numeric fields", e.getMessage());
}
}
@Test
public void testNonFastSearchFieldsFailsDictionaryControl() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type int {\n" +
" indexing: summary | attribute\n" +
" dictionary:btree\n" +
" }\n" +
" }\n" +
"}\n";
try {
SearchBuilder sb = SearchBuilder.createFromString(def);
fail("Controlling dictionary for non-fast-search fields are not allowed.");
} catch (IllegalArgumentException e) {
assertEquals("For search 'test', field 'n1': You must specify attribute:fast-search to allow dictionary control", e.getMessage());
}
}
} | class DictionaryTestCase {
private static AttributesConfig getConfig(Search search) {
AttributeFields attributes = new AttributeFields(search);
AttributesConfig.Builder builder = new AttributesConfig.Builder();
attributes.getConfig(builder);
return builder.build();
}
private Search createSearch(String def) throws ParseException {
SearchBuilder sb = SearchBuilder.createFromString(def);
return sb.getSearch();
}
@Test
void verifyNumericDictionaryControl(Dictionary.Type expected,
AttributesConfig.Attribute.Dictionary.Type.Enum expectedConfig,
String ... cfg) throws ParseException
{
String def = TestUtil.joinLines(
"search test {",
" document test {",
" field n1 type int {",
" indexing: summary | attribute",
" attribute:fast-search",
TestUtil.joinLines(cfg),
" }",
" }",
"}");
Search search = createSearch(def);
assertEquals(expected, search.getAttribute("n1").getDictionary().getType());
assertEquals(expectedConfig,
getConfig(search).attribute().get(0).dictionary().type());
}
@Test
public void testNumericBtreeSettings() throws ParseException {
verifyNumericDictionaryControl(Dictionary.Type.BTREE,
AttributesConfig.Attribute.Dictionary.Type.BTREE,
"dictionary:btree");
}
@Test
public void testNumericHashSettings() throws ParseException {
verifyNumericDictionaryControl(Dictionary.Type.HASH,
AttributesConfig.Attribute.Dictionary.Type.HASH,
"dictionary:hash");
}
@Test
public void testNumericBtreeAndHashSettings() throws ParseException {
verifyNumericDictionaryControl(Dictionary.Type.BTREE_AND_HASH,
AttributesConfig.Attribute.Dictionary.Type.BTREE_AND_HASH,
"dictionary:btree", "dictionary:hash");
}
@Test
public void testNonNumericFieldsFailsDictionaryControl() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type string {\n" +
" indexing: summary | attribute\n" +
" dictionary:btree\n" +
" }\n" +
" }\n" +
"}\n";
try {
SearchBuilder sb = SearchBuilder.createFromString(def);
fail("Controlling dictionary for non-numeric fields are not yet supported.");
} catch (IllegalArgumentException e) {
assertEquals("For search 'test', field 'n1': You can only specify 'dictionary:' for numeric fields", e.getMessage());
}
}
@Test
public void testNonFastSearchFieldsFailsDictionaryControl() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type int {\n" +
" indexing: summary | attribute\n" +
" dictionary:btree\n" +
" }\n" +
" }\n" +
"}\n";
try {
SearchBuilder sb = SearchBuilder.createFromString(def);
fail("Controlling dictionary for non-fast-search fields are not allowed.");
} catch (IllegalArgumentException e) {
assertEquals("For search 'test', field 'n1': You must specify 'attribute:fast-search' to allow dictionary control", e.getMessage());
}
}
} |
Fixed | public void testNumericBtreeSettings() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type int {\n" +
" indexing: summary | attribute\n" +
" attribute:fast-search\n" +
" dictionary:btree\n" +
" }\n" +
" }\n" +
"}\n";
SearchBuilder sb = SearchBuilder.createFromString(def);
Search search = sb.getSearch();
assertEquals(Dictionary.Type.BTREE, search.getAttribute("n1").getDictionary().getType());
assertEquals(AttributesConfig.Attribute.Dictionary.Type.BTREE,
getConfig(search).attribute().get(0).dictionary().type());
} | SearchBuilder sb = SearchBuilder.createFromString(def); | public void testNumericBtreeSettings() throws ParseException {
verifyNumericDictionaryControl(Dictionary.Type.BTREE,
AttributesConfig.Attribute.Dictionary.Type.BTREE,
"dictionary:btree");
} | class DictionaryTestCase {
private static AttributesConfig getConfig(Search search) {
AttributeFields attributes = new AttributeFields(search);
AttributesConfig.Builder builder = new AttributesConfig.Builder();
attributes.getConfig(builder);
return builder.build();
}
@Test
public void testDefaultDictionarySettings() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field s1 type string {\n" +
" indexing: attribute | summary\n" +
" }\n" +
"\n" +
" field n1 type int {\n" +
" indexing: summary | attribute\n" +
" }\n" +
" }\n" +
"}\n";
SearchBuilder sb = SearchBuilder.createFromString(def);
Search search = sb.getSearch();
assertEquals(Dictionary.Type.BTREE, search.getAttribute("s1").getDictionary().getType());
assertEquals(Dictionary.Type.BTREE, search.getAttribute("n1").getDictionary().getType());
}
@Test
@Test
public void testNumericHashSettings() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type int {\n" +
" indexing: summary | attribute\n" +
" attribute:fast-search\n" +
" dictionary:hash\n" +
" }\n" +
" }\n" +
"}\n";
SearchBuilder sb = SearchBuilder.createFromString(def);
Search search = sb.getSearch();
assertEquals(Dictionary.Type.HASH, search.getAttribute("n1").getDictionary().getType());
assertEquals(AttributesConfig.Attribute.Dictionary.Type.HASH,
getConfig(search).attribute().get(0).dictionary().type());
}
@Test
public void testNumericBtreeAndHashSettings() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type int {\n" +
" indexing: summary | attribute\n" +
" attribute:fast-search\n" +
" dictionary:hash\n" +
" dictionary:btree\n" +
" }\n" +
" }\n" +
"}\n";
SearchBuilder sb = SearchBuilder.createFromString(def);
Search search = sb.getSearch();
assertEquals(Dictionary.Type.BTREE_AND_HASH, search.getAttribute("n1").getDictionary().getType());
assertEquals(AttributesConfig.Attribute.Dictionary.Type.BTREE_AND_HASH,
getConfig(search).attribute().get(0).dictionary().type());
}
@Test
public void testNonNumericFieldsFailsDictionaryControl() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type string {\n" +
" indexing: summary | attribute\n" +
" dictionary:btree\n" +
" }\n" +
" }\n" +
"}\n";
try {
SearchBuilder sb = SearchBuilder.createFromString(def);
fail("Controlling dictionary for non-numeric fields are not yet supported.");
} catch (IllegalArgumentException e) {
assertEquals("For search 'test', field 'n1': You can only specify 'dictionary:' for numeric fields", e.getMessage());
}
}
@Test
public void testNonFastSearchFieldsFailsDictionaryControl() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type int {\n" +
" indexing: summary | attribute\n" +
" dictionary:btree\n" +
" }\n" +
" }\n" +
"}\n";
try {
SearchBuilder sb = SearchBuilder.createFromString(def);
fail("Controlling dictionary for non-fast-search fields are not allowed.");
} catch (IllegalArgumentException e) {
assertEquals("For search 'test', field 'n1': You must specify attribute:fast-search to allow dictionary control", e.getMessage());
}
}
} | class DictionaryTestCase {
private static AttributesConfig getConfig(Search search) {
AttributeFields attributes = new AttributeFields(search);
AttributesConfig.Builder builder = new AttributesConfig.Builder();
attributes.getConfig(builder);
return builder.build();
}
private Search createSearch(String def) throws ParseException {
SearchBuilder sb = SearchBuilder.createFromString(def);
return sb.getSearch();
}
@Test
public void testDefaultDictionarySettings() throws ParseException {
String def = TestUtil.joinLines(
"search test {",
" document test {",
" field s1 type string {",
" indexing: attribute | summary",
" }",
" field n1 type int {",
" indexing: summary | attribute",
" }",
" }",
"}");
Search search = createSearch(def);
assertEquals(Dictionary.Type.BTREE, search.getAttribute("s1").getDictionary().getType());
assertEquals(Dictionary.Type.BTREE, search.getAttribute("n1").getDictionary().getType());
}
void verifyNumericDictionaryControl(Dictionary.Type expected,
AttributesConfig.Attribute.Dictionary.Type.Enum expectedConfig,
String ... cfg) throws ParseException
{
String def = TestUtil.joinLines(
"search test {",
" document test {",
" field n1 type int {",
" indexing: summary | attribute",
" attribute:fast-search",
TestUtil.joinLines(cfg),
" }",
" }",
"}");
Search search = createSearch(def);
assertEquals(expected, search.getAttribute("n1").getDictionary().getType());
assertEquals(expectedConfig,
getConfig(search).attribute().get(0).dictionary().type());
}
@Test
@Test
public void testNumericHashSettings() throws ParseException {
verifyNumericDictionaryControl(Dictionary.Type.HASH,
AttributesConfig.Attribute.Dictionary.Type.HASH,
"dictionary:hash");
}
@Test
public void testNumericBtreeAndHashSettings() throws ParseException {
verifyNumericDictionaryControl(Dictionary.Type.BTREE_AND_HASH,
AttributesConfig.Attribute.Dictionary.Type.BTREE_AND_HASH,
"dictionary:btree", "dictionary:hash");
}
@Test
public void testNonNumericFieldsFailsDictionaryControl() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type string {\n" +
" indexing: summary | attribute\n" +
" dictionary:btree\n" +
" }\n" +
" }\n" +
"}\n";
try {
SearchBuilder sb = SearchBuilder.createFromString(def);
fail("Controlling dictionary for non-numeric fields are not yet supported.");
} catch (IllegalArgumentException e) {
assertEquals("For search 'test', field 'n1': You can only specify 'dictionary:' for numeric fields", e.getMessage());
}
}
@Test
public void testNonFastSearchFieldsFailsDictionaryControl() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type int {\n" +
" indexing: summary | attribute\n" +
" dictionary:btree\n" +
" }\n" +
" }\n" +
"}\n";
try {
SearchBuilder sb = SearchBuilder.createFromString(def);
fail("Controlling dictionary for non-fast-search fields are not allowed.");
} catch (IllegalArgumentException e) {
assertEquals("For search 'test', field 'n1': You must specify 'attribute:fast-search' to allow dictionary control", e.getMessage());
}
}
} |
Fixed | public void testNumericBtreeSettings() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type int {\n" +
" indexing: summary | attribute\n" +
" attribute:fast-search\n" +
" dictionary:btree\n" +
" }\n" +
" }\n" +
"}\n";
SearchBuilder sb = SearchBuilder.createFromString(def);
Search search = sb.getSearch();
assertEquals(Dictionary.Type.BTREE, search.getAttribute("n1").getDictionary().getType());
assertEquals(AttributesConfig.Attribute.Dictionary.Type.BTREE,
getConfig(search).attribute().get(0).dictionary().type());
} | "search test {\n" + | public void testNumericBtreeSettings() throws ParseException {
verifyNumericDictionaryControl(Dictionary.Type.BTREE,
AttributesConfig.Attribute.Dictionary.Type.BTREE,
"dictionary:btree");
} | class DictionaryTestCase {
private static AttributesConfig getConfig(Search search) {
AttributeFields attributes = new AttributeFields(search);
AttributesConfig.Builder builder = new AttributesConfig.Builder();
attributes.getConfig(builder);
return builder.build();
}
@Test
public void testDefaultDictionarySettings() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field s1 type string {\n" +
" indexing: attribute | summary\n" +
" }\n" +
"\n" +
" field n1 type int {\n" +
" indexing: summary | attribute\n" +
" }\n" +
" }\n" +
"}\n";
SearchBuilder sb = SearchBuilder.createFromString(def);
Search search = sb.getSearch();
assertEquals(Dictionary.Type.BTREE, search.getAttribute("s1").getDictionary().getType());
assertEquals(Dictionary.Type.BTREE, search.getAttribute("n1").getDictionary().getType());
}
@Test
@Test
public void testNumericHashSettings() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type int {\n" +
" indexing: summary | attribute\n" +
" attribute:fast-search\n" +
" dictionary:hash\n" +
" }\n" +
" }\n" +
"}\n";
SearchBuilder sb = SearchBuilder.createFromString(def);
Search search = sb.getSearch();
assertEquals(Dictionary.Type.HASH, search.getAttribute("n1").getDictionary().getType());
assertEquals(AttributesConfig.Attribute.Dictionary.Type.HASH,
getConfig(search).attribute().get(0).dictionary().type());
}
@Test
public void testNumericBtreeAndHashSettings() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type int {\n" +
" indexing: summary | attribute\n" +
" attribute:fast-search\n" +
" dictionary:hash\n" +
" dictionary:btree\n" +
" }\n" +
" }\n" +
"}\n";
SearchBuilder sb = SearchBuilder.createFromString(def);
Search search = sb.getSearch();
assertEquals(Dictionary.Type.BTREE_AND_HASH, search.getAttribute("n1").getDictionary().getType());
assertEquals(AttributesConfig.Attribute.Dictionary.Type.BTREE_AND_HASH,
getConfig(search).attribute().get(0).dictionary().type());
}
@Test
public void testNonNumericFieldsFailsDictionaryControl() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type string {\n" +
" indexing: summary | attribute\n" +
" dictionary:btree\n" +
" }\n" +
" }\n" +
"}\n";
try {
SearchBuilder sb = SearchBuilder.createFromString(def);
fail("Controlling dictionary for non-numeric fields are not yet supported.");
} catch (IllegalArgumentException e) {
assertEquals("For search 'test', field 'n1': You can only specify 'dictionary:' for numeric fields", e.getMessage());
}
}
@Test
public void testNonFastSearchFieldsFailsDictionaryControl() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type int {\n" +
" indexing: summary | attribute\n" +
" dictionary:btree\n" +
" }\n" +
" }\n" +
"}\n";
try {
SearchBuilder sb = SearchBuilder.createFromString(def);
fail("Controlling dictionary for non-fast-search fields are not allowed.");
} catch (IllegalArgumentException e) {
assertEquals("For search 'test', field 'n1': You must specify attribute:fast-search to allow dictionary control", e.getMessage());
}
}
} | class DictionaryTestCase {
private static AttributesConfig getConfig(Search search) {
AttributeFields attributes = new AttributeFields(search);
AttributesConfig.Builder builder = new AttributesConfig.Builder();
attributes.getConfig(builder);
return builder.build();
}
private Search createSearch(String def) throws ParseException {
SearchBuilder sb = SearchBuilder.createFromString(def);
return sb.getSearch();
}
@Test
public void testDefaultDictionarySettings() throws ParseException {
String def = TestUtil.joinLines(
"search test {",
" document test {",
" field s1 type string {",
" indexing: attribute | summary",
" }",
" field n1 type int {",
" indexing: summary | attribute",
" }",
" }",
"}");
Search search = createSearch(def);
assertEquals(Dictionary.Type.BTREE, search.getAttribute("s1").getDictionary().getType());
assertEquals(Dictionary.Type.BTREE, search.getAttribute("n1").getDictionary().getType());
}
void verifyNumericDictionaryControl(Dictionary.Type expected,
AttributesConfig.Attribute.Dictionary.Type.Enum expectedConfig,
String ... cfg) throws ParseException
{
String def = TestUtil.joinLines(
"search test {",
" document test {",
" field n1 type int {",
" indexing: summary | attribute",
" attribute:fast-search",
TestUtil.joinLines(cfg),
" }",
" }",
"}");
Search search = createSearch(def);
assertEquals(expected, search.getAttribute("n1").getDictionary().getType());
assertEquals(expectedConfig,
getConfig(search).attribute().get(0).dictionary().type());
}
@Test
@Test
public void testNumericHashSettings() throws ParseException {
verifyNumericDictionaryControl(Dictionary.Type.HASH,
AttributesConfig.Attribute.Dictionary.Type.HASH,
"dictionary:hash");
}
@Test
public void testNumericBtreeAndHashSettings() throws ParseException {
verifyNumericDictionaryControl(Dictionary.Type.BTREE_AND_HASH,
AttributesConfig.Attribute.Dictionary.Type.BTREE_AND_HASH,
"dictionary:btree", "dictionary:hash");
}
@Test
public void testNonNumericFieldsFailsDictionaryControl() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type string {\n" +
" indexing: summary | attribute\n" +
" dictionary:btree\n" +
" }\n" +
" }\n" +
"}\n";
try {
SearchBuilder sb = SearchBuilder.createFromString(def);
fail("Controlling dictionary for non-numeric fields are not yet supported.");
} catch (IllegalArgumentException e) {
assertEquals("For search 'test', field 'n1': You can only specify 'dictionary:' for numeric fields", e.getMessage());
}
}
@Test
public void testNonFastSearchFieldsFailsDictionaryControl() throws ParseException {
String def =
"search test {\n" +
" document test {\n" +
" field n1 type int {\n" +
" indexing: summary | attribute\n" +
" dictionary:btree\n" +
" }\n" +
" }\n" +
"}\n";
try {
SearchBuilder sb = SearchBuilder.createFromString(def);
fail("Controlling dictionary for non-fast-search fields are not allowed.");
} catch (IllegalArgumentException e) {
assertEquals("For search 'test', field 'n1': You must specify 'attribute:fast-search' to allow dictionary control", e.getMessage());
}
}
} |
Why not check just `b == 0`? | private static double divide(double a, double b) {
if (a == 0 && b == 0) return 0;
return a / b;
} | if (a == 0 && b == 0) return 0; | private static double divide(double a, double b) {
if (a == 0 && b == 0) return 0;
return a / b;
} | class NodeRepoStats {
private final Load load;
private final Load activeLoad;
private NodeRepoStats(Load load, Load activeLoad) {
this.load = load;
this.activeLoad = activeLoad;
}
/**
* Returns the current average work-extracting utilization in this node repo over all nodes.
* Capacity not allocated to active nodes are taken to have 0 utilization as it provides no useful work.
*/
public Load load() { return load; }
/** Returns the current average utilization in this node repo over all active nodes. */
public Load activeLoad() { return activeLoad; }
public static NodeRepoStats computeOver(NodeRepository nodeRepository) {
NodeList allNodes = nodeRepository.nodes().list();
NodeResources totalActiveResources = new NodeResources(0, 0, 0, 0);
double cpu = 0, memory = 0, disk = 0;
for (var nodeTimeseries : nodeRepository.metricsDb().getNodeTimeseries(Duration.ofHours(1), Set.of())) {
Optional<Node> node = allNodes.node(nodeTimeseries.hostname());
if (node.isEmpty() || node.get().state() != Node.State.active) continue;
Optional<NodeMetricSnapshot> snapshot = nodeTimeseries.last();
if (snapshot.isEmpty()) continue;
cpu += snapshot.get().cpu() * node.get().resources().vcpu();
memory += snapshot.get().memory() * node.get().resources().memoryGb();
disk += snapshot.get().disk() * node.get().resources().diskGb();
totalActiveResources = totalActiveResources.add(node.get().resources().justNumbers());
}
NodeResources totalHostResources = new NodeResources(0, 0, 0, 0);
for (var host : allNodes.hosts()) {
totalHostResources = totalHostResources.add(host.resources().justNumbers());
}
Load load = new Load(divide(cpu, totalHostResources.vcpu()),
divide(memory, totalHostResources.memoryGb()),
divide(disk, totalHostResources.diskGb()));
Load activeLoad = new Load(divide(cpu, totalActiveResources.vcpu()),
divide(memory, totalActiveResources.memoryGb()),
divide(disk, totalActiveResources.diskGb()));
return new NodeRepoStats(load, activeLoad);
}
} | class NodeRepoStats {
private final Load load;
private final Load activeLoad;
private NodeRepoStats(Load load, Load activeLoad) {
this.load = load;
this.activeLoad = activeLoad;
}
/**
* Returns the current average work-extracting utilization in this node repo over all nodes.
* Capacity not allocated to active nodes are taken to have 0 utilization as it provides no useful work.
*/
public Load load() { return load; }
/** Returns the current average utilization in this node repo over all active nodes. */
public Load activeLoad() { return activeLoad; }
public static NodeRepoStats computeOver(NodeRepository nodeRepository) {
NodeList allNodes = nodeRepository.nodes().list();
NodeResources totalActiveResources = new NodeResources(0, 0, 0, 0);
double cpu = 0, memory = 0, disk = 0;
for (var nodeTimeseries : nodeRepository.metricsDb().getNodeTimeseries(Duration.ofHours(1), Set.of())) {
Optional<Node> node = allNodes.node(nodeTimeseries.hostname());
if (node.isEmpty() || node.get().state() != Node.State.active) continue;
Optional<NodeMetricSnapshot> snapshot = nodeTimeseries.last();
if (snapshot.isEmpty()) continue;
cpu += snapshot.get().cpu() * node.get().resources().vcpu();
memory += snapshot.get().memory() * node.get().resources().memoryGb();
disk += snapshot.get().disk() * node.get().resources().diskGb();
totalActiveResources = totalActiveResources.add(node.get().resources().justNumbers());
}
NodeResources totalHostResources = new NodeResources(0, 0, 0, 0);
for (var host : allNodes.hosts()) {
totalHostResources = totalHostResources.add(host.resources().justNumbers());
}
Load load = new Load(divide(cpu, totalHostResources.vcpu()),
divide(memory, totalHostResources.memoryGb()),
divide(disk, totalHostResources.diskGb()));
Load activeLoad = new Load(divide(cpu, totalActiveResources.vcpu()),
divide(memory, totalActiveResources.memoryGb()),
divide(disk, totalActiveResources.diskGb()));
return new NodeRepoStats(load, activeLoad);
}
} |
Shouldn't we also divide by ideal here? If the ideal wants 0.4 CPU util, and we use 0.4, we aren't wasting 0.6... | public static NodeRepoStats computeOver(NodeRepository nodeRepository) {
NodeList allNodes = nodeRepository.nodes().list();
NodeResources totalActiveResources = new NodeResources(0, 0, 0, 0);
double cpu = 0, memory = 0, disk = 0;
for (var nodeTimeseries : nodeRepository.metricsDb().getNodeTimeseries(Duration.ofHours(1), Set.of())) {
Optional<Node> node = allNodes.node(nodeTimeseries.hostname());
if (node.isEmpty() || node.get().state() != Node.State.active) continue;
Optional<NodeMetricSnapshot> snapshot = nodeTimeseries.last();
if (snapshot.isEmpty()) continue;
cpu += snapshot.get().cpu() * node.get().resources().vcpu();
memory += snapshot.get().memory() * node.get().resources().memoryGb();
disk += snapshot.get().disk() * node.get().resources().diskGb();
totalActiveResources = totalActiveResources.add(node.get().resources().justNumbers());
}
NodeResources totalHostResources = new NodeResources(0, 0, 0, 0);
for (var host : allNodes.hosts()) {
totalHostResources = totalHostResources.add(host.resources().justNumbers());
}
Load load = new Load(divide(cpu, totalHostResources.vcpu()),
divide(memory, totalHostResources.memoryGb()),
divide(disk, totalHostResources.diskGb()));
Load activeLoad = new Load(divide(cpu, totalActiveResources.vcpu()),
divide(memory, totalActiveResources.memoryGb()),
divide(disk, totalActiveResources.diskGb()));
return new NodeRepoStats(load, activeLoad);
} | cpu += snapshot.get().cpu() * node.get().resources().vcpu(); | public static NodeRepoStats computeOver(NodeRepository nodeRepository) {
NodeList allNodes = nodeRepository.nodes().list();
NodeResources totalActiveResources = new NodeResources(0, 0, 0, 0);
double cpu = 0, memory = 0, disk = 0;
for (var nodeTimeseries : nodeRepository.metricsDb().getNodeTimeseries(Duration.ofHours(1), Set.of())) {
Optional<Node> node = allNodes.node(nodeTimeseries.hostname());
if (node.isEmpty() || node.get().state() != Node.State.active) continue;
Optional<NodeMetricSnapshot> snapshot = nodeTimeseries.last();
if (snapshot.isEmpty()) continue;
cpu += snapshot.get().cpu() * node.get().resources().vcpu();
memory += snapshot.get().memory() * node.get().resources().memoryGb();
disk += snapshot.get().disk() * node.get().resources().diskGb();
totalActiveResources = totalActiveResources.add(node.get().resources().justNumbers());
}
NodeResources totalHostResources = new NodeResources(0, 0, 0, 0);
for (var host : allNodes.hosts()) {
totalHostResources = totalHostResources.add(host.resources().justNumbers());
}
Load load = new Load(divide(cpu, totalHostResources.vcpu()),
divide(memory, totalHostResources.memoryGb()),
divide(disk, totalHostResources.diskGb()));
Load activeLoad = new Load(divide(cpu, totalActiveResources.vcpu()),
divide(memory, totalActiveResources.memoryGb()),
divide(disk, totalActiveResources.diskGb()));
return new NodeRepoStats(load, activeLoad);
} | class NodeRepoStats {
private final Load load;
private final Load activeLoad;
private NodeRepoStats(Load load, Load activeLoad) {
this.load = load;
this.activeLoad = activeLoad;
}
/**
* Returns the current average work-extracting utilization in this node repo over all nodes.
* Capacity not allocated to active nodes are taken to have 0 utilization as it provides no useful work.
*/
public Load load() { return load; }
/** Returns the current average utilization in this node repo over all active nodes. */
public Load activeLoad() { return activeLoad; }
private static double divide(double a, double b) {
if (a == 0 && b == 0) return 0;
return a / b;
}
} | class NodeRepoStats {
private final Load load;
private final Load activeLoad;
private NodeRepoStats(Load load, Load activeLoad) {
this.load = load;
this.activeLoad = activeLoad;
}
/**
* Returns the current average work-extracting utilization in this node repo over all nodes.
* Capacity not allocated to active nodes are taken to have 0 utilization as it provides no useful work.
*/
public Load load() { return load; }
/** Returns the current average utilization in this node repo over all active nodes. */
public Load activeLoad() { return activeLoad; }
private static double divide(double a, double b) {
if (a == 0 && b == 0) return 0;
return a / b;
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.