comment stringlengths 1 45k | method_body stringlengths 23 281k | target_code stringlengths 0 5.16k | method_body_after stringlengths 12 281k | context_before stringlengths 8 543k | context_after stringlengths 8 543k |
|---|---|---|---|---|---|
```suggestion return cliArgs.readFeedFromStandardInput() ? systemIn : Files.newInputStream(cliArgs.inputFile().get()); ``` | private InputStream createFeedInputStream(CliArguments cliArgs) throws CliArguments.CliArgumentsException, IOException {
return new BufferedInputStream(
cliArgs.readFeedFromStandardInput() ? systemIn : Files.newInputStream(cliArgs.inputFile().get()));
} | cliArgs.readFeedFromStandardInput() ? systemIn : Files.newInputStream(cliArgs.inputFile().get())); | private InputStream createFeedInputStream(CliArguments cliArgs) throws CliArguments.CliArgumentsException, IOException {
return cliArgs.readFeedFromStandardInput() ? systemIn : Files.newInputStream(cliArgs.inputFile().get());
} | class CliClient {
private final PrintStream systemOut;
private final PrintStream systemError;
private final InputStream systemIn;
private final Properties systemProperties;
private CliClient(PrintStream systemOut, PrintStream systemError, InputStream systemIn, Properties systemProperties) {
this.systemOut = systemOut;
this.systemError = systemError;
this.systemIn = systemIn;
this.systemProperties = systemProperties;
}
public static void main(String[] args) {
CliClient client = new CliClient(System.out, System.err, System.in, System.getProperties());
int exitCode = client.run(args);
System.exit(exitCode);
}
private int run(String[] rawArgs) {
CliArguments cliArgs = null;
try {
cliArgs = CliArguments.fromRawArgs(rawArgs);
if (cliArgs.helpSpecified()) {
cliArgs.printHelp(systemOut);
return 0;
}
if (cliArgs.versionSpecified()) {
systemOut.println(Vespa.VERSION);
return 0;
}
try (InputStream in = createFeedInputStream(cliArgs);
JsonStreamFeeder feeder = createJsonFeeder(cliArgs)) {
if (cliArgs.benchmarkModeEnabled()) {
printBenchmarkResult(feeder.benchmark(in));
} else {
feeder.feed(in);
}
}
return 0;
} catch (CliArguments.CliArgumentsException | IOException e) {
boolean verbose = cliArgs != null && cliArgs.verboseSpecified();
return handleException(verbose, e);
} catch (Exception e) {
boolean verbose = cliArgs != null && cliArgs.verboseSpecified();
return handleException(verbose, "Unknown failure: " + e.getMessage(), e);
}
}
private static FeedClient createFeedClient(CliArguments cliArgs) throws CliArguments.CliArgumentsException {
FeedClientBuilder builder = FeedClientBuilder.create(cliArgs.endpoint());
cliArgs.connections().ifPresent(builder::setMaxConnections);
cliArgs.maxStreamsPerConnection().ifPresent(builder::setMaxConnections);
if (cliArgs.sslHostnameVerificationDisabled()) {
builder.setHostnameVerifier(AcceptAllHostnameVerifier.INSTANCE);
}
cliArgs.certificateAndKey().ifPresent(c -> builder.setCertificate(c.certificateFile, c.privateKeyFile));
cliArgs.caCertificates().ifPresent(builder::setCaCertificates);
cliArgs.headers().forEach(builder::addRequestHeader);
return builder.build();
}
private static JsonStreamFeeder createJsonFeeder(CliArguments cliArgs) throws CliArguments.CliArgumentsException, IOException {
FeedClient feedClient = createFeedClient(cliArgs);
JsonStreamFeeder.Builder builder = JsonStreamFeeder.builder(feedClient);
cliArgs.timeout().ifPresent(builder::withTimeout);
cliArgs.route().ifPresent(builder::withRoute);
cliArgs.traceLevel().ifPresent(builder::withTracelevel);
return builder.build();
}
private void printBenchmarkResult(JsonStreamFeeder.BenchmarkResult result) throws IOException {
JsonFactory factory = new JsonFactory();
try (JsonGenerator generator = factory.createGenerator(systemOut).useDefaultPrettyPrinter()) {
generator.writeStartObject();
generator.writeNumberField("feeder.runtime", result.duration.toMillis());
generator.writeNumberField("feeder.okcount", result.okCount);
generator.writeNumberField("feeder.errorcount", result.errorCount);
generator.writeNumberField("feeder.throughput", result.throughput);
generator.writeEndObject();
}
}
private int handleException(boolean verbose, Exception e) { return handleException(verbose, e.getMessage(), e); }
private int handleException(boolean verbose, String message, Exception exception) {
systemError.println(message);
if (debugMode() || verbose) {
exception.printStackTrace(systemError);
}
return 1;
}
private boolean debugMode() {
return Boolean.parseBoolean(systemProperties.getProperty("VESPA_DEBUG", Boolean.FALSE.toString()));
}
private static class AcceptAllHostnameVerifier implements HostnameVerifier {
static final AcceptAllHostnameVerifier INSTANCE = new AcceptAllHostnameVerifier();
@Override public boolean verify(String hostname, SSLSession session) { return true; }
}
} | class CliClient {
private final PrintStream systemOut;
private final PrintStream systemError;
private final InputStream systemIn;
private final Properties systemProperties;
private CliClient(PrintStream systemOut, PrintStream systemError, InputStream systemIn, Properties systemProperties) {
this.systemOut = systemOut;
this.systemError = systemError;
this.systemIn = systemIn;
this.systemProperties = systemProperties;
}
public static void main(String[] args) {
CliClient client = new CliClient(System.out, System.err, System.in, System.getProperties());
int exitCode = client.run(args);
System.exit(exitCode);
}
private int run(String[] rawArgs) {
CliArguments cliArgs = null;
try {
cliArgs = CliArguments.fromRawArgs(rawArgs);
if (cliArgs.helpSpecified()) {
cliArgs.printHelp(systemOut);
return 0;
}
if (cliArgs.versionSpecified()) {
systemOut.println(Vespa.VERSION);
return 0;
}
try (InputStream in = createFeedInputStream(cliArgs);
JsonStreamFeeder feeder = createJsonFeeder(cliArgs)) {
if (cliArgs.benchmarkModeEnabled()) {
printBenchmarkResult(feeder.benchmark(in));
} else {
feeder.feed(in);
}
}
return 0;
} catch (CliArguments.CliArgumentsException | IOException e) {
boolean verbose = cliArgs != null && cliArgs.verboseSpecified();
return handleException(verbose, e);
} catch (Exception e) {
boolean verbose = cliArgs != null && cliArgs.verboseSpecified();
return handleException(verbose, "Unknown failure: " + e.getMessage(), e);
}
}
private static FeedClient createFeedClient(CliArguments cliArgs) throws CliArguments.CliArgumentsException {
FeedClientBuilder builder = FeedClientBuilder.create(cliArgs.endpoint());
cliArgs.connections().ifPresent(builder::setMaxConnections);
cliArgs.maxStreamsPerConnection().ifPresent(builder::setMaxConnections);
if (cliArgs.sslHostnameVerificationDisabled()) {
builder.setHostnameVerifier(AcceptAllHostnameVerifier.INSTANCE);
}
cliArgs.certificateAndKey().ifPresent(c -> builder.setCertificate(c.certificateFile, c.privateKeyFile));
cliArgs.caCertificates().ifPresent(builder::setCaCertificates);
cliArgs.headers().forEach(builder::addRequestHeader);
return builder.build();
}
private static JsonStreamFeeder createJsonFeeder(CliArguments cliArgs) throws CliArguments.CliArgumentsException, IOException {
FeedClient feedClient = createFeedClient(cliArgs);
JsonStreamFeeder.Builder builder = JsonStreamFeeder.builder(feedClient);
cliArgs.timeout().ifPresent(builder::withTimeout);
cliArgs.route().ifPresent(builder::withRoute);
cliArgs.traceLevel().ifPresent(builder::withTracelevel);
return builder.build();
}
private void printBenchmarkResult(JsonStreamFeeder.BenchmarkResult result) throws IOException {
JsonFactory factory = new JsonFactory();
try (JsonGenerator generator = factory.createGenerator(systemOut).useDefaultPrettyPrinter()) {
generator.writeStartObject();
generator.writeNumberField("feeder.runtime", result.duration.toMillis());
generator.writeNumberField("feeder.okcount", result.okCount);
generator.writeNumberField("feeder.errorcount", result.errorCount);
generator.writeNumberField("feeder.throughput", result.throughput);
generator.writeEndObject();
}
}
private int handleException(boolean verbose, Exception e) { return handleException(verbose, e.getMessage(), e); }
private int handleException(boolean verbose, String message, Exception exception) {
systemError.println(message);
if (debugMode() || verbose) {
exception.printStackTrace(systemError);
}
return 1;
}
private boolean debugMode() {
return Boolean.parseBoolean(systemProperties.getProperty("VESPA_DEBUG", Boolean.FALSE.toString()));
}
private static class AcceptAllHostnameVerifier implements HostnameVerifier {
static final AcceptAllHostnameVerifier INSTANCE = new AcceptAllHostnameVerifier();
@Override public boolean verify(String hostname, SSLSession session) { return true; }
}
} |
The restart must be triggered only after it reaches "parked". | private List<Node> decommission(String hostname, DecommissionOperation op, Agent agent, Instant instant) {
Optional<NodeMutex> nodeMutex = lockAndGet(hostname);
if (nodeMutex.isEmpty()) return List.of();
Node host = nodeMutex.get().node();
if (!host.type().isHost()) throw new IllegalArgumentException("Cannot " + op + " non-host " + host);
List<Node> result;
boolean wantToDeprovision = op == DecommissionOperation.deprovision;
boolean wantToRebuild = op == DecommissionOperation.rebuild;
try (NodeMutex lock = nodeMutex.get(); Mutex allocationLock = lockUnallocated()) {
host = lock.node();
result = performOn(list(allocationLock).childrenOf(host),
(node, nodeLock) -> write(node.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant),
nodeLock));
Node newHost = host.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant);
if (op == DecommissionOperation.encrypt) {
Report report = Report.basicReport(Report.WANT_TO_ENCRYPT_ID, Report.Type.UNSPECIFIED, instant, "");
newHost = newHost.with(newHost.reports().withReport(report))
.withRestart(newHost.allocation().get().restartGeneration().withIncreasedWanted());
}
result.add(write(newHost, lock));
}
return result;
} | .withRestart(newHost.allocation().get().restartGeneration().withIncreasedWanted()); | private List<Node> decommission(String hostname, DecommissionOperation op, Agent agent, Instant instant) {
Optional<NodeMutex> nodeMutex = lockAndGet(hostname);
if (nodeMutex.isEmpty()) return List.of();
Node host = nodeMutex.get().node();
if (!host.type().isHost()) throw new IllegalArgumentException("Cannot " + op + " non-host " + host);
List<Node> result;
boolean wantToDeprovision = op == DecommissionOperation.deprovision;
boolean wantToRebuild = op == DecommissionOperation.rebuild;
try (NodeMutex lock = nodeMutex.get(); Mutex allocationLock = lockUnallocated()) {
host = lock.node();
result = performOn(list(allocationLock).childrenOf(host),
(node, nodeLock) -> write(node.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant),
nodeLock));
Node newHost = host.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant);
if (op == DecommissionOperation.encrypt) {
Report report = Report.basicReport(Report.WANT_TO_ENCRYPT_ID, Report.Type.UNSPECIFIED, instant, "");
newHost = newHost.with(newHost.reports().withReport(report));
}
result.add(write(newHost, lock));
}
return result;
} | class Nodes {
private static final Logger log = Logger.getLogger(Nodes.class.getName());
private final Zone zone;
private final Clock clock;
private final CuratorDatabaseClient db;
public Nodes(CuratorDatabaseClient db, Zone zone, Clock clock) {
this.zone = zone;
this.clock = clock;
this.db = db;
}
/** Read and write all nodes to make sure they are stored in the latest version of the serialized format */
public void rewrite() {
Instant start = clock.instant();
int nodesWritten = 0;
for (Node.State state : Node.State.values()) {
List<Node> nodes = db.readNodes(state);
db.writeTo(state, nodes, Agent.system, Optional.empty());
nodesWritten += nodes.size();
}
Instant end = clock.instant();
log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end)));
}
/**
* Finds and returns the node with the hostname in any of the given states, or empty if not found
*
* @param hostname the full host name of the node
* @param inState the states the node may be in. If no states are given, it will be returned from any state
* @return the node, or empty if it was not found in any of the given states
*/
public Optional<Node> node(String hostname, Node.State... inState) {
return db.readNode(hostname, inState);
}
/**
* Returns a list of nodes in this repository in any of the given states
*
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
*/
public NodeList list(Node.State... inState) {
return NodeList.copyOf(db.readNodes(inState));
}
/** Returns a locked list of all nodes in this repository */
public LockedNodeList list(Mutex lock) {
return new LockedNodeList(list().asList(), lock);
}
/**
* Returns whether the zone managed by this node repository seems to be working.
* If too many nodes are not responding, there is probably some zone-wide issue
* and we should probably refrain from making changes to it.
*/
public boolean isWorking() {
NodeList activeNodes = list(Node.State.active);
if (activeNodes.size() <= 5) return true;
NodeList downNodes = activeNodes.down();
return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 );
}
/** Adds a list of newly created reserved nodes to the node repository */
public List<Node> addReservedNodes(LockedNodeList nodes) {
for (Node node : nodes) {
if ( node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
illegal("Cannot add " + node + ": This is not a child node");
if (node.allocation().isEmpty())
illegal("Cannot add " + node + ": Child nodes need to be allocated");
Optional<Node> existing = node(node.hostname());
if (existing.isPresent())
illegal("Cannot add " + node + ": A node with this name already exists (" +
existing.get() + ", " + existing.get().history() + "). Node to be added: " +
node + ", " + node.history());
}
return db.addNodesInState(nodes.asList(), Node.State.reserved, Agent.system);
}
/**
* Adds a list of (newly created) nodes to the node repository as provisioned nodes.
* If any of the nodes already exists in the deprovisioned state, the new node will be merged
* with the history of that node.
*/
public List<Node> addNodes(List<Node> nodes, Agent agent) {
try (Mutex lock = lockUnallocated()) {
List<Node> nodesToAdd = new ArrayList<>();
List<Node> nodesToRemove = new ArrayList<>();
for (int i = 0; i < nodes.size(); i++) {
var node = nodes.get(i);
for (int j = 0; j < i; j++) {
if (node.equals(nodes.get(j)))
illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
}
Optional<Node> existing = node(node.hostname());
if (existing.isPresent()) {
if (existing.get().state() != Node.State.deprovisioned)
illegal("Cannot add " + node + ": A node with this name already exists");
node = node.with(existing.get().history());
node = node.with(existing.get().reports());
node = node.with(node.status().withFailCount(existing.get().status().failCount()));
if (existing.get().status().firmwareVerifiedAt().isPresent())
node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get()));
nodesToRemove.add(existing.get());
}
nodesToAdd.add(node);
}
NestedTransaction transaction = new NestedTransaction();
List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), Node.State.provisioned, agent, transaction);
db.removeNodes(nodesToRemove, transaction);
transaction.commit();
return resultingNodes;
}
}
/** Sets a list of nodes ready and returns the nodes in the ready state */
public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
try (Mutex lock = lockUnallocated()) {
List<Node> nodesWithResetFields = nodes.stream()
.map(node -> {
if (node.state() != Node.State.provisioned && node.state() != Node.State.dirty)
illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
return node.withWantToRetire(false,
false,
false,
Agent.system,
clock.instant());
})
.collect(Collectors.toList());
return db.writeTo(Node.State.ready, nodesWithResetFields, agent, Optional.of(reason));
}
}
public Node setReady(String hostname, Agent agent, String reason) {
Node nodeToReady = requireNode(hostname);
if (nodeToReady.state() == Node.State.ready) return nodeToReady;
return setReady(List.of(nodeToReady), agent, reason).get(0);
}
/** Reserve nodes. This method does <b>not</b> lock the node repository */
public List<Node> reserve(List<Node> nodes) {
return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
}
/** Activate nodes. This method does <b>not</b> lock the node repository */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
}
/**
* Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
*
* @param application the application the nodes belong to
* @param nodes the nodes to make removable. These nodes MUST be in the active state.
*/
public void setRemovable(ApplicationId application, List<Node> nodes) {
try (Mutex lock = lock(application)) {
List<Node> removableNodes = nodes.stream()
.map(node -> node.with(node.allocation().get().removable(true)))
.collect(Collectors.toList());
write(removableNodes, lock);
}
}
/**
* Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the
* transaction commits.
*/
public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) {
var stateless = NodeList.copyOf(nodes).stateless();
var stateful = NodeList.copyOf(nodes).stateful();
List<Node> written = new ArrayList<>();
written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction.nested()));
written.addAll(db.writeTo(Node.State.inactive, stateful.asList(), Agent.application, Optional.empty(), transaction.nested()));
return written;
}
/**
* Fails these nodes in a transaction and returns the nodes in the new state which will hold if the
* transaction commits.
*/
public List<Node> fail(List<Node> nodes, ApplicationTransaction transaction) {
return fail(nodes, Agent.application, "Failed by application", transaction.nested());
}
public List<Node> fail(List<Node> nodes, Agent agent, String reason) {
NestedTransaction transaction = new NestedTransaction();
nodes = fail(nodes, agent, reason, transaction);
transaction.commit();;
return nodes;
}
private List<Node> fail(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
nodes = nodes.stream()
.map(n -> n.withWantToFail(false, agent, clock.instant()))
.collect(Collectors.toList());
return db.writeTo(Node.State.failed, nodes, agent, Optional.of(reason), transaction);
}
/** Move nodes to the dirty state */
public List<Node> deallocate(List<Node> nodes, Agent agent, String reason) {
return performOn(NodeList.copyOf(nodes), (node, lock) -> deallocate(node, agent, reason));
}
public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) {
Node nodeToDirty = node(hostname).orElseThrow(() ->
new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));
List<Node> nodesToDirty =
(nodeToDirty.type().isHost() ?
Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
Stream.of(nodeToDirty))
.filter(node -> node.state() != Node.State.dirty)
.collect(Collectors.toList());
List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
.filter(node -> node.state() != Node.State.provisioned)
.filter(node -> node.state() != Node.State.failed)
.filter(node -> node.state() != Node.State.parked)
.filter(node -> node.state() != Node.State.breakfixed)
.map(Node::hostname)
.collect(Collectors.toList());
if ( ! hostnamesNotAllowedToDirty.isEmpty())
illegal("Could not deallocate " + nodeToDirty + ": " +
hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]");
return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).collect(Collectors.toList());
}
/**
* Set a node dirty or parked, allowed if it is in the provisioned, inactive, failed or parked state.
* Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
*/
public Node deallocate(Node node, Agent agent, String reason) {
NestedTransaction transaction = new NestedTransaction();
Node deallocated = deallocate(node, agent, reason, transaction);
transaction.commit();
return deallocated;
}
public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
return nodes.stream().map(node -> deallocate(node, agent, reason, transaction)).collect(Collectors.toList());
}
public Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) {
if (parkOnDeallocationOf(node, agent))
return park(node.hostname(), false, agent, reason, transaction);
else
return db.writeTo(Node.State.dirty, List.of(node), agent, Optional.of(reason), transaction).get(0);
}
/**
* Fails this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node fail(String hostname, Agent agent, String reason) {
return fail(hostname, true, agent, reason);
}
public Node fail(String hostname, boolean keepAllocation, Agent agent, String reason) {
return move(hostname, Node.State.failed, agent, keepAllocation, Optional.of(reason));
}
/**
* Fails all the nodes that are children of hostname before finally failing the hostname itself.
* Non-active nodes are failed immediately, while active nodes are marked as wantToFail.
* The host is failed if it has no active nodes and marked wantToFail if it has.
*
* @return all the nodes that were changed by this request
*/
public List<Node> failOrMarkRecursively(String hostname, Agent agent, String reason) {
NodeList children = list().childrenOf(hostname);
List<Node> changed = performOn(children, (node, lock) -> failOrMark(node, agent, reason, lock));
if (children.state(Node.State.active).isEmpty())
changed.add(move(hostname, Node.State.failed, agent, true, Optional.of(reason)));
else
changed.addAll(performOn(NodeList.of(node(hostname).orElseThrow()), (node, lock) -> failOrMark(node, agent, reason, lock)));
return changed;
}
private Node failOrMark(Node node, Agent agent, String reason, Mutex lock) {
if (node.state() == Node.State.active) {
node = node.withWantToFail(true, agent, clock.instant());
write(node, lock);
return node;
} else {
return move(node.hostname(), Node.State.failed, agent, true, Optional.of(reason));
}
}
/**
* Parks this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
NestedTransaction transaction = new NestedTransaction();
Node parked = park(hostname, keepAllocation, agent, reason, transaction);
transaction.commit();
return parked;
}
public Node park(String hostname, boolean keepAllocation, Agent agent, String reason, NestedTransaction transaction) {
return move(hostname, Node.State.parked, agent, keepAllocation, Optional.of(reason), transaction);
}
/**
* Parks all the nodes that are children of hostname before finally parking the hostname itself.
*
* @return List of all the parked nodes in their new state
*/
public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason));
}
/**
* Moves a previously failed or parked node back to the active state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node reactivate(String hostname, Agent agent, String reason) {
return move(hostname, Node.State.active, agent, true, Optional.of(reason));
}
/**
* Moves a host to breakfixed state, removing any children.
*/
public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) {
Node node = requireNode(hostname);
try (Mutex lock = lockUnallocated()) {
requireBreakfixable(node);
NestedTransaction transaction = new NestedTransaction();
List<Node> removed = removeChildren(node, false, transaction);
removed.add(move(node.hostname(), Node.State.breakfixed, agent, true, Optional.of(reason), transaction));
transaction.commit();
return removed;
}
}
private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
NestedTransaction transaction = new NestedTransaction();
List<Node> moved = list().childrenOf(hostname).asList().stream()
.map(child -> move(child.hostname(), toState, agent, true, reason, transaction))
.collect(Collectors.toList());
moved.add(move(hostname, toState, agent, true, reason, transaction));
transaction.commit();
return moved;
}
/** Move a node to given state */
private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason) {
NestedTransaction transaction = new NestedTransaction();
Node moved = move(hostname, toState, agent, keepAllocation, reason, transaction);
transaction.commit();
return moved;
}
/** Move a node to given state as part of a transaction */
private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason, NestedTransaction transaction) {
try (NodeMutex lock = lockAndGetRequired(hostname)) {
Node node = lock.node();
if (toState == Node.State.active) {
if (node.allocation().isEmpty()) illegal("Could not set " + node + " active: It has no allocation");
if (!keepAllocation) illegal("Could not set " + node + " active: Requested to discard allocation");
for (Node currentActive : list(Node.State.active).owner(node.allocation().get().owner())) {
if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
&& node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
}
}
if (!keepAllocation && node.allocation().isPresent()) {
node = node.withoutAllocation();
}
if (toState == Node.State.deprovisioned) {
node = node.with(IP.Config.EMPTY);
}
return db.writeTo(toState, List.of(node), agent, reason, transaction).get(0);
}
}
/*
* This method is used by the REST API to handle readying nodes for new allocations. For Linux
* containers this will remove the node from node repository, otherwise the node will be moved to state ready.
*/
public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
Node node = requireNode(hostname);
if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
if (node.state() != Node.State.dirty)
illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
return removeRecursively(node, true).get(0);
}
if (node.state() == Node.State.ready) return node;
Node parentHost = node.parentHostname().flatMap(this::node).orElse(node);
List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost);
if ( ! failureReasons.isEmpty())
illegal(node + " cannot be readied because it has hard failures: " + failureReasons);
return setReady(List.of(node), agent, reason).get(0);
}
/**
* Removes all the nodes that are children of hostname before finally removing the hostname itself.
*
* @return a List of all the nodes that have been removed or (for hosts) deprovisioned
*/
public List<Node> removeRecursively(String hostname) {
Node node = requireNode(hostname);
return removeRecursively(node, false);
}
public List<Node> removeRecursively(Node node, boolean force) {
try (Mutex lock = lockUnallocated()) {
requireRemovable(node, false, force);
NestedTransaction transaction = new NestedTransaction();
final List<Node> removed;
if (!node.type().isHost()) {
removed = List.of(node);
db.removeNodes(removed, transaction);
} else {
removed = removeChildren(node, force, transaction);
if (zone.getCloud().dynamicProvisioning()) {
db.removeNodes(List.of(node), transaction);
} else {
move(node.hostname(), Node.State.deprovisioned, Agent.system, false, Optional.empty(), transaction);
}
removed.add(node);
}
transaction.commit();
return removed;
}
}
/** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */
public void forget(Node node) {
if (node.state() != Node.State.deprovisioned)
throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten");
NestedTransaction transaction = new NestedTransaction();
db.removeNodes(List.of(node), transaction);
transaction.commit();
}
private List<Node> removeChildren(Node node, boolean force, NestedTransaction transaction) {
List<Node> children = list().childrenOf(node).asList();
children.forEach(child -> requireRemovable(child, true, force));
db.removeNodes(children, transaction);
return new ArrayList<>(children);
}
/**
* Throws if the given node cannot be removed. Removal is allowed if:
* - Tenant node:
* - non-recursively: node is unallocated
* - recursively: node is unallocated or node is in failed|parked
* - Host node: iff in state provisioned|failed|parked
* - Child node:
* - non-recursively: node in state ready
* - recursively: child is in state provisioned|failed|parked|dirty|ready
*/
private void requireRemovable(Node node, boolean removingRecursively, boolean force) {
if (force) return;
if (node.type() == NodeType.tenant && node.allocation().isPresent()) {
EnumSet<Node.State> removableStates = EnumSet.of(Node.State.failed, Node.State.parked);
if (!removingRecursively || !removableStates.contains(node.state()))
illegal(node + " is currently allocated and cannot be removed while in " + node.state());
}
final Set<Node.State> removableStates;
if (node.type().isHost()) {
removableStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked);
} else {
removableStates = removingRecursively
? EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked, Node.State.dirty, Node.State.ready)
: EnumSet.of(Node.State.ready);
}
if (!removableStates.contains(node.state()))
illegal(node + " can not be removed while in " + node.state());
}
/**
* Throws if given node cannot be breakfixed.
* Breakfix is allowed if the following is true:
* - Node is tenant host
* - Node is in zone without dynamic provisioning
* - Node is in parked or failed state
*/
private void requireBreakfixable(Node node) {
if (zone.getCloud().dynamicProvisioning()) {
illegal("Can not breakfix in zone: " + zone);
}
if (node.type() != NodeType.host) {
illegal(node + " can not be breakfixed as it is not a tenant host");
}
Set<Node.State> legalStates = EnumSet.of(Node.State.failed, Node.State.parked);
if (! legalStates.contains(node.state())) {
illegal(node + " can not be removed as it is not in the states " + legalStates);
}
}
/**
* Increases the restart generation of the active nodes matching the filter.
*
* @return the nodes in their new state
*/
public List<Node> restart(Predicate<Node> filter) {
return performOn(StateFilter.from(Node.State.active).and(filter),
(node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()),
lock));
}
/**
* Increases the reboot generation of the nodes matching the filter.
*
* @return the nodes in their new state
*/
public List<Node> reboot(Predicate<Node> filter) {
return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock));
}
/**
* Set target OS version of all nodes matching given filter.
*
* @return the nodes in their new state
*/
public List<Node> upgradeOs(Predicate<Node> filter, Optional<Version> version) {
return performOn(filter, (node, lock) -> {
var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version));
return write(node.with(newStatus), lock);
});
}
/** Retire nodes matching given filter, recording the agent and time of the retirement request */
public List<Node> retire(Predicate<Node> filter, Agent agent, Instant instant) {
return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock));
}
/** Retire and deprovision given host and all of its children */
public List<Node> deprovision(String hostname, Agent agent, Instant instant) {
return decommission(hostname, DecommissionOperation.deprovision, agent, instant);
}
/** Retire and rebuild given host and all of its children */
public List<Node> rebuild(String hostname, Agent agent, Instant instant) {
return decommission(hostname, DecommissionOperation.rebuild, agent, instant);
}
/** Retire and encrypt given host and all of its children */
public List<Node> encrypt(String hostname, Agent agent, Instant instant) {
// All three decommission flavors delegate to decommission() (defined elsewhere in this class),
// differing only in the operation passed.
return decommission(hostname, DecommissionOperation.encrypt, agent, instant);
}
/**
 * Writes this node after it has changed some internal state but NOT changed its state field.
 * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
 *
 * @param lock already acquired lock
 * @return the written node for convenience
 */
public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); }
/**
 * Writes these nodes after they have changed some internal state but NOT changed their state field.
 * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
 *
 * @param lock already acquired lock; unused at runtime, required only to prove the caller holds it
 * @return the written nodes for convenience
 */
public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
return db.writeTo(nodes, Agent.system, Optional.empty());
}
/** Performs the given action on all nodes matching the filter, acquiring the appropriate lock per node first. */
private List<Node> performOn(Predicate<Node> filter, BiFunction<Node, Mutex, Node> action) {
return performOn(list().matching(filter), action);
}
/**
 * Performs an operation requiring locking on all nodes matching some filter.
 *
 * @param action the action to perform
 * @return the set of nodes on which the action was performed, as they became as a result of the operation
 */
private List<Node> performOn(NodeList nodes, BiFunction<Node, Mutex, Node> action) {
// Partition nodes by the lock that protects them: the unallocated lock for nodes
// without an allocation, and the owning application's lock otherwise.
List<Node> unallocatedNodes = new ArrayList<>();
ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
for (Node node : nodes) {
if (node.allocation().isPresent())
allocatedNodes.put(node.allocation().get().owner(), node);
else
unallocatedNodes.add(node);
}
List<Node> resultingNodes = new ArrayList<>();
try (Mutex lock = lockUnallocated()) {
for (Node node : unallocatedNodes) {
// Re-read under the lock; the node may have been removed since the filter ran.
Optional<Node> currentNode = db.readNode(node.hostname());
if (currentNode.isEmpty()) continue;
resultingNodes.add(action.apply(currentNode.get(), lock));
}
}
for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
try (Mutex lock = lock(applicationNodes.getKey())) {
for (Node node : applicationNodes.getValue()) {
// Same re-read-under-lock pattern as above, per application.
Optional<Node> currentNode = db.readNode(node.hostname());
if (currentNode.isEmpty()) continue;
resultingNodes.add(action.apply(currentNode.get(), lock));
}
}
}
return resultingNodes;
}
/** Returns whether a tenant node can be allocated to the given host in this zone. */
public boolean canAllocateTenantNodeTo(Node host) {
return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning());
}
/**
 * Returns whether a tenant node can be allocated to the given host.
 * The host must be able to run tenant nodes, must not be retiring or retired, and must be
 * in an allocatable state (a wider set of states is allocatable with dynamic provisioning).
 */
public static boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) {
    if ( ! host.type().canRun(NodeType.tenant)) return false;
    if (host.status().wantToRetire()) return false;
    if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false;
    Set<Node.State> allocatableStates = dynamicProvisioning
            ? EnumSet.of(Node.State.active, Node.State.ready, Node.State.provisioned)
            : EnumSet.of(Node.State.active);
    return allocatableStates.contains(host.state());
}
/** Create a lock which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application) {
return db.lock(application);
}
/** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application, Duration timeout) {
return db.lock(application, timeout);
}
/** Create a lock which provides exclusive rights to modifying unallocated nodes (backed by db.lockInactive()) */
public Mutex lockUnallocated() { return db.lockInactive(); }
/** Returns the unallocated/application lock, and the node acquired under that lock. */
public Optional<NodeMutex> lockAndGet(Node node) {
Node staleNode = node;
final int maxRetries = 4;
// The correct lock depends on the node's owner, which can change between reading the node
// and acquiring the lock. Retry until the node read under the lock has the same owner as
// the node the lock was chosen for.
for (int i = 0; i < maxRetries; ++i) {
Mutex lockToClose = lock(staleNode);
try {
Optional<Node> freshNode = node(staleNode.hostname(), staleNode.state());
if (freshNode.isEmpty()) {
// Not found in the expected state; fall back to any state before giving up.
freshNode = node(staleNode.hostname());
if (freshNode.isEmpty()) {
return Optional.empty();
}
}
if (Objects.equals(freshNode.get().allocation().map(Allocation::owner),
staleNode.allocation().map(Allocation::owner))) {
NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose);
// Ownership of the lock transfers to the returned NodeMutex; nulling prevents
// the finally block from closing it.
lockToClose = null;
return Optional.of(nodeMutex);
}
staleNode = freshNode.get();
} finally {
if (lockToClose != null) lockToClose.close();
}
}
throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " +
"fetching an up to date node under lock: " + node.hostname());
}
/** Returns the unallocated/application lock, and the node acquired under that lock. */
public Optional<NodeMutex> lockAndGet(String hostname) {
return node(hostname).flatMap(this::lockAndGet);
}
/** Returns the unallocated/application lock, and the node acquired under that lock. Throws if the node does not exist. */
public NodeMutex lockAndGetRequired(Node node) {
return lockAndGet(node).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + node.hostname() + "'"));
}
/** Returns the unallocated/application lock, and the node acquired under that lock. Throws if the node does not exist. */
public NodeMutex lockAndGetRequired(String hostname) {
return lockAndGet(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
}
/** Returns the lock guarding the given node: the owner's application lock if allocated, otherwise the unallocated lock. */
private Mutex lock(Node node) {
return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
}
/** Returns the node with the given hostname, or throws NoSuchNodeException if it does not exist. */
private Node requireNode(String hostname) {
return node(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
}
/** Throws IllegalArgumentException with the given message. */
private void illegal(String message) {
throw new IllegalArgumentException(message);
}
/** Returns whether node should be parked when deallocated by given agent */
private static boolean parkOnDeallocationOf(Node node, Agent agent) {
    // Already parked nodes and operator-initiated deallocations never park.
    if (node.state() == Node.State.parked) return false;
    if (agent == Agent.operator) return false;
    // Pending deprovision or rebuild always parks.
    if (node.status().wantToDeprovision()) return true;
    if (node.status().wantToRebuild()) return true;
    // Otherwise park only if an operator requested the retirement.
    if ( ! node.status().wantToRetire()) return false;
    return node.history().event(History.Event.Type.wantToRetire)
               .map(History.Event::agent)
               .map(a -> a == Agent.operator)
               .orElse(false);
}
/** The different ways a host can be decommissioned */
private enum DecommissionOperation {
deprovision, // see deprovision(...)
rebuild,     // see rebuild(...)
encrypt,     // see encrypt(...)
}
} | class Nodes {
private static final Logger log = Logger.getLogger(Nodes.class.getName());
// The zone this repository serves; used e.g. to decide dynamic-provisioning behavior.
private final Zone zone;
// Injected clock so time-dependent behavior is testable.
private final Clock clock;
// Backing store for all node state.
private final CuratorDatabaseClient db;
public Nodes(CuratorDatabaseClient db, Zone zone, Clock clock) {
this.zone = zone;
this.clock = clock;
this.db = db;
}
/** Read and write all nodes to make sure they are stored in the latest version of the serialized format */
public void rewrite() {
Instant start = clock.instant();
int nodesWritten = 0;
// Rewrite state by state; writing back what was read migrates the stored format.
for (Node.State state : Node.State.values()) {
List<Node> nodes = db.readNodes(state);
db.writeTo(state, nodes, Agent.system, Optional.empty());
nodesWritten += nodes.size();
}
Instant end = clock.instant();
log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end)));
}
/**
 * Finds and returns the node with the hostname in any of the given states, or empty if not found
 *
 * @param hostname the full host name of the node
 * @param inState the states the node may be in. If no states are given, it will be returned from any state
 * @return the node, or empty if it was not found in any of the given states
 */
public Optional<Node> node(String hostname, Node.State... inState) {
return db.readNode(hostname, inState);
}
/**
 * Returns a list of nodes in this repository in any of the given states
 *
 * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
 */
public NodeList list(Node.State... inState) {
return NodeList.copyOf(db.readNodes(inState));
}
/** Returns a locked list of all nodes in this repository; the caller-supplied lock witnesses exclusive access */
public LockedNodeList list(Mutex lock) {
return new LockedNodeList(list().asList(), lock);
}
/**
 * Returns whether the zone managed by this node repository seems to be working.
 * If too many nodes are not responding, there is probably some zone-wide issue
 * and we should probably refrain from making changes to it.
 */
public boolean isWorking() {
    NodeList activeNodes = list(Node.State.active);
    if (activeNodes.size() <= 5) return true; // too few nodes to draw a conclusion
    double downRatio = (double) activeNodes.down().size() / activeNodes.size();
    return downRatio <= 0.2;
}
/** Adds a list of newly created reserved nodes to the node repository */
public List<Node> addReservedNodes(LockedNodeList nodes) {
// Validate all nodes before writing any of them.
for (Node node : nodes) {
if ( node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
illegal("Cannot add " + node + ": This is not a child node");
if (node.allocation().isEmpty())
illegal("Cannot add " + node + ": Child nodes need to be allocated");
Optional<Node> existing = node(node.hostname());
if (existing.isPresent())
illegal("Cannot add " + node + ": A node with this name already exists (" +
existing.get() + ", " + existing.get().history() + "). Node to be added: " +
node + ", " + node.history());
}
return db.addNodesInState(nodes.asList(), Node.State.reserved, Agent.system);
}
/**
 * Adds a list of (newly created) nodes to the node repository as provisioned nodes.
 * If any of the nodes already exists in the deprovisioned state, the new node will be merged
 * with the history of that node.
 */
public List<Node> addNodes(List<Node> nodes, Agent agent) {
try (Mutex lock = lockUnallocated()) {
List<Node> nodesToAdd = new ArrayList<>();
List<Node> nodesToRemove = new ArrayList<>();
for (int i = 0; i < nodes.size(); i++) {
var node = nodes.get(i);
// Reject duplicates within the argument list itself (O(n^2), acceptable for small batches).
for (int j = 0; j < i; j++) {
if (node.equals(nodes.get(j)))
illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
}
Optional<Node> existing = node(node.hostname());
if (existing.isPresent()) {
if (existing.get().state() != Node.State.deprovisioned)
illegal("Cannot add " + node + ": A node with this name already exists");
// Carry over history, reports, fail count and firmware check from the deprovisioned node.
node = node.with(existing.get().history());
node = node.with(existing.get().reports());
node = node.with(node.status().withFailCount(existing.get().status().failCount()));
if (existing.get().status().firmwareVerifiedAt().isPresent())
node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get()));
nodesToRemove.add(existing.get());
}
nodesToAdd.add(node);
}
// Add the new nodes and remove the merged deprovisioned ones atomically.
NestedTransaction transaction = new NestedTransaction();
List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), Node.State.provisioned, agent, transaction);
db.removeNodes(nodesToRemove, transaction);
transaction.commit();
return resultingNodes;
}
}
/** Sets a list of nodes ready and returns the nodes in the ready state */
public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
try (Mutex lock = lockUnallocated()) {
List<Node> nodesWithResetFields = nodes.stream()
.map(node -> {
if (node.state() != Node.State.provisioned && node.state() != Node.State.dirty)
illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
// Clear any pending retire request before the node becomes ready.
return node.withWantToRetire(false,
false,
false,
Agent.system,
clock.instant());
})
.collect(Collectors.toList());
return db.writeTo(Node.State.ready, nodesWithResetFields, agent, Optional.of(reason));
}
}
/** Sets the node with the given hostname ready; a no-op if it is already ready */
public Node setReady(String hostname, Agent agent, String reason) {
Node nodeToReady = requireNode(hostname);
if (nodeToReady.state() == Node.State.ready) return nodeToReady;
return setReady(List.of(nodeToReady), agent, reason).get(0);
}
/** Reserve nodes. This method does <b>not</b> lock the node repository */
public List<Node> reserve(List<Node> nodes) {
return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
}
/** Activate nodes. This method does <b>not</b> lock the node repository */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
}
/**
 * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
 *
 * @param application the application the nodes belong to
 * @param nodes the nodes to make removable. These nodes MUST be in the active state.
 */
public void setRemovable(ApplicationId application, List<Node> nodes) {
try (Mutex lock = lock(application)) {
// allocation().get() is safe because callers guarantee the nodes are active (and thus allocated).
List<Node> removableNodes = nodes.stream()
.map(node -> node.with(node.allocation().get().removable(true)))
.collect(Collectors.toList());
write(removableNodes, lock);
}
}
/**
 * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the
 * transaction commits.
 */
public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) {
// Stateless nodes are deallocated directly, stateful nodes go to inactive so their data survives.
var stateless = NodeList.copyOf(nodes).stateless();
var stateful = NodeList.copyOf(nodes).stateful();
List<Node> written = new ArrayList<>();
written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction.nested()));
written.addAll(db.writeTo(Node.State.inactive, stateful.asList(), Agent.application, Optional.empty(), transaction.nested()));
return written;
}
/**
 * Fails these nodes in a transaction and returns the nodes in the new state which will hold if the
 * transaction commits.
 */
public List<Node> fail(List<Node> nodes, ApplicationTransaction transaction) {
return fail(nodes, Agent.application, "Failed by application", transaction.nested());
}
/**
 * Fails the given nodes in a single committed transaction.
 *
 * @param nodes  the nodes to fail
 * @param agent  the agent requesting the failure
 * @param reason human-readable reason recorded with the state change
 * @return the nodes in their new (failed) state
 */
public List<Node> fail(List<Node> nodes, Agent agent, String reason) {
    NestedTransaction transaction = new NestedTransaction();
    nodes = fail(nodes, agent, reason, transaction);
    transaction.commit(); // fixed: removed stray empty statement (double semicolon)
    return nodes;
}
/** Moves the given nodes to failed as part of the given transaction, clearing their wantToFail flag. */
private List<Node> fail(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
// wantToFail is reset to false here because the failure it requested is now happening.
nodes = nodes.stream()
.map(n -> n.withWantToFail(false, agent, clock.instant()))
.collect(Collectors.toList());
return db.writeTo(Node.State.failed, nodes, agent, Optional.of(reason), transaction);
}
/** Move nodes to the dirty state */
public List<Node> deallocate(List<Node> nodes, Agent agent, String reason) {
return performOn(NodeList.copyOf(nodes), (node, lock) -> deallocate(node, agent, reason));
}
/** Deallocates the given host and all of its children, or a single non-host node. */
public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) {
Node nodeToDirty = node(hostname).orElseThrow(() ->
new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));
// For hosts, include all children; skip nodes that are already dirty.
List<Node> nodesToDirty =
(nodeToDirty.type().isHost() ?
Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
Stream.of(nodeToDirty))
.filter(node -> node.state() != Node.State.dirty)
.collect(Collectors.toList());
// Validate all nodes up front so we either dirty everything or nothing.
List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
.filter(node -> node.state() != Node.State.provisioned)
.filter(node -> node.state() != Node.State.failed)
.filter(node -> node.state() != Node.State.parked)
.filter(node -> node.state() != Node.State.breakfixed)
.map(Node::hostname)
.collect(Collectors.toList());
if ( ! hostnamesNotAllowedToDirty.isEmpty())
illegal("Could not deallocate " + nodeToDirty + ": " +
hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]");
return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).collect(Collectors.toList());
}
/**
 * Set a node dirty or parked, allowed if it is in the provisioned, inactive, failed or parked state.
 * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
 */
public Node deallocate(Node node, Agent agent, String reason) {
NestedTransaction transaction = new NestedTransaction();
Node deallocated = deallocate(node, agent, reason, transaction);
transaction.commit();
return deallocated;
}
/** Deallocates each of the given nodes as part of the given transaction. */
public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
return nodes.stream().map(node -> deallocate(node, agent, reason, transaction)).collect(Collectors.toList());
}
/** Deallocates a single node as part of the given transaction: parks it if required, otherwise moves it to dirty. */
public Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) {
if (parkOnDeallocationOf(node, agent)) {
// Keep the allocation while parked if the node is pending encryption.
boolean keepAllocation = node.reports().getReport(Report.WANT_TO_ENCRYPT_ID).isPresent();
return park(node.hostname(), keepAllocation, agent, reason, transaction);
} else {
return db.writeTo(Node.State.dirty, List.of(node), agent, Optional.of(reason), transaction).get(0);
}
}
/**
 * Fails this node and returns it in its new state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node fail(String hostname, Agent agent, String reason) {
// Keeps the allocation by default.
return fail(hostname, true, agent, reason);
}
/** Fails this node, optionally discarding its allocation, and returns it in its new state. */
public Node fail(String hostname, boolean keepAllocation, Agent agent, String reason) {
return move(hostname, Node.State.failed, agent, keepAllocation, Optional.of(reason));
}
/**
 * Fails all the nodes that are children of hostname before finally failing the hostname itself.
 * Non-active nodes are failed immediately, while active nodes are marked as wantToFail.
 * The host is failed if it has no active nodes and marked wantToFail if it has.
 *
 * @return all the nodes that were changed by this request
 */
public List<Node> failOrMarkRecursively(String hostname, Agent agent, String reason) {
NodeList children = list().childrenOf(hostname);
List<Node> changed = performOn(children, (node, lock) -> failOrMark(node, agent, reason, lock));
if (children.state(Node.State.active).isEmpty())
// No active children: the host can be failed directly.
changed.add(move(hostname, Node.State.failed, agent, true, Optional.of(reason)));
else
changed.addAll(performOn(NodeList.of(node(hostname).orElseThrow()), (node, lock) -> failOrMark(node, agent, reason, lock)));
return changed;
}
/** Marks an active node wantToFail, or fails a non-active node immediately. */
private Node failOrMark(Node node, Agent agent, String reason, Mutex lock) {
if (node.state() == Node.State.active) {
node = node.withWantToFail(true, agent, clock.instant());
write(node, lock);
return node;
} else {
return move(node.hostname(), Node.State.failed, agent, true, Optional.of(reason));
}
}
/**
 * Parks this node and returns it in its new state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
NestedTransaction transaction = new NestedTransaction();
Node parked = park(hostname, keepAllocation, agent, reason, transaction);
transaction.commit();
return parked;
}
/** Parks this node as part of the given transaction. */
private Node park(String hostname, boolean keepAllocation, Agent agent, String reason, NestedTransaction transaction) {
return move(hostname, Node.State.parked, agent, keepAllocation, Optional.of(reason), transaction);
}
/**
 * Parks all the nodes that are children of hostname before finally parking the hostname itself.
 *
 * @return List of all the parked nodes in their new state
 */
public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason));
}
/**
 * Moves a previously failed or parked node back to the active state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node reactivate(String hostname, Agent agent, String reason) {
// keepAllocation must be true: move() rejects activating a node while discarding its allocation.
return move(hostname, Node.State.active, agent, true, Optional.of(reason));
}
/**
 * Moves a host to breakfixed state, removing any children.
 */
public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) {
Node node = requireNode(hostname);
try (Mutex lock = lockUnallocated()) {
requireBreakfixable(node);
// Remove children and move the host in one atomic transaction.
NestedTransaction transaction = new NestedTransaction();
List<Node> removed = removeChildren(node, false, transaction);
removed.add(move(node.hostname(), Node.State.breakfixed, agent, true, Optional.of(reason), transaction));
transaction.commit();
return removed;
}
}
/** Moves all children of the given hostname, then the hostname itself, to the given state in one transaction. */
private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
NestedTransaction transaction = new NestedTransaction();
// Children first, parent last, all within the same transaction.
List<Node> moved = list().childrenOf(hostname).asList().stream()
.map(child -> move(child.hostname(), toState, agent, true, reason, transaction))
.collect(Collectors.toList());
moved.add(move(hostname, toState, agent, true, reason, transaction));
transaction.commit();
return moved;
}
/** Move a node to given state */
private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason) {
NestedTransaction transaction = new NestedTransaction();
Node moved = move(hostname, toState, agent, keepAllocation, reason, transaction);
transaction.commit();
return moved;
}
/** Move a node to given state as part of a transaction */
private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason, NestedTransaction transaction) {
try (NodeMutex lock = lockAndGetRequired(hostname)) {
Node node = lock.node();
if (toState == Node.State.active) {
// Activation requires an allocation and a cluster/index combination not already active.
if (node.allocation().isEmpty()) illegal("Could not set " + node + " active: It has no allocation");
if (!keepAllocation) illegal("Could not set " + node + " active: Requested to discard allocation");
for (Node currentActive : list(Node.State.active).owner(node.allocation().get().owner())) {
if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
&& node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
}
}
if (!keepAllocation && node.allocation().isPresent()) {
node = node.withoutAllocation();
}
if (toState == Node.State.deprovisioned) {
// Deprovisioned nodes keep no IP configuration.
node = node.with(IP.Config.EMPTY);
}
return db.writeTo(toState, List.of(node), agent, reason, transaction).get(0);
}
}
/*
 * This method is used by the REST API to handle readying nodes for new allocations. For Linux
 * containers this will remove the node from node repository, otherwise the node will be moved to state ready.
 */
public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
Node node = requireNode(hostname);
if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
if (node.state() != Node.State.dirty)
illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
// Container tenant nodes are removed entirely rather than readied.
return removeRecursively(node, true).get(0);
}
if (node.state() == Node.State.ready) return node;
// A child cannot be readied if its parent host has hard failures.
Node parentHost = node.parentHostname().flatMap(this::node).orElse(node);
List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost);
if ( ! failureReasons.isEmpty())
illegal(node + " cannot be readied because it has hard failures: " + failureReasons);
return setReady(List.of(node), agent, reason).get(0);
}
/**
 * Removes all the nodes that are children of hostname before finally removing the hostname itself.
 *
 * @return a List of all the nodes that have been removed or (for hosts) deprovisioned
 */
public List<Node> removeRecursively(String hostname) {
Node node = requireNode(hostname);
return removeRecursively(node, false);
}
/** Removes the given node (and, for hosts, its children), optionally bypassing the removability checks. */
public List<Node> removeRecursively(Node node, boolean force) {
try (Mutex lock = lockUnallocated()) {
requireRemovable(node, false, force);
NestedTransaction transaction = new NestedTransaction();
final List<Node> removed;
if (!node.type().isHost()) {
removed = List.of(node);
db.removeNodes(removed, transaction);
} else {
removed = removeChildren(node, force, transaction);
// With dynamic provisioning the host is deleted outright; otherwise it is kept as deprovisioned.
if (zone.getCloud().dynamicProvisioning()) {
db.removeNodes(List.of(node), transaction);
} else {
move(node.hostname(), Node.State.deprovisioned, Agent.system, false, Optional.empty(), transaction);
}
removed.add(node);
}
transaction.commit();
return removed;
}
}
/** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */
public void forget(Node node) {
if (node.state() != Node.State.deprovisioned)
throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten");
NestedTransaction transaction = new NestedTransaction();
db.removeNodes(List.of(node), transaction);
transaction.commit();
}
/** Removes all children of the given node as part of the given transaction, after validating each is removable. */
private List<Node> removeChildren(Node node, boolean force, NestedTransaction transaction) {
List<Node> children = list().childrenOf(node).asList();
children.forEach(child -> requireRemovable(child, true, force));
db.removeNodes(children, transaction);
// Mutable copy: callers append the parent to the returned list.
return new ArrayList<>(children);
}
/**
 * Throws if the given node cannot be removed. Removal is allowed if:
 *  - Tenant node:
 *    - non-recursively: node is unallocated
 *    - recursively: node is unallocated or node is in failed|parked
 *  - Host node: iff in state provisioned|failed|parked
 *  - Child node:
 *    - non-recursively: node in state ready
 *    - recursively: child is in state provisioned|failed|parked|dirty|ready
 *
 * @param force if true, all checks are skipped
 */
private void requireRemovable(Node node, boolean removingRecursively, boolean force) {
if (force) return;
if (node.type() == NodeType.tenant && node.allocation().isPresent()) {
// An allocated tenant node can only be removed recursively, and only from failed|parked.
EnumSet<Node.State> removableStates = EnumSet.of(Node.State.failed, Node.State.parked);
if (!removingRecursively || !removableStates.contains(node.state()))
illegal(node + " is currently allocated and cannot be removed while in " + node.state());
}
final Set<Node.State> removableStates;
if (node.type().isHost()) {
removableStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked);
} else {
removableStates = removingRecursively
? EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked, Node.State.dirty, Node.State.ready)
: EnumSet.of(Node.State.ready);
}
if (!removableStates.contains(node.state()))
illegal(node + " can not be removed while in " + node.state());
}
/**
 * Throws if given node cannot be breakfixed.
 * Breakfix is allowed if the following is true:
 *  - Node is tenant host
 *  - Node is in zone without dynamic provisioning
 *  - Node is in parked or failed state
 *
 * @throws IllegalArgumentException if any of the above conditions is violated
 */
private void requireBreakfixable(Node node) {
    if (zone.getCloud().dynamicProvisioning()) {
        illegal("Can not breakfix in zone: " + zone);
    }
    if (node.type() != NodeType.host) {
        illegal(node + " can not be breakfixed as it is not a tenant host");
    }
    Set<Node.State> legalStates = EnumSet.of(Node.State.failed, Node.State.parked);
    if (! legalStates.contains(node.state())) {
        // Fixed: message previously said "removed", which was misleading for a breakfix request
        illegal(node + " can not be breakfixed as it is not in the states " + legalStates);
    }
}
/**
 * Increases the restart generation of the active nodes matching given filter.
 *
 * @return the nodes in their new state
 */
public List<Node> restartActive(Predicate<Node> filter) {
return restart(StateFilter.from(Node.State.active).and(filter));
}
/**
 * Increases the restart generation of any nodes matching given filter.
 *
 * @return the nodes in their new state
 */
public List<Node> restart(Predicate<Node> filter) {
// NOTE(review): allocation().get() will throw for unallocated nodes matching the filter;
// callers appear expected to restrict the filter to allocated nodes (cf. restartActive).
return performOn(filter, (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()),
lock));
}
/**
 * Increases the reboot generation of the nodes matching the filter.
 *
 * @return the nodes in their new state
 */
public List<Node> reboot(Predicate<Node> filter) {
// Applies to nodes in any state; does not require an allocation.
return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock));
}
/**
 * Sets the wanted OS version on every node matching the given filter.
 *
 * @param version the wanted OS version, or empty to clear the wanted version
 * @return the nodes in their new state
 */
public List<Node> upgradeOs(Predicate<Node> filter, Optional<Version> version) {
    return performOn(filter,
                     (node, lock) -> write(node.with(node.status().withOsVersion(node.status().osVersion().withWanted(version))),
                                           lock));
}
/** Retire nodes matching given filter, recording the agent and time of the retirement request */
public List<Node> retire(Predicate<Node> filter, Agent agent, Instant instant) {
return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock));
}
/** Retire and deprovision given host and all of its children */
public List<Node> deprovision(String hostname, Agent agent, Instant instant) {
return decommission(hostname, DecommissionOperation.deprovision, agent, instant);
}
/** Retire and rebuild given host and all of its children */
public List<Node> rebuild(String hostname, Agent agent, Instant instant) {
return decommission(hostname, DecommissionOperation.rebuild, agent, instant);
}
/** Retire and encrypt given host and all of its children */
public List<Node> encrypt(String hostname, Agent agent, Instant instant) {
// All three decommission flavors delegate to decommission() (defined elsewhere in this class).
return decommission(hostname, DecommissionOperation.encrypt, agent, instant);
}
/**
 * Writes this node after it has changed some internal state but NOT changed its state field.
 * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
 *
 * @param lock already acquired lock
 * @return the written node for convenience
 */
public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); }
/**
 * Writes these nodes after they have changed some internal state but NOT changed their state field.
 * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
 *
 * @param lock already acquired lock; unused at runtime, required only to prove the caller holds it
 * @return the written nodes for convenience
 */
public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
return db.writeTo(nodes, Agent.system, Optional.empty());
}
/** Performs the given action on all nodes matching the filter, acquiring the appropriate lock per node first. */
private List<Node> performOn(Predicate<Node> filter, BiFunction<Node, Mutex, Node> action) {
return performOn(list().matching(filter), action);
}
/**
 * Performs an operation requiring locking on all nodes matching some filter.
 *
 * @param action the action to perform
 * @return the set of nodes on which the action was performed, as they became as a result of the operation
 */
private List<Node> performOn(NodeList nodes, BiFunction<Node, Mutex, Node> action) {
// Partition nodes by the lock that protects them: the unallocated lock for nodes
// without an allocation, and the owning application's lock otherwise.
List<Node> unallocatedNodes = new ArrayList<>();
ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
for (Node node : nodes) {
if (node.allocation().isPresent())
allocatedNodes.put(node.allocation().get().owner(), node);
else
unallocatedNodes.add(node);
}
List<Node> resultingNodes = new ArrayList<>();
try (Mutex lock = lockUnallocated()) {
for (Node node : unallocatedNodes) {
// Re-read under the lock; the node may have been removed since the filter ran.
Optional<Node> currentNode = db.readNode(node.hostname());
if (currentNode.isEmpty()) continue;
resultingNodes.add(action.apply(currentNode.get(), lock));
}
}
for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
try (Mutex lock = lock(applicationNodes.getKey())) {
for (Node node : applicationNodes.getValue()) {
// Same re-read-under-lock pattern as above, per application.
Optional<Node> currentNode = db.readNode(node.hostname());
if (currentNode.isEmpty()) continue;
resultingNodes.add(action.apply(currentNode.get(), lock));
}
}
}
return resultingNodes;
}
/** Returns whether a tenant node can be allocated to the given host in this zone. */
public boolean canAllocateTenantNodeTo(Node host) {
return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning());
}
/**
 * Returns whether a tenant node can be allocated to the given host.
 * The host must be able to run tenant nodes, must not be retiring or retired, and must be
 * in an allocatable state (a wider set of states is allocatable with dynamic provisioning).
 */
public static boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) {
    if ( ! host.type().canRun(NodeType.tenant)) return false;
    if (host.status().wantToRetire()) return false;
    if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false;
    Set<Node.State> allocatableStates = dynamicProvisioning
            ? EnumSet.of(Node.State.active, Node.State.ready, Node.State.provisioned)
            : EnumSet.of(Node.State.active);
    return allocatableStates.contains(host.state());
}
/** Create a lock which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application) {
return db.lock(application);
}
/** Creates a lock with a timeout which provides exclusive rights to making changes to the nodes of the given application */
public Mutex lock(ApplicationId application, Duration timeout) {
return db.lock(application, timeout);
}
/** Creates a lock which provides exclusive rights to modifying unallocated nodes */
public Mutex lockUnallocated() { return db.lockInactive(); }
/**
 * Returns the unallocated/application lock, and the node acquired under that lock.
 *
 * The correct lock depends on the node's owner, which may change between reading the node and
 * acquiring the lock, so this locks optimistically and retries (up to 4 times) whenever the
 * owner observed under the lock differs from the owner the lock was chosen by.
 *
 * @return the fresh node together with its held lock, or empty if the node no longer exists
 */
public Optional<NodeMutex> lockAndGet(Node node) {
Node staleNode = node;
final int maxRetries = 4;
for (int i = 0; i < maxRetries; ++i) {
Mutex lockToClose = lock(staleNode);
try {
// Re-read the node under the lock, first in its last observed state, then in any state
Optional<Node> freshNode = node(staleNode.hostname(), staleNode.state());
if (freshNode.isEmpty()) {
freshNode = node(staleNode.hostname());
if (freshNode.isEmpty()) {
return Optional.empty(); // node is gone
}
}
// Owner unchanged: we hold the right lock — hand its ownership over to the NodeMutex
if (Objects.equals(freshNode.get().allocation().map(Allocation::owner),
staleNode.allocation().map(Allocation::owner))) {
NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose);
lockToClose = null; // transferred to nodeMutex: must not be closed in the finally below
return Optional.of(nodeMutex);
}
staleNode = freshNode.get(); // owner changed: retry with the fresh copy
} finally {
if (lockToClose != null) lockToClose.close();
}
}
throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " +
"fetching an up to date node under lock: " + node.hostname());
}
/** Returns the unallocated/application lock, and the node acquired under that lock, or empty if the node does not exist. */
public Optional<NodeMutex> lockAndGet(String hostname) {
return node(hostname).flatMap(this::lockAndGet);
}
/** Returns the unallocated/application lock, and the node acquired under that lock. Throws NoSuchNodeException if the node is gone. */
public NodeMutex lockAndGetRequired(Node node) {
return lockAndGet(node).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + node.hostname() + "'"));
}
/** Returns the unallocated/application lock, and the node acquired under that lock. Throws NoSuchNodeException if the node is gone. */
public NodeMutex lockAndGetRequired(String hostname) {
return lockAndGet(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
}
/** Returns the application lock if the node is allocated, the unallocated-nodes lock otherwise */
private Mutex lock(Node node) {
return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
}
/** Returns the node with the given hostname, throwing NoSuchNodeException if it does not exist */
private Node requireNode(String hostname) {
return node(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
}
/** Convenience for throwing IllegalArgumentException with the given message */
private void illegal(String message) {
throw new IllegalArgumentException(message);
}
/** Returns whether the node should be parked, rather than moved to dirty, when deallocated by the given agent */
private static boolean parkOnDeallocationOf(Node node, Agent agent) {
if (node.state() == Node.State.parked) return false; // already parked
if (agent == Agent.operator) return false; // operators decide the target state explicitly
// Whether an operator asked for this node's retirement
boolean retirementRequestedByOperator = node.status().wantToRetire() &&
node.history().event(History.Event.Type.wantToRetire)
.map(History.Event::agent)
.map(a -> a == Agent.operator)
.orElse(false);
// Hold on to nodes pending deprovisioning, rebuild, encryption, or operator-requested retirement
return node.status().wantToDeprovision() ||
node.status().wantToRebuild() ||
node.reports().getReport(Report.WANT_TO_ENCRYPT_ID).isPresent() ||
retirementRequestedByOperator;
}
/** The different ways a host can be decommissioned */
private enum DecommissionOperation {
deprovision, // retire, then remove the host
rebuild,     // retire, then rebuild the host
encrypt,     // retire, then encrypt the host (tracked via Report.WANT_TO_ENCRYPT_ID)
}
}
private List<Node> decommission(String hostname, DecommissionOperation op, Agent agent, Instant instant) {
// Look up and lock the host; if it no longer exists there is nothing to do
Optional<NodeMutex> nodeMutex = lockAndGet(hostname);
if (nodeMutex.isEmpty()) return List.of();
Node host = nodeMutex.get().node();
if (!host.type().isHost()) throw new IllegalArgumentException("Cannot " + op + " non-host " + host);
List<Node> result;
boolean wantToDeprovision = op == DecommissionOperation.deprovision;
boolean wantToRebuild = op == DecommissionOperation.rebuild;
try (NodeMutex lock = nodeMutex.get(); Mutex allocationLock = lockUnallocated()) {
host = lock.node(); // fresh copy read under the lock
// Retire all child nodes of this host, tagging them with the requested operation
result = performOn(list(allocationLock).childrenOf(host),
(node, nodeLock) -> write(node.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant),
nodeLock));
Node newHost = host.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant);
if (op == DecommissionOperation.encrypt) {
Report report = Report.basicReport(Report.WANT_TO_ENCRYPT_ID, Report.Type.UNSPECIFIED, instant, "");
// Fix: only attach the encryption report. The previous version also bumped the wanted
// restart generation via newHost.allocation().get(), which throws NoSuchElementException
// for hosts without an allocation.
newHost = newHost.with(newHost.reports().withReport(report));
}
result.add(write(newHost, lock));
}
return result;
}

private List<Node> decommission(String hostname, DecommissionOperation op, Agent agent, Instant instant) {
// Look up and lock the host; if it no longer exists there is nothing to do
Optional<NodeMutex> nodeMutex = lockAndGet(hostname);
if (nodeMutex.isEmpty()) return List.of();
Node host = nodeMutex.get().node();
if (!host.type().isHost()) throw new IllegalArgumentException("Cannot " + op + " non-host " + host);
List<Node> result;
boolean wantToDeprovision = op == DecommissionOperation.deprovision;
boolean wantToRebuild = op == DecommissionOperation.rebuild;
try (NodeMutex lock = nodeMutex.get(); Mutex allocationLock = lockUnallocated()) {
host = lock.node(); // fresh copy read under the lock
// Retire all child nodes of this host, tagging them with the requested operation
result = performOn(list(allocationLock).childrenOf(host),
(node, nodeLock) -> write(node.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant),
nodeLock));
// Retire the host itself; for encrypt, also attach the want-to-encrypt report
Node newHost = host.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant);
if (op == DecommissionOperation.encrypt) {
Report report = Report.basicReport(Report.WANT_TO_ENCRYPT_ID, Report.Type.UNSPECIFIED, instant, "");
newHost = newHost.with(newHost.reports().withReport(report));
}
result.add(write(newHost, lock));
}
return result;
}

class Nodes {
private static final Logger log = Logger.getLogger(Nodes.class.getName());
private final Zone zone;   // the zone this repository manages nodes for
private final Clock clock; // injected clock, for testable timestamps
private final CuratorDatabaseClient db; // persistent backing store for all node state
/** Creates a Nodes backed by the given database, for the given zone, using the given clock for timestamps */
public Nodes(CuratorDatabaseClient db, Zone zone, Clock clock) {
this.zone = zone;
this.clock = clock;
this.db = db;
}
/** Read and write all nodes to make sure they are stored in the latest version of the serialized format */
public void rewrite() {
    Instant started = clock.instant();
    int written = 0;
    for (Node.State state : Node.State.values()) {
        List<Node> nodesInState = db.readNodes(state);
        db.writeTo(state, nodesInState, Agent.system, Optional.empty());
        written += nodesInState.size();
    }
    log.log(Level.INFO, String.format("Rewrote %d nodes in %s", written, Duration.between(started, clock.instant())));
}
/**
 * Finds and returns the node with the given hostname in any of the given states, or empty if not found.
 *
 * @param hostname the full host name of the node
 * @param inState the states the node may be in. If no states are given, it will be returned from any state
 * @return the node, or empty if it was not found in any of the given states
 */
public Optional<Node> node(String hostname, Node.State... inState) {
return db.readNode(hostname, inState);
}
/**
 * Returns a list of the nodes in this repository which are in any of the given states.
 *
 * @param inState the states to return nodes from. If no states are given, all nodes are returned
 */
public NodeList list(Node.State... inState) {
return NodeList.copyOf(db.readNodes(inState));
}
/** Returns a list of all nodes in this repository, taken while holding (and valid under) the given lock */
public LockedNodeList list(Mutex lock) {
return new LockedNodeList(list().asList(), lock);
}
/**
 * Returns whether the zone managed by this node repository seems to be working.
 * If too many nodes are not responding, there is probably some zone-wide issue
 * and we should probably refrain from making changes to it.
 */
public boolean isWorking() {
    NodeList activeNodes = list(Node.State.active);
    if (activeNodes.size() <= 5) return true; // too few nodes to draw any conclusion
    double downRatio = (double) activeNodes.down().size() / (double) activeNodes.size();
    return downRatio <= 0.2;
}
/** Adds a list of newly created reserved nodes (allocated docker containers) to the node repository */
public List<Node> addReservedNodes(LockedNodeList nodes) {
for (Node node : nodes) {
// Only container (child) nodes may enter the repository directly in the reserved state
if ( node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
illegal("Cannot add " + node + ": This is not a child node");
if (node.allocation().isEmpty())
illegal("Cannot add " + node + ": Child nodes need to be allocated");
Optional<Node> existing = node(node.hostname());
if (existing.isPresent())
illegal("Cannot add " + node + ": A node with this name already exists (" +
existing.get() + ", " + existing.get().history() + "). Node to be added: " +
node + ", " + node.history());
}
return db.addNodesInState(nodes.asList(), Node.State.reserved, Agent.system);
}
/**
 * Adds a list of (newly created) nodes to the node repository as provisioned nodes.
 * If any of the nodes already exists in the deprovisioned state, the new node will be merged
 * with the history of that node.
 */
public List<Node> addNodes(List<Node> nodes, Agent agent) {
try (Mutex lock = lockUnallocated()) {
List<Node> nodesToAdd = new ArrayList<>();
List<Node> nodesToRemove = new ArrayList<>();
for (int i = 0; i < nodes.size(); i++) {
var node = nodes.get(i);
// Reject duplicates within the argument list itself
for (int j = 0; j < i; j++) {
if (node.equals(nodes.get(j)))
illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
}
Optional<Node> existing = node(node.hostname());
if (existing.isPresent()) {
// Only a deprovisioned node may be re-added under the same hostname;
// carry over its history, reports, fail count and firmware-verified timestamp
if (existing.get().state() != Node.State.deprovisioned)
illegal("Cannot add " + node + ": A node with this name already exists");
node = node.with(existing.get().history());
node = node.with(existing.get().reports());
node = node.with(node.status().withFailCount(existing.get().status().failCount()));
if (existing.get().status().firmwareVerifiedAt().isPresent())
node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get()));
nodesToRemove.add(existing.get());
}
nodesToAdd.add(node);
}
// Add the new nodes and remove their deprovisioned predecessors in one transaction
NestedTransaction transaction = new NestedTransaction();
List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), Node.State.provisioned, agent, transaction);
db.removeNodes(nodesToRemove, transaction);
transaction.commit();
return resultingNodes;
}
}
/** Sets a list of nodes ready and returns the nodes in the ready state */
public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
try (Mutex lock = lockUnallocated()) {
List<Node> nodesWithResetFields = nodes.stream()
.map(node -> {
// Only provisioned or dirty nodes may become ready
if (node.state() != Node.State.provisioned && node.state() != Node.State.dirty)
illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
// Clear any retire/deprovision/rebuild markers before readying
return node.withWantToRetire(false,
false,
false,
Agent.system,
clock.instant());
})
.collect(Collectors.toList());
return db.writeTo(Node.State.ready, nodesWithResetFields, agent, Optional.of(reason));
}
}
/** Sets the node with the given hostname ready, and returns it in its new state. No-op if it is already ready. */
public Node setReady(String hostname, Agent agent, String reason) {
    Node node = requireNode(hostname);
    if (node.state() == Node.State.ready) return node; // already ready: nothing to do
    return setReady(List.of(node), agent, reason).get(0);
}
/** Reserves the given nodes. This method does <b>not</b> lock the node repository. */
public List<Node> reserve(List<Node> nodes) {
return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
}
/** Activates the given nodes as part of the given transaction. This method does <b>not</b> lock the node repository. */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
}
/**
 * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
 *
 * @param application the application the nodes belong to
 * @param nodes the nodes to make removable. These nodes MUST be in the active state.
 */
public void setRemovable(ApplicationId application, List<Node> nodes) {
    try (Mutex lock = lock(application)) {
        List<Node> updated = new ArrayList<>();
        for (Node node : nodes)
            updated.add(node.with(node.allocation().get().removable(true)));
        write(updated, lock);
    }
}
/**
 * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the
 * transaction commits.
 */
public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) {
var stateless = NodeList.copyOf(nodes).stateless();
var stateful = NodeList.copyOf(nodes).stateful();
List<Node> written = new ArrayList<>();
// Stateless nodes are recycled immediately; stateful nodes are kept inactive (preserving data)
written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction.nested()));
written.addAll(db.writeTo(Node.State.inactive, stateful.asList(), Agent.application, Optional.empty(), transaction.nested()));
return written;
}
/**
 * Fails these nodes in a transaction and returns the nodes in the new state which will hold if the
 * transaction commits.
 */
public List<Node> fail(List<Node> nodes, ApplicationTransaction transaction) {
return fail(nodes, Agent.application, "Failed by application", transaction.nested());
}
/** Fails the given nodes in their own transaction and returns them in their new state */
public List<Node> fail(List<Node> nodes, Agent agent, String reason) {
    NestedTransaction transaction = new NestedTransaction();
    nodes = fail(nodes, agent, reason, transaction);
    transaction.commit(); // was "transaction.commit();;" — stray empty statement removed
    return nodes;
}
/** Moves the given nodes to failed, clearing any wantToFail flag as part of the same write */
private List<Node> fail(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
nodes = nodes.stream()
.map(n -> n.withWantToFail(false, agent, clock.instant()))
.collect(Collectors.toList());
return db.writeTo(Node.State.failed, nodes, agent, Optional.of(reason), transaction);
}
/** Moves the given nodes to the dirty state (or parked, see parkOnDeallocationOf), under the appropriate locks */
public List<Node> deallocate(List<Node> nodes, Agent agent, String reason) {
return performOn(NodeList.copyOf(nodes), (node, lock) -> deallocate(node, agent, reason));
}
/** Deallocates the node with the given hostname and, if it is a host, all of its children */
public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) {
Node nodeToDirty = node(hostname).orElseThrow(() ->
new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));
// For hosts, include all children; skip nodes already dirty
List<Node> nodesToDirty =
(nodeToDirty.type().isHost() ?
Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
Stream.of(nodeToDirty))
.filter(node -> node.state() != Node.State.dirty)
.collect(Collectors.toList());
// All affected nodes must be in a state which permits deallocation, or we do nothing
List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
.filter(node -> node.state() != Node.State.provisioned)
.filter(node -> node.state() != Node.State.failed)
.filter(node -> node.state() != Node.State.parked)
.filter(node -> node.state() != Node.State.breakfixed)
.map(Node::hostname)
.collect(Collectors.toList());
if ( ! hostnamesNotAllowedToDirty.isEmpty())
illegal("Could not deallocate " + nodeToDirty + ": " +
hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]");
return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).collect(Collectors.toList());
}
/**
 * Set a node dirty or parked, allowed if it is in the provisioned, inactive, failed or parked state.
 * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
 */
public Node deallocate(Node node, Agent agent, String reason) {
NestedTransaction transaction = new NestedTransaction();
Node deallocated = deallocate(node, agent, reason, transaction);
transaction.commit();
return deallocated;
}
/** Deallocates each of the given nodes as part of the given transaction */
public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
return nodes.stream().map(node -> deallocate(node, agent, reason, transaction)).collect(Collectors.toList());
}
/** Deallocates the node as part of the given transaction: parks it if it should be held, moves it to dirty otherwise */
public Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) {
    return parkOnDeallocationOf(node, agent)
           ? park(node.hostname(), false, agent, reason, transaction)
           : db.writeTo(Node.State.dirty, List.of(node), agent, Optional.of(reason), transaction).get(0);
}
/**
 * Fails this node, keeping its allocation, and returns it in its new state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node fail(String hostname, Agent agent, String reason) {
return fail(hostname, true, agent, reason);
}
/** Fails this node, optionally discarding its allocation, and returns it in its new state */
public Node fail(String hostname, boolean keepAllocation, Agent agent, String reason) {
return move(hostname, Node.State.failed, agent, keepAllocation, Optional.of(reason));
}
/**
 * Fails all the nodes that are children of hostname before finally failing the hostname itself.
 * Non-active nodes are failed immediately, while active nodes are marked as wantToFail.
 * The host is failed if it has no active nodes and marked wantToFail if it has.
 *
 * @return all the nodes that were changed by this request
 */
public List<Node> failOrMarkRecursively(String hostname, Agent agent, String reason) {
NodeList children = list().childrenOf(hostname);
List<Node> changed = performOn(children, (node, lock) -> failOrMark(node, agent, reason, lock));
// Only fail the host outright when none of its children are active
if (children.state(Node.State.active).isEmpty())
changed.add(move(hostname, Node.State.failed, agent, true, Optional.of(reason)));
else
changed.addAll(performOn(NodeList.of(node(hostname).orElseThrow()), (node, lock) -> failOrMark(node, agent, reason, lock)));
return changed;
}
/** Marks an active node as wantToFail; fails any other node immediately. Returns the changed node. */
private Node failOrMark(Node node, Agent agent, String reason, Mutex lock) {
    if (node.state() != Node.State.active)
        return move(node.hostname(), Node.State.failed, agent, true, Optional.of(reason));
    Node marked = node.withWantToFail(true, agent, clock.instant());
    write(marked, lock);
    return marked;
}
/**
 * Parks this node and returns it in its new state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
NestedTransaction transaction = new NestedTransaction();
Node parked = park(hostname, keepAllocation, agent, reason, transaction);
transaction.commit();
return parked;
}
/** Parks this node as part of the given transaction and returns it in its new state */
public Node park(String hostname, boolean keepAllocation, Agent agent, String reason, NestedTransaction transaction) {
return move(hostname, Node.State.parked, agent, keepAllocation, Optional.of(reason), transaction);
}
/**
 * Parks all the nodes that are children of hostname before finally parking the hostname itself.
 *
 * @return List of all the parked nodes in their new state
 */
public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason));
}
/**
 * Moves a previously failed or parked node back to the active state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node reactivate(String hostname, Agent agent, String reason) {
return move(hostname, Node.State.active, agent, true, Optional.of(reason));
}
/**
 * Moves a host to breakfixed state, removing any children.
 */
public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) {
Node node = requireNode(hostname);
try (Mutex lock = lockUnallocated()) {
requireBreakfixable(node);
// Remove children and move the host in one transaction
NestedTransaction transaction = new NestedTransaction();
List<Node> removed = removeChildren(node, false, transaction);
removed.add(move(node.hostname(), Node.State.breakfixed, agent, true, Optional.of(reason), transaction));
transaction.commit();
return removed;
}
}
/** Moves all children of the given hostname, then the hostname itself, to the given state in one transaction */
private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
    NestedTransaction transaction = new NestedTransaction();
    List<Node> moved = new ArrayList<>();
    for (Node child : list().childrenOf(hostname))
        moved.add(move(child.hostname(), toState, agent, true, reason, transaction));
    moved.add(move(hostname, toState, agent, true, reason, transaction));
    transaction.commit();
    return moved;
}
/** Moves a node to the given state in its own transaction and returns it in its new state */
private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason) {
NestedTransaction transaction = new NestedTransaction();
Node moved = move(hostname, toState, agent, keepAllocation, reason, transaction);
transaction.commit();
return moved;
}
/** Moves a node to the given state as part of a transaction, validating the transition first */
private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason, NestedTransaction transaction) {
try (NodeMutex lock = lockAndGetRequired(hostname)) {
Node node = lock.node();
if (toState == Node.State.active) {
// Activation requires an allocation, and no other active node may occupy the same cluster slot
if (node.allocation().isEmpty()) illegal("Could not set " + node + " active: It has no allocation");
if (!keepAllocation) illegal("Could not set " + node + " active: Requested to discard allocation");
for (Node currentActive : list(Node.State.active).owner(node.allocation().get().owner())) {
if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
&& node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
}
}
if (!keepAllocation && node.allocation().isPresent()) {
node = node.withoutAllocation();
}
if (toState == Node.State.deprovisioned) {
node = node.with(IP.Config.EMPTY); // release the node's IP addresses
}
return db.writeTo(toState, List.of(node), agent, reason, transaction).get(0);
}
}
/*
 * This method is used by the REST API to handle readying nodes for new allocations. For Linux
 * containers this will remove the node from node repository, otherwise the node will be moved to state ready.
 */
public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
Node node = requireNode(hostname);
if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
// Container nodes are simply removed; they are re-created on next allocation
if (node.state() != Node.State.dirty)
illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
return removeRecursively(node, true).get(0);
}
if (node.state() == Node.State.ready) return node;
// A node can only be readied if its parent host (if any) has no hard failures
Node parentHost = node.parentHostname().flatMap(this::node).orElse(node);
List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost);
if ( ! failureReasons.isEmpty())
illegal(node + " cannot be readied because it has hard failures: " + failureReasons);
return setReady(List.of(node), agent, reason).get(0);
}
/**
 * Removes all the nodes that are children of hostname before finally removing the hostname itself.
 *
 * @return a List of all the nodes that have been removed or (for hosts) deprovisioned
 */
public List<Node> removeRecursively(String hostname) {
Node node = requireNode(hostname);
return removeRecursively(node, false);
}
/** Removes the given node and (for hosts) its children; with force, state checks are skipped */
public List<Node> removeRecursively(Node node, boolean force) {
try (Mutex lock = lockUnallocated()) {
requireRemovable(node, false, force);
NestedTransaction transaction = new NestedTransaction();
final List<Node> removed;
if (!node.type().isHost()) {
removed = List.of(node);
db.removeNodes(removed, transaction);
} else {
removed = removeChildren(node, force, transaction);
// With dynamic provisioning hosts are removed outright; otherwise they are
// kept in the deprovisioned state so their history is preserved
if (zone.getCloud().dynamicProvisioning()) {
db.removeNodes(List.of(node), transaction);
} else {
move(node.hostname(), Node.State.deprovisioned, Agent.system, false, Optional.empty(), transaction);
}
removed.add(node);
}
transaction.commit();
return removed;
}
}
/** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */
public void forget(Node node) {
    if (node.state() != Node.State.deprovisioned)
        throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten");
    NestedTransaction removal = new NestedTransaction();
    db.removeNodes(List.of(node), removal);
    removal.commit();
}
/** Removes all children of the given node as part of the given transaction, and returns them */
private List<Node> removeChildren(Node node, boolean force, NestedTransaction transaction) {
List<Node> children = list().childrenOf(node).asList();
children.forEach(child -> requireRemovable(child, true, force));
db.removeNodes(children, transaction);
return new ArrayList<>(children);
}
/**
 * Throws if the given node cannot be removed. Removal is allowed if:
 * - Tenant node:
 * - non-recursively: node is unallocated
 * - recursively: node is unallocated or node is in failed|parked
 * - Host node: iff in state provisioned|failed|parked
 * - Child node:
 * - non-recursively: node in state ready
 * - recursively: child is in state provisioned|failed|parked|dirty|ready
 */
private void requireRemovable(Node node, boolean removingRecursively, boolean force) {
if (force) return; // forced removal skips all checks
// Allocated tenant nodes may only be removed recursively, and only from failed or parked
if (node.type() == NodeType.tenant && node.allocation().isPresent()) {
EnumSet<Node.State> removableStates = EnumSet.of(Node.State.failed, Node.State.parked);
if (!removingRecursively || !removableStates.contains(node.state()))
illegal(node + " is currently allocated and cannot be removed while in " + node.state());
}
final Set<Node.State> removableStates;
if (node.type().isHost()) {
removableStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked);
} else {
removableStates = removingRecursively
? EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked, Node.State.dirty, Node.State.ready)
: EnumSet.of(Node.State.ready);
}
if (!removableStates.contains(node.state()))
illegal(node + " can not be removed while in " + node.state());
}
/**
 * Throws if given node cannot be breakfixed.
 * Breakfix is allowed if the following is true:
 * - Node is tenant host
 * - Node is in zone without dynamic provisioning
 * - Node is in parked or failed state
 */
private void requireBreakfixable(Node node) {
    if (zone.getCloud().dynamicProvisioning())
        illegal("Can not breakfix in zone: " + zone);
    if (node.type() != NodeType.host)
        illegal(node + " can not be breakfixed as it is not a tenant host");
    Set<Node.State> legalStates = EnumSet.of(Node.State.failed, Node.State.parked);
    if ( ! legalStates.contains(node.state()))
        illegal(node + " can not be removed as it is not in the states " + legalStates);
}
/**
 * Increases the restart generation of the active nodes matching the filter.
 *
 * @return the nodes in their new state
 */
public List<Node> restart(Predicate<Node> filter) {
return performOn(StateFilter.from(Node.State.active).and(filter),
(node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()),
lock));
}
/**
 * Increases the reboot generation of the nodes matching the filter.
 *
 * @return the nodes in their new state
 */
public List<Node> reboot(Predicate<Node> filter) {
return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock));
}
/**
 * Set target OS version of all nodes matching given filter.
 *
 * @return the nodes in their new state
 */
public List<Node> upgradeOs(Predicate<Node> filter, Optional<Version> version) {
    return performOn(filter, (node, lock) -> {
        var osVersion = node.status().osVersion().withWanted(version);
        return write(node.with(node.status().withOsVersion(osVersion)), lock);
    });
}
/** Marks the nodes matching the given filter as wanting to retire */
public List<Node> retire(Predicate<Node> filter, Agent agent, Instant instant) {
return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock));
}
/** Retires and deprovisions the given host and all of its children */
public List<Node> deprovision(String hostname, Agent agent, Instant instant) {
return decommission(hostname, DecommissionOperation.deprovision, agent, instant);
}
/** Retires and rebuilds the given host and all of its children */
public List<Node> rebuild(String hostname, Agent agent, Instant instant) {
return decommission(hostname, DecommissionOperation.rebuild, agent, instant);
}
/** Retires and encrypts the given host and all of its children */
public List<Node> encrypt(String hostname, Agent agent, Instant instant) {
return decommission(hostname, DecommissionOperation.encrypt, agent, instant);
}
/**
 * Writes this node after it has changed some internal state but NOT changed its state field.
 * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
 *
 * @param lock already acquired lock
 * @return the written node for convenience
 */
public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); }
/**
 * Writes these nodes after they have changed some internal state but NOT changed their state field.
 * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock;
 * the lock parameter exists only to make that requirement explicit at the call site.
 *
 * @param lock already acquired lock
 * @return the written nodes for convenience
 */
public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
return db.writeTo(nodes, Agent.system, Optional.empty());
}
/** Performs an operation requiring locking on all nodes matching the given predicate */
private List<Node> performOn(Predicate<Node> filter, BiFunction<Node, Mutex, Node> action) {
return performOn(list().matching(filter), action);
}
/**
 * Performs an operation requiring locking on all nodes matching some filter.
 *
 * @param action the action to perform
 * @return the set of nodes on which the action was performed, as they became as a result of the operation
 */
private List<Node> performOn(NodeList nodes, BiFunction<Node, Mutex, Node> action) {
List<Node> unallocatedNodes = new ArrayList<>();
ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
// Partition the nodes by owning application (or no owner), so each group can be
// processed under the single lock appropriate for it
for (Node node : nodes) {
if (node.allocation().isPresent())
allocatedNodes.put(node.allocation().get().owner(), node);
else
unallocatedNodes.add(node);
}
List<Node> resultingNodes = new ArrayList<>();
// Unallocated nodes: act under the unallocated-nodes lock
try (Mutex lock = lockUnallocated()) {
for (Node node : unallocatedNodes) {
Optional<Node> currentNode = db.readNode(node.hostname()); // re-read under lock for a fresh copy
if (currentNode.isEmpty()) continue; // node disappeared since listing: skip
resultingNodes.add(action.apply(currentNode.get(), lock));
}
}
// Allocated nodes: act under each owning application's lock, one acquisition per application
for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
try (Mutex lock = lock(applicationNodes.getKey())) {
for (Node node : applicationNodes.getValue()) {
Optional<Node> currentNode = db.readNode(node.hostname()); // re-read under lock for a fresh copy
if (currentNode.isEmpty()) continue; // node disappeared since listing: skip
resultingNodes.add(action.apply(currentNode.get(), lock));
}
}
}
return resultingNodes;
}
/** Returns whether a tenant node could, in principle, be allocated on the given host, in this zone */
public boolean canAllocateTenantNodeTo(Node host) {
return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning());
}
/** Returns whether a tenant node could, in principle, be allocated on the given host */
public static boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) {
    // The host must be able to run tenant nodes and must not be on its way out
    boolean retired = host.allocation().map(allocation -> allocation.membership().retired()).orElse(false);
    if ( ! host.type().canRun(NodeType.tenant) || host.status().wantToRetire() || retired) return false;
    // With dynamic provisioning, hosts which are not yet active may also receive allocations
    return dynamicProvisioning
           ? EnumSet.of(Node.State.active, Node.State.ready, Node.State.provisioned).contains(host.state())
           : host.state() == Node.State.active;
}
/** Creates a lock which provides exclusive rights to making changes to the nodes of the given application */
public Mutex lock(ApplicationId application) {
return db.lock(application);
}
/** Creates a lock with a timeout which provides exclusive rights to making changes to the nodes of the given application */
public Mutex lock(ApplicationId application, Duration timeout) {
return db.lock(application, timeout);
}
/** Creates a lock which provides exclusive rights to modifying unallocated nodes */
public Mutex lockUnallocated() { return db.lockInactive(); }
/**
 * Returns the unallocated/application lock, and the node acquired under that lock.
 *
 * The correct lock depends on the node's owner, which may change between reading the node and
 * acquiring the lock, so this locks optimistically and retries (up to 4 times) whenever the
 * owner observed under the lock differs from the owner the lock was chosen by.
 *
 * @return the fresh node together with its held lock, or empty if the node no longer exists
 */
public Optional<NodeMutex> lockAndGet(Node node) {
Node staleNode = node;
final int maxRetries = 4;
for (int i = 0; i < maxRetries; ++i) {
Mutex lockToClose = lock(staleNode);
try {
// Re-read the node under the lock, first in its last observed state, then in any state
Optional<Node> freshNode = node(staleNode.hostname(), staleNode.state());
if (freshNode.isEmpty()) {
freshNode = node(staleNode.hostname());
if (freshNode.isEmpty()) {
return Optional.empty(); // node is gone
}
}
// Owner unchanged: we hold the right lock — hand its ownership over to the NodeMutex
if (Objects.equals(freshNode.get().allocation().map(Allocation::owner),
staleNode.allocation().map(Allocation::owner))) {
NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose);
lockToClose = null; // transferred to nodeMutex: must not be closed in the finally below
return Optional.of(nodeMutex);
}
staleNode = freshNode.get(); // owner changed: retry with the fresh copy
} finally {
if (lockToClose != null) lockToClose.close();
}
}
throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " +
"fetching an up to date node under lock: " + node.hostname());
}
/** Returns the unallocated/application lock, and the node acquired under that lock, or empty if the node does not exist. */
public Optional<NodeMutex> lockAndGet(String hostname) {
return node(hostname).flatMap(this::lockAndGet);
}
/** Returns the unallocated/application lock, and the node acquired under that lock. Throws NoSuchNodeException if the node is gone. */
public NodeMutex lockAndGetRequired(Node node) {
return lockAndGet(node).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + node.hostname() + "'"));
}
/** As {@link #lockAndGet(String)}, but throws {@code NoSuchNodeException} when the node is gone. */
public NodeMutex lockAndGetRequired(String hostname) {
    return lockAndGet(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
}
/** Picks the application lock when the node is allocated, the unallocated lock otherwise. */
private Mutex lock(Node node) {
    return node.allocation()
               .map(allocation -> lock(allocation.owner()))
               .orElseGet(this::lockUnallocated);
}
/** Looks up a node by hostname, throwing {@code NoSuchNodeException} when absent. */
private Node requireNode(String hostname) {
    Optional<Node> node = node(hostname);
    return node.orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
}
/** Convenience helper for rejecting an illegal argument with the given message. */
private void illegal(String message) { throw new IllegalArgumentException(message); }
/** Returns whether node should be parked when deallocated by given agent */
private static boolean parkOnDeallocationOf(Node node, Agent agent) {
    if (node.state() == Node.State.parked) return false; // already parked
    if (agent == Agent.operator) return false;           // operator-initiated deallocation goes straight to dirty
    // Retirement only causes parking when the retire request came from an operator
    boolean retirementRequestedByOperator = node.status().wantToRetire() &&
                                            node.history().event(History.Event.Type.wantToRetire)
                                                .map(History.Event::agent)
                                                .map(a -> a == Agent.operator)
                                                .orElse(false);
    return node.status().wantToDeprovision() ||
           node.status().wantToRebuild() ||
           retirementRequestedByOperator;
}
/** The different ways a host can be decommissioned */
private enum DecommissionOperation {
    deprovision, // remove the host from the repository
    rebuild,     // rebuild the host
    encrypt,     // encrypt the host's disk
}
} | class Nodes {
private static final Logger log = Logger.getLogger(Nodes.class.getName());

private final Zone zone;                 // the zone this node repository serves
private final Clock clock;               // time source for history/retire timestamps
private final CuratorDatabaseClient db;  // persistent store for all node state
/** Creates a node collection backed by the given store, operating in the given zone. */
public Nodes(CuratorDatabaseClient db, Zone zone, Clock clock) {
    this.db = db;
    this.zone = zone;
    this.clock = clock;
}
/** Reads and writes back every node, so that all are persisted in the latest serialization format. */
public void rewrite() {
    Instant start = clock.instant();
    int written = 0;
    for (Node.State state : Node.State.values()) {
        List<Node> nodesInState = db.readNodes(state);
        db.writeTo(state, nodesInState, Agent.system, Optional.empty());
        written += nodesInState.size();
    }
    log.log(Level.INFO, String.format("Rewrote %d nodes in %s", written, Duration.between(start, clock.instant())));
}
/**
 * Looks up the node with the given hostname, restricted to the given states.
 *
 * @param hostname the full host name of the node
 * @param inState  the states the node may be in; all states are accepted when none are given
 * @return the matching node, or empty if no such node exists in an accepted state
 */
public Optional<Node> node(String hostname, Node.State... inState) {
    return db.readNode(hostname, inState);
}
/**
 * Lists the nodes of this repository which are in any of the given states.
 *
 * @param inState the states to include; all states are included when none are given
 */
public NodeList list(Node.State... inState) {
    return NodeList.copyOf(db.readNodes(inState));
}
/** Returns all nodes of this repository as a list tied to the given, already acquired lock. */
public LockedNodeList list(Mutex lock) {
    List<Node> allNodes = list().asList();
    return new LockedNodeList(allNodes, lock);
}
/**
 * Returns whether the zone managed by this node repository seems to be working.
 * If too many nodes are not responding, there is probably some zone-wide issue
 * and we should probably refrain from making changes to it.
 */
public boolean isWorking() {
    NodeList activeNodes = list(Node.State.active);
    if (activeNodes.size() <= 5) return true; // too few nodes to draw a conclusion
    double downRatio = (double) activeNodes.down().size() / (double) activeNodes.size();
    return downRatio <= 0.2;
}
/** Adds a list of newly created reserved nodes to the node repository */
public List<Node> addReservedNodes(LockedNodeList nodes) {
    for (Node node : nodes) {
        // Only child (container) nodes may be added directly in reserved, and they must be allocated
        if ( node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
            illegal("Cannot add " + node + ": This is not a child node");
        if (node.allocation().isEmpty())
            illegal("Cannot add " + node + ": Child nodes need to be allocated");
        Optional<Node> existing = node(node.hostname());
        if (existing.isPresent())
            illegal("Cannot add " + node + ": A node with this name already exists (" +
                    existing.get() + ", " + existing.get().history() + "). Node to be added: " +
                    node + ", " + node.history());
    }
    return db.addNodesInState(nodes.asList(), Node.State.reserved, Agent.system);
}
/**
 * Adds a list of (newly created) nodes to the node repository as provisioned nodes.
 * If any of the nodes already exists in the deprovisioned state, the new node will be merged
 * with the history of that node.
 */
public List<Node> addNodes(List<Node> nodes, Agent agent) {
    try (Mutex lock = lockUnallocated()) {
        List<Node> nodesToAdd =  new ArrayList<>();
        List<Node> nodesToRemove = new ArrayList<>();
        for (int i = 0; i < nodes.size(); i++) {
            var node = nodes.get(i);

            // Reject duplicates within the argument list itself
            for (int j = 0; j < i; j++) {
                if (node.equals(nodes.get(j)))
                    illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
            }

            Optional<Node> existing = node(node.hostname());
            if (existing.isPresent()) {
                // Only a deprovisioned node may be re-added; carry its history over to the new node
                if (existing.get().state() != Node.State.deprovisioned)
                    illegal("Cannot add " + node + ": A node with this name already exists");
                node = node.with(existing.get().history());
                node = node.with(existing.get().reports());
                node = node.with(node.status().withFailCount(existing.get().status().failCount()));
                if (existing.get().status().firmwareVerifiedAt().isPresent())
                    node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get()));
                nodesToRemove.add(existing.get()); // the old deprovisioned entry is replaced by the new node
            }
            nodesToAdd.add(node);
        }
        // Add the new nodes and remove the replaced deprovisioned entries atomically
        NestedTransaction transaction = new NestedTransaction();
        List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), Node.State.provisioned, agent, transaction);
        db.removeNodes(nodesToRemove, transaction);
        transaction.commit();
        return resultingNodes;
    }
}
/** Moves the given provisioned or dirty nodes to ready, resetting any retire request, and returns them. */
public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
    try (Mutex lock = lockUnallocated()) {
        List<Node> nodesWithResetFields = new ArrayList<>(nodes.size());
        for (Node node : nodes) {
            if (node.state() != Node.State.provisioned && node.state() != Node.State.dirty)
                illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
            nodesWithResetFields.add(node.withWantToRetire(false,
                                                          false,
                                                          false,
                                                          Agent.system,
                                                          clock.instant()));
        }
        return db.writeTo(Node.State.ready, nodesWithResetFields, agent, Optional.of(reason));
    }
}
/** Moves the node with the given hostname to ready; a no-op if it already is ready. */
public Node setReady(String hostname, Agent agent, String reason) {
    Node node = requireNode(hostname);
    if (node.state() == Node.State.ready) return node;
    return setReady(List.of(node), agent, reason).get(0);
}
/** Moves the given nodes to reserved. This method does <b>not</b> lock the node repository. */
public List<Node> reserve(List<Node> nodes) {
    return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
}
/** Moves the given nodes to active within the given transaction. This method does <b>not</b> lock the node repository. */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
    return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
}
/**
 * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
 *
 * @param application the application the nodes belong to
 * @param nodes the nodes to make removable. These nodes MUST be in the active state.
 */
public void setRemovable(ApplicationId application, List<Node> nodes) {
    try (Mutex lock = lock(application)) {
        List<Node> removableNodes = nodes.stream()
                                         // Nodes are required to be active, so allocation is present here
                                         .map(node -> node.with(node.allocation().get().removable(true)))
                                         .collect(Collectors.toList());
        write(removableNodes, lock);
    }
}
/**
 * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the
 * transaction commits. Stateless nodes are deallocated outright, while stateful nodes become inactive.
 */
public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) {
    NodeList nodeList = NodeList.copyOf(nodes);
    List<Node> written = new ArrayList<>();
    written.addAll(deallocate(nodeList.stateless().asList(), Agent.application, "Deactivated by application", transaction.nested()));
    written.addAll(db.writeTo(Node.State.inactive, nodeList.stateful().asList(), Agent.application, Optional.empty(), transaction.nested()));
    return written;
}
/**
 * Fails these nodes in a transaction and returns the nodes in the new state which will hold if the
 * transaction commits.
 */
public List<Node> fail(List<Node> nodes, ApplicationTransaction transaction) {
    NestedTransaction nested = transaction.nested();
    return fail(nodes, Agent.application, "Failed by application", nested);
}
/**
 * Fails the given nodes in a single committed transaction.
 *
 * @return the nodes in their new (failed) state
 */
public List<Node> fail(List<Node> nodes, Agent agent, String reason) {
    NestedTransaction transaction = new NestedTransaction();
    nodes = fail(nodes, agent, reason, transaction);
    transaction.commit(); // stray empty statement (';;') removed
    return nodes;
}
/** Moves nodes to failed through the given transaction, clearing any pending wantToFail request. */
private List<Node> fail(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
    nodes = nodes.stream()
                 // Reset wantToFail: the request is fulfilled by this move
                 .map(n -> n.withWantToFail(false, agent, clock.instant()))
                 .collect(Collectors.toList());
    return db.writeTo(Node.State.failed, nodes, agent, Optional.of(reason), transaction);
}
/** Deallocates each of the given nodes (to dirty, or parked when required), under the appropriate locks. */
public List<Node> deallocate(List<Node> nodes, Agent agent, String reason) {
    return performOn(NodeList.copyOf(nodes), (n, l) -> deallocate(n, agent, reason));
}
/** Deallocates the given node and, if it is a host, all of its children. */
public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) {
    Node nodeToDirty = node(hostname).orElseThrow(() ->
            new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));

    // Hosts are deallocated together with all of their children; nodes already dirty are skipped
    List<Node> nodesToDirty =
            (nodeToDirty.type().isHost() ?
             Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
             Stream.of(nodeToDirty))
            .filter(node -> node.state() != Node.State.dirty)
            .collect(Collectors.toList());

    // Refuse to proceed if any node involved is in a state that cannot be deallocated
    List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
                                                          .filter(node -> node.state() != Node.State.provisioned)
                                                          .filter(node -> node.state() != Node.State.failed)
                                                          .filter(node -> node.state() != Node.State.parked)
                                                          .filter(node -> node.state() != Node.State.breakfixed)
                                                          .map(Node::hostname)
                                                          .collect(Collectors.toList());
    if ( ! hostnamesNotAllowedToDirty.isEmpty())
        illegal("Could not deallocate " + nodeToDirty + ": " +
                hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]");

    return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).collect(Collectors.toList());
}
/**
 * Set a node dirty or parked, allowed if it is in the provisioned, inactive, failed or parked state.
 * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
 */
public Node deallocate(Node node, Agent agent, String reason) {
    NestedTransaction transaction = new NestedTransaction();
    Node result = deallocate(node, agent, reason, transaction);
    transaction.commit();
    return result;
}
/** Deallocates each of the given nodes as part of the given transaction. */
public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
    List<Node> deallocated = new ArrayList<>(nodes.size());
    for (Node node : nodes)
        deallocated.add(deallocate(node, agent, reason, transaction));
    return deallocated;
}
/** Moves the node to dirty, or to parked when the node's status requires parking on deallocation. */
public Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) {
    if (parkOnDeallocationOf(node, agent)) {
        // Keep the allocation while parked when the node has a pending encryption report
        boolean keepAllocation = node.reports().getReport(Report.WANT_TO_ENCRYPT_ID).isPresent();
        return park(node.hostname(), keepAllocation, agent, reason, transaction);
    } else {
        return db.writeTo(Node.State.dirty, List.of(node), agent, Optional.of(reason), transaction).get(0);
    }
}
/**
 * Fails this node and returns it in its new state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node fail(String hostname, Agent agent, String reason) {
    return fail(hostname, true, agent, reason); // keep the allocation by default
}
/** Fails this node, optionally discarding its allocation, and returns it in its new state. */
public Node fail(String hostname, boolean keepAllocation, Agent agent, String reason) {
    return move(hostname, Node.State.failed, agent, keepAllocation, Optional.of(reason));
}
/**
 * Fails all the nodes that are children of hostname before finally failing the hostname itself.
 * Non-active nodes are failed immediately, while active nodes are marked as wantToFail.
 * The host is failed if it has no active nodes and marked wantToFail if it has.
 *
 * @return all the nodes that were changed by this request
 */
public List<Node> failOrMarkRecursively(String hostname, Agent agent, String reason) {
    NodeList children = list().childrenOf(hostname);
    List<Node> changed = performOn(children, (node, lock) -> failOrMark(node, agent, reason, lock));

    // Only fail the host outright when none of its children are active
    if (children.state(Node.State.active).isEmpty())
        changed.add(move(hostname, Node.State.failed, agent, true, Optional.of(reason)));
    else
        changed.addAll(performOn(NodeList.of(node(hostname).orElseThrow()), (node, lock) -> failOrMark(node, agent, reason, lock)));

    return changed;
}
/** Marks an active node as wanting to fail; fails any other node immediately. */
private Node failOrMark(Node node, Agent agent, String reason, Mutex lock) {
    if (node.state() != Node.State.active)
        return move(node.hostname(), Node.State.failed, agent, true, Optional.of(reason));
    Node marked = node.withWantToFail(true, agent, clock.instant());
    write(marked, lock);
    return marked;
}
/**
 * Parks this node and returns it in its new state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
    NestedTransaction transaction = new NestedTransaction();
    Node result = park(hostname, keepAllocation, agent, reason, transaction);
    transaction.commit();
    return result;
}
/** Parks this node as part of the given transaction. */
private Node park(String hostname, boolean keepAllocation, Agent agent, String reason, NestedTransaction transaction) {
    return move(hostname, Node.State.parked, agent, keepAllocation, Optional.of(reason), transaction);
}
/**
 * Parks all the nodes that are children of hostname before finally parking the hostname itself.
 *
 * @return List of all the parked nodes in their new state
 */
public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
    Optional<String> why = Optional.of(reason);
    return moveRecursively(hostname, Node.State.parked, agent, why);
}
/**
 * Moves a previously failed or parked node back to the active state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node reactivate(String hostname, Agent agent, String reason) {
    return move(hostname, Node.State.active, agent, true, Optional.of(reason));
}
/**
 * Moves a host to breakfixed state, removing any children.
 */
public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) {
    Node node = requireNode(hostname);
    try (Mutex lock = lockUnallocated()) {
        requireBreakfixable(node);
        NestedTransaction transaction = new NestedTransaction();
        // Children are removed outright; only the host itself is kept, as breakfixed
        List<Node> removed = removeChildren(node, false, transaction);
        removed.add(move(node.hostname(), Node.State.breakfixed, agent, true, Optional.of(reason), transaction));
        transaction.commit();
        return removed;
    }
}
/** Moves all children of the given host, then the host itself, to the given state in one transaction. */
private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
    NestedTransaction transaction = new NestedTransaction();
    List<Node> moved = new ArrayList<>();
    for (Node child : list().childrenOf(hostname).asList())
        moved.add(move(child.hostname(), toState, agent, true, reason, transaction));
    moved.add(move(hostname, toState, agent, true, reason, transaction));
    transaction.commit();
    return moved;
}
/** Moves a node to the given state in its own committed transaction. */
private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason) {
    NestedTransaction transaction = new NestedTransaction();
    Node result = move(hostname, toState, agent, keepAllocation, reason, transaction);
    transaction.commit();
    return result;
}
/** Move a node to given state as part of a transaction */
private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason, NestedTransaction transaction) {
    // The node is read and written while holding its unallocated/application lock
    try (NodeMutex lock = lockAndGetRequired(hostname)) {
        Node node = lock.node();
        if (toState == Node.State.active) {
            if (node.allocation().isEmpty()) illegal("Could not set " + node + " active: It has no allocation");
            if (!keepAllocation) illegal("Could not set " + node + " active: Requested to discard allocation");
            // Guard against two active nodes of the same application sharing cluster and index
            for (Node currentActive : list(Node.State.active).owner(node.allocation().get().owner())) {
                if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
                    && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
                    illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
            }
        }
        if (!keepAllocation && node.allocation().isPresent()) {
            node = node.withoutAllocation();
        }
        if (toState == Node.State.deprovisioned) {
            node = node.with(IP.Config.EMPTY); // deprovisioned nodes keep no IP config
        }
        return db.writeTo(toState, List.of(node), agent, reason, transaction).get(0);
    }
}
/**
 * This method is used by the REST API to handle readying nodes for new allocations. For Linux
 * containers this will remove the node from node repository, otherwise the node will be moved to state ready.
 */
public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
    Node node = requireNode(hostname);
    if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
        // Container nodes are simply removed; a fresh node is created on next allocation
        if (node.state() != Node.State.dirty)
            illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
        return removeRecursively(node, true).get(0);
    }

    // Do not allow readying a node whose parent host has hard failures
    if (node.state() == Node.State.ready) return node;

    Node parentHost = node.parentHostname().flatMap(this::node).orElse(node);
    List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost);
    if ( ! failureReasons.isEmpty())
        illegal(node + " cannot be readied because it has hard failures: " + failureReasons);

    return setReady(List.of(node), agent, reason).get(0);
}
/**
 * Removes all the nodes that are children of hostname before finally removing the hostname itself.
 *
 * @return a List of all the nodes that have been removed or (for hosts) deprovisioned
 */
public List<Node> removeRecursively(String hostname) {
    return removeRecursively(requireNode(hostname), false);
}
/** Removes this node and any children; hosts are either removed or moved to deprovisioned. */
public List<Node> removeRecursively(Node node, boolean force) {
    try (Mutex lock = lockUnallocated()) {
        requireRemovable(node, false, force);
        NestedTransaction transaction = new NestedTransaction();
        final List<Node> removed;
        if (!node.type().isHost()) {
            removed = List.of(node);
            db.removeNodes(removed, transaction);
        } else {
            removed = removeChildren(node, force, transaction);
            if (zone.getCloud().dynamicProvisioning()) {
                // Dynamically provisioned hosts are deleted outright
                db.removeNodes(List.of(node), transaction);
            } else {
                // Static hosts are kept as deprovisioned, so their history can be merged if re-added
                move(node.hostname(), Node.State.deprovisioned, Agent.system, false, Optional.empty(), transaction);
            }
            removed.add(node);
        }
        transaction.commit();
        return removed;
    }
}
/** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */
public void forget(Node node) {
    if (node.state() != Node.State.deprovisioned)
        throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten");
    NestedTransaction removal = new NestedTransaction();
    db.removeNodes(List.of(node), removal);
    removal.commit();
}
/** Removes all children of the given host in the given transaction, returning them as a mutable list. */
private List<Node> removeChildren(Node node, boolean force, NestedTransaction transaction) {
    List<Node> children = list().childrenOf(node).asList();
    for (Node child : children)
        requireRemovable(child, true, force);
    db.removeNodes(children, transaction);
    return new ArrayList<>(children);
}
/**
 * Throws if the given node cannot be removed. Removal is allowed if:
 *  - Tenant node:
 *    - non-recursively: node is unallocated
 *    - recursively: node is unallocated or node is in failed|parked
 *  - Host node: iff in state provisioned|failed|parked
 *  - Child node:
 *    - non-recursively: node in state ready
 *    - recursively: child is in state provisioned|failed|parked|dirty|ready
 */
private void requireRemovable(Node node, boolean removingRecursively, boolean force) {
    if (force) return; // force overrides all state checks

    // Allocated tenant nodes may only be removed recursively, and only from failed|parked
    if (node.type() == NodeType.tenant && node.allocation().isPresent()) {
        EnumSet<Node.State> removableStates = EnumSet.of(Node.State.failed, Node.State.parked);
        if (!removingRecursively || !removableStates.contains(node.state()))
            illegal(node + " is currently allocated and cannot be removed while in " + node.state());
    }

    final Set<Node.State> removableStates;
    if (node.type().isHost()) {
        removableStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked);
    } else {
        removableStates = removingRecursively
                ? EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked, Node.State.dirty, Node.State.ready)
                // When not removing recursively, only allow removal of a node in ready
                : EnumSet.of(Node.State.ready);
    }
    if (!removableStates.contains(node.state()))
        illegal(node + " can not be removed while in " + node.state());
}
/**
 * Throws if given node cannot be breakfixed.
 * Breakfix is allowed if the following is true:
 *  - Node is tenant host
 *  - Node is in zone without dynamic provisioning
 *  - Node is in parked or failed state
 */
private void requireBreakfixable(Node node) {
    if (zone.getCloud().dynamicProvisioning())
        illegal("Can not breakfix in zone: " + zone);
    if (node.type() != NodeType.host)
        illegal(node + " can not be breakfixed as it is not a tenant host");
    Set<Node.State> legalStates = EnumSet.of(Node.State.failed, Node.State.parked);
    if ( ! legalStates.contains(node.state()))
        illegal(node + " can not be removed as it is not in the states " + legalStates);
}
/**
 * Increases the restart generation of the active nodes matching given filter.
 *
 * @return the nodes in their new state
 */
public List<Node> restartActive(Predicate<Node> filter) {
    Predicate<Node> activeAndMatching = StateFilter.from(Node.State.active).and(filter);
    return restart(activeAndMatching);
}
/**
 * Increases the restart generation of the any nodes matching given filter.
 *
 * @return the nodes in their new state
 */
public List<Node> restart(Predicate<Node> filter) {
    return performOn(filter, (node, lock) -> {
        Node restarted = node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted());
        return write(restarted, lock);
    });
}
/**
 * Increases the reboot generation of the nodes matching the filter.
 *
 * @return the nodes in their new state
 */
public List<Node> reboot(Predicate<Node> filter) {
    return performOn(filter, (node, lock) -> {
        Node rebooting = node.withReboot(node.status().reboot().withIncreasedWanted());
        return write(rebooting, lock);
    });
}
/**
 * Set target OS version of all nodes matching given filter.
 *
 * @return the nodes in their new state
 */
public List<Node> upgradeOs(Predicate<Node> filter, Optional<Version> version) {
    return performOn(filter, (node, lock) ->
            write(node.with(node.status().withOsVersion(node.status().osVersion().withWanted(version))), lock));
}
/** Marks all nodes matching the given filter as wanting to retire. */
public List<Node> retire(Predicate<Node> filter, Agent agent, Instant instant) {
    return performOn(filter, (node, lock) -> {
        Node retiring = node.withWantToRetire(true, agent, instant);
        return write(retiring, lock);
    });
}
/** Retire and deprovision given host and all of its children */
public List<Node> deprovision(String hostname, Agent agent, Instant instant) {
    return decommission(hostname, DecommissionOperation.deprovision, agent, instant);
}
/** Retire and rebuild given host and all of its children */
public List<Node> rebuild(String hostname, Agent agent, Instant instant) {
    return decommission(hostname, DecommissionOperation.rebuild, agent, instant);
}
/** Retire and encrypt given host and all of its children */
public List<Node> encrypt(String hostname, Agent agent, Instant instant) {
    return decommission(hostname, DecommissionOperation.encrypt, agent, instant);
}
/**
 * Writes this node after it has changed some internal state but NOT changed its state field.
 * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
 *
 * @param lock already acquired lock
 * @return the written node for convenience
 */
public Node write(Node node, Mutex lock) {
    return write(List.of(node), lock).get(0);
}
/**
 * Writes these nodes after they have changed some internal state but NOT changed their state field.
 * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
 *
 * @param lock already acquired lock; only required as proof that the caller holds it
 * @return the written nodes for convenience
 */
public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
    return db.writeTo(nodes, Agent.system, Optional.empty());
}
/** Applies the given action, under the appropriate lock, to all nodes matching the filter. */
private List<Node> performOn(Predicate<Node> filter, BiFunction<Node, Mutex, Node> action) {
    NodeList matching = list().matching(filter);
    return performOn(matching, action);
}
/**
 * Performs an operation requiring locking on all nodes matching some filter.
 *
 * @param action the action to perform
 * @return the set of nodes on which the action was performed, as they became as a result of the operation
 */
private List<Node> performOn(NodeList nodes, BiFunction<Node, Mutex, Node> action) {
    List<Node> unallocatedNodes = new ArrayList<>();
    ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();

    // Group the nodes by the lock that must be held while operating on them
    for (Node node : nodes) {
        if (node.allocation().isPresent())
            allocatedNodes.put(node.allocation().get().owner(), node);
        else
            unallocatedNodes.add(node);
    }

    // Perform the action while holding the appropriate lock, re-reading each node under the lock
    List<Node> resultingNodes = new ArrayList<>();
    try (Mutex lock = lockUnallocated()) {
        for (Node node : unallocatedNodes) {
            Optional<Node> currentNode = db.readNode(node.hostname()); // re-read under lock
            if (currentNode.isEmpty()) continue; // node disappeared before the lock was acquired
            resultingNodes.add(action.apply(currentNode.get(), lock));
        }
    }
    for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
        try (Mutex lock = lock(applicationNodes.getKey())) {
            for (Node node : applicationNodes.getValue()) {
                Optional<Node> currentNode = db.readNode(node.hostname()); // re-read under lock
                if (currentNode.isEmpty()) continue;
                resultingNodes.add(action.apply(currentNode.get(), lock));
            }
        }
    }
    return resultingNodes;
}
/** Returns whether a tenant node may currently be allocated to the given host in this zone. */
public boolean canAllocateTenantNodeTo(Node host) {
    boolean dynamicProvisioning = zone.getCloud().dynamicProvisioning();
    return canAllocateTenantNodeTo(host, dynamicProvisioning);
}
/** Returns whether a tenant node may be allocated to the given host, given the provisioning mode. */
public static boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) {
    if ( ! host.type().canRun(NodeType.tenant)) return false;
    if (host.status().wantToRetire()) return false;
    if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false;

    return dynamicProvisioning
            ? EnumSet.of(Node.State.active, Node.State.ready, Node.State.provisioned).contains(host.state())
            : host.state() == Node.State.active;
}
/** Acquires the lock giving exclusive rights to change the given application's nodes. */
public Mutex lock(ApplicationId application) { return db.lock(application); }
/** Acquires the application lock as {@link #lock(ApplicationId)}, but giving up after the given timeout. */
public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); }
/** Acquires the lock giving exclusive rights to modify nodes which have no allocation. */
public Mutex lockUnallocated() {
    return db.lockInactive();
}
/**
 * Returns the unallocated/application lock, and the node acquired under that lock.
 * The correct lock depends on the node's allocation, which may change while we wait for the lock,
 * so the node is re-read under the lock and the acquisition retried if its owner changed.
 */
public Optional<NodeMutex> lockAndGet(Node node) {
    Node staleNode = node;

    final int maxRetries = 4;
    for (int i = 0; i < maxRetries; ++i) {
        // Lock based on the allocation we currently believe the node has
        Mutex lockToClose = lock(staleNode);
        try {
            // Re-read the node under the lock; prefer the last observed state, fall back to any state
            Optional<Node> freshNode = node(staleNode.hostname(), staleNode.state());
            if (freshNode.isEmpty()) {
                freshNode = node(staleNode.hostname());
                if (freshNode.isEmpty()) {
                    return Optional.empty();
                }
            }

            // Owner unchanged: we hold the right lock — transfer its ownership into the NodeMutex
            if (Objects.equals(freshNode.get().allocation().map(Allocation::owner),
                               staleNode.allocation().map(Allocation::owner))) {
                NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose);
                lockToClose = null; // NodeMutex now owns the lock; do not close it in finally
                return Optional.of(nodeMutex);
            }

            // Owner changed while we waited for the lock: retry against the fresh node
            staleNode = freshNode.get();
        } finally {
            if (lockToClose != null) lockToClose.close();
        }
    }

    throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " +
                                    "fetching an up to date node under lock: " + node.hostname());
}
/** Looks up the node by hostname and, if found, acquires its unallocated/application lock. */
public Optional<NodeMutex> lockAndGet(String hostname) {
    Optional<Node> node = node(hostname);
    return node.flatMap(this::lockAndGet);
}
/** As {@link #lockAndGet(Node)}, but throws {@code NoSuchNodeException} when the node is gone. */
public NodeMutex lockAndGetRequired(Node node) {
    return lockAndGet(node).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + node.hostname() + "'"));
}
/** As {@link #lockAndGet(String)}, but throws {@code NoSuchNodeException} when the node is gone. */
public NodeMutex lockAndGetRequired(String hostname) {
    return lockAndGet(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
}
/** Picks the application lock when the node is allocated, the unallocated lock otherwise. */
private Mutex lock(Node node) {
    return node.allocation()
               .map(allocation -> lock(allocation.owner()))
               .orElseGet(this::lockUnallocated);
}
/** Looks up a node by hostname, throwing {@code NoSuchNodeException} when absent. */
private Node requireNode(String hostname) {
    Optional<Node> node = node(hostname);
    return node.orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
}
/** Convenience helper for rejecting an illegal argument with the given message. */
private void illegal(String message) { throw new IllegalArgumentException(message); }
/** Returns whether node should be parked when deallocated by given agent */
private static boolean parkOnDeallocationOf(Node node, Agent agent) {
    if (node.state() == Node.State.parked) return false; // already parked
    if (agent == Agent.operator) return false;           // operator-initiated deallocation goes straight to dirty
    // Retirement only causes parking when the retire request came from an operator
    boolean retirementRequestedByOperator = node.status().wantToRetire() &&
                                            node.history().event(History.Event.Type.wantToRetire)
                                                .map(History.Event::agent)
                                                .map(a -> a == Agent.operator)
                                                .orElse(false);
    // Nodes pending deprovisioning, rebuild or encryption are parked as well
    return node.status().wantToDeprovision() ||
           node.status().wantToRebuild() ||
           node.reports().getReport(Report.WANT_TO_ENCRYPT_ID).isPresent() ||
           retirementRequestedByOperator;
}
/** The different ways a host can be decommissioned */
private enum DecommissionOperation {
    deprovision, // remove the host from the repository
    rebuild,     // rebuild the host
    encrypt,     // encrypt the host's disk
}
} |
Only restart if not already restarting? | private void triggerRestart(NodeList allNodes, NodeType nodeType) {
NodeList hostsReadyToEncrypt = allNodes.nodeType(nodeType).state(Node.State.parked).encrypting();
nodeRepository().nodes().restart(NodeListFilter.from(hostsReadyToEncrypt.asList()));
} | nodeRepository().nodes().restart(NodeListFilter.from(hostsReadyToEncrypt.asList())); | private void triggerRestart(NodeList allNodes, NodeType nodeType) {
NodeList hostsReadyToEncrypt = allNodes.nodeType(nodeType)
.state(Node.State.parked)
.encrypting()
.not().matching(node -> node.allocation().isPresent() &&
node.allocation().get().restartGeneration().pending());
nodeRepository().nodes().restart(NodeListFilter.from(hostsReadyToEncrypt.asList()));
} | class HostEncrypter extends NodeRepositoryMaintainer {
private static final Logger LOG = Logger.getLogger(HostEncrypter.class.getName());

// Feature flag capping how many hosts may be encrypting concurrently
private final IntFlag maxEncryptingHosts;
public HostEncrypter(NodeRepository nodeRepository, Duration interval, Metric metric) {
    super(nodeRepository, interval, metric);
    // Bind the concurrency-limit flag once; its value is re-read on each use
    this.maxEncryptingHosts = Flags.MAX_ENCRYPTING_HOSTS.bindTo(nodeRepository.flagSource());
}
@Override
protected boolean maintain() {
    Instant now = nodeRepository().clock().instant();
    NodeList allNodes = nodeRepository().nodes().list();
    for (var nodeType : NodeType.values()) {
        if (!nodeType.isHost()) continue; // only hosts are encrypted
        // Start encryption on eligible hosts, then restart those that are ready for it
        unencryptedHosts(allNodes, nodeType).forEach(host -> encrypt(host, now));
        triggerRestart(allNodes, nodeType);
    }
    return true;
}
/** Returns unencrypted hosts of given type that can be encrypted */
private List<Node> unencryptedHosts(NodeList allNodes, NodeType hostType) {
    if (!hostType.isHost()) throw new IllegalArgumentException("Expected host type, got " + hostType);
    NodeList hostsOfTargetType =  allNodes.nodeType(hostType);
    int hostLimit = hostLimit(hostsOfTargetType, hostType);

    // Find stateful clusters which already have retiring child nodes
    NodeList activeNodes = allNodes.state(Node.State.active);
    Set<ClusterId> retiringClusters = new HashSet<>(activeNodes.nodeType(hostType.childNodeType())
                                                               .retiring()
                                                               .statefulClusters());

    // Select active, unencrypted, not-yet-encrypting hosts, up to the concurrency limit.
    // NOTE(review): only hosts on OS major version >= 8 are eligible — confirm the version rationale
    List<Node> hostsToEncrypt = new ArrayList<>(hostLimit);
    NodeList candidates = hostsOfTargetType.state(Node.State.active)
                                           .not().encrypted()
                                           .not().encrypting()
                                           .matching(node -> node.status().osVersion().current()
                                                                 .orElse(Version.emptyVersion).getMajor() >= 8);
    for (Node host : candidates) {
        if (hostsToEncrypt.size() == hostLimit) break;
        // Skip hosts carrying stateful clusters that already have retiring nodes elsewhere
        Set<ClusterId> clustersOnHost = activeNodes.childrenOf(host).statefulClusters();
        boolean canEncrypt = Collections.disjoint(retiringClusters, clustersOnHost);
        if (canEncrypt) {
            hostsToEncrypt.add(host);
            retiringClusters.addAll(clustersOnHost); // these clusters will now also have retiring nodes
        }
    }
    return Collections.unmodifiableList(hostsToEncrypt);
}
/** Returns how many additional hosts of the given type may start encrypting now. */
private int hostLimit(NodeList hosts, NodeType hostType) {
    if (hosts.stream().anyMatch(host -> host.type() != hostType)) throw new IllegalArgumentException("All hosts must be a " + hostType);
    // A flag value below 1 disables encryption entirely.
    if (maxEncryptingHosts.value() < 1) return 0;
    int concurrencyLimit;
    if (hostType == NodeType.host) {
        concurrencyLimit = maxEncryptingHosts.value();
    } else {
        // Non-tenant host types encrypt one at a time.
        concurrencyLimit = 1;
    }
    // Subtract hosts already encrypting; never return a negative count.
    return Math.max(0, concurrencyLimit - hosts.encrypting().size());
}
/** Retires the given host and marks it for encryption. */
// NOTE(review): the Javadoc previously here ("Trigger restart of encrypting nodes to allow
// disk encryption to happen") described triggerRestart, not this method.
private void encrypt(Node host, Instant now) {
LOG.info("Retiring and encrypting " + host);
nodeRepository().nodes().encrypt(host.hostname(), Agent.HostEncrypter, now);
}
} | class HostEncrypter extends NodeRepositoryMaintainer {
// Class-wide logger for this maintainer.
private static final Logger LOG = Logger.getLogger(HostEncrypter.class.getName());
// Feature flag bounding how many hosts may be encrypting at the same time.
private final IntFlag maxEncryptingHosts;
/** Creates the maintainer and binds the concurrency-limit flag to the repository's flag source. */
public HostEncrypter(NodeRepository nodeRepository, Duration interval, Metric metric) {
super(nodeRepository, interval, metric);
this.maxEncryptingHosts = Flags.MAX_ENCRYPTING_HOSTS.bindTo(nodeRepository.flagSource());
}
/** Runs one maintenance pass: starts encryption on eligible hosts, then restarts hosts ready to encrypt. */
@Override
protected boolean maintain() {
Instant now = nodeRepository().clock().instant();
NodeList allNodes = nodeRepository().nodes().list();
for (var nodeType : NodeType.values()) {
// Only host node types are handled by this maintainer.
if (!nodeType.isHost()) continue;
unencryptedHosts(allNodes, nodeType).forEach(host -> encrypt(host, now));
triggerRestart(allNodes, nodeType);
}
return true;
}
/** Returns unencrypted hosts of given type that can be encrypted */
// Encrypting retires the host (see encrypt() below), so hosts whose stateful clusters
// already have retiring nodes elsewhere are skipped to avoid retiring two nodes of the
// same cluster at once.
private List<Node> unencryptedHosts(NodeList allNodes, NodeType hostType) {
if (!hostType.isHost()) throw new IllegalArgumentException("Expected host type, got " + hostType);
NodeList hostsOfTargetType = allNodes.nodeType(hostType);
// Upper bound on how many hosts may start encrypting in this pass.
int hostLimit = hostLimit(hostsOfTargetType, hostType);
NodeList activeNodes = allNodes.state(Node.State.active);
// Stateful clusters that already have retiring child nodes.
Set<ClusterId> retiringClusters = new HashSet<>(activeNodes.nodeType(hostType.childNodeType())
.retiring()
.statefulClusters());
List<Node> hostsToEncrypt = new ArrayList<>(hostLimit);
// Candidates: active hosts, not already encrypted or encrypting, on OS major version >= 8.
NodeList candidates = hostsOfTargetType.state(Node.State.active)
.not().encrypted()
.not().encrypting()
.matching(node -> node.status().osVersion().current()
.orElse(Version.emptyVersion).getMajor() >= 8);
for (Node host : candidates) {
if (hostsToEncrypt.size() == hostLimit) break;
Set<ClusterId> clustersOnHost = activeNodes.childrenOf(host).statefulClusters();
// Only pick this host if none of its stateful clusters are already retiring.
boolean canEncrypt = Collections.disjoint(retiringClusters, clustersOnHost);
if (canEncrypt) {
hostsToEncrypt.add(host);
// Reserve this host's clusters so later candidates sharing them are skipped.
retiringClusters.addAll(clustersOnHost);
}
}
return Collections.unmodifiableList(hostsToEncrypt);
}
/** Returns the number of hosts that can encrypt concurrently */
private int hostLimit(NodeList hosts, NodeType hostType) {
if (hosts.stream().anyMatch(host -> host.type() != hostType)) throw new IllegalArgumentException("All hosts must be a " + hostType);
// A flag value below 1 disables encryption entirely.
if (maxEncryptingHosts.value() < 1) return 0;
// Only tenant hosts use the configured limit; other host types encrypt one at a time.
int limit = hostType == NodeType.host ? maxEncryptingHosts.value() : 1;
// Subtract hosts already encrypting; never return a negative count.
return Math.max(0, limit - hosts.encrypting().size());
}
/** Retires the given host and marks it for encryption. */
// NOTE(review): the Javadoc previously here ("Trigger restart of encrypting nodes to allow
// disk encryption to happen") described triggerRestart, not this method.
private void encrypt(Node host, Instant now) {
LOG.info("Retiring and encrypting " + host);
nodeRepository().nodes().encrypt(host.hostname(), Agent.HostEncrypter, now);
}
} |
Nit: instant won't be null so Optional.of is sufficient. | private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
// Identify the deployment and the authenticated user requesting access.
DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
Principal principal = requireUserPrincipal(request);
Instant now = controller.clock().instant();
// Grant support access for the next 7 days, attributed to the requesting user.
SupportAccess allowed = controller.supportAccess().allow(deployment, now.plus(7, ChronoUnit.DAYS), principal.getName());
// 'now' comes straight from the controller clock and is never null, so Optional.of
// (not ofNullable) is the correct wrapper here.
return new SlimeJsonResponse(SupportAccessSerializer.toSlime(allowed, false, Optional.of(now)));
} | return new SlimeJsonResponse(SupportAccessSerializer.toSlime(allowed, false, Optional.ofNullable(now))); | private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
// Identify the deployment and the authenticated user requesting access.
DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
Principal principal = requireUserPrincipal(request);
Instant now = controller.clock().instant();
// Grant support access for the next 7 days, attributed to the requesting user.
SupportAccess allowed = controller.supportAccess().allow(deployment, now.plus(7, ChronoUnit.DAYS), principal.getName());
return new SlimeJsonResponse(SupportAccessSerializer.toSlime(allowed, false, Optional.of(now)));
} | class ApplicationApiHandler extends AuditLoggingRequestHandler {
// Shared Jackson object mapper (no custom configuration applied here).
private static final ObjectMapper jsonMapper = new ObjectMapper();
private final Controller controller;
private final AccessControlRequests accessControlRequests;
private final TestConfigSerializer testConfigSerializer;
/** Container-injected constructor; wires the controller and access-control request parser. */
@Inject
public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx,
Controller controller,
AccessControlRequests accessControlRequests) {
super(parentCtx, controller.auditLogger());
this.controller = controller;
this.accessControlRequests = accessControlRequests;
this.testConfigSerializer = new TestConfigSerializer(controller.system());
}
// Generous request timeout: some operations served by this handler are long-running.
@Override
public Duration getTimeout() {
return Duration.ofMinutes(20);
}
/**
 * Entry point for all requests: dispatches on HTTP method and translates
 * thrown exceptions into the matching HTTP error responses.
 */
@Override
public HttpResponse auditAndHandle(HttpRequest request) {
try {
Path path = new Path(request.getUri());
switch (request.getMethod()) {
case GET: return handleGET(path, request);
case PUT: return handlePUT(path, request);
case POST: return handlePOST(path, request);
case PATCH: return handlePATCH(path, request);
case DELETE: return handleDELETE(path, request);
case OPTIONS: return handleOPTIONS();
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
// Access-control failures: 403 when the caller is known but not allowed, 401 when not authenticated.
catch (ForbiddenException e) {
return ErrorResponse.forbidden(Exceptions.toMessageString(e));
}
catch (NotAuthorizedException e) {
return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
}
catch (NotExistsException e) {
return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
// Map config-server error codes to HTTP statuses; unknown codes become 400 with the code name.
catch (ConfigServerException e) {
switch (e.code()) {
case NOT_FOUND:
return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
case ACTIVATION_CONFLICT:
return new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e));
case INTERNAL_SERVER_ERROR:
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
default:
return new ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e));
}
}
// Last-resort handler: log with stack trace, answer 500.
catch (RuntimeException e) {
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
/**
 * Routes GET requests to the matching endpoint implementation; first match wins.
 * Paths with ".../environment/{environment}/region/{region}/instance/{instance}" are the
 * legacy segment ordering of the ".../instance/{instance}/environment/..." paths above them.
 * Unmatched paths yield 404.
 */
private HttpResponse handleGET(Path path, HttpRequest request) {
if (path.matches("/application/v4/")) return root(request);
if (path.matches("/application/v4/tenant")) return tenants(request);
if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request);
if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/metering")) return metering(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return metrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
// NOTE: a second, byte-identical match for the path below was removed — it was unreachable
// because the first match always wins.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes PUT requests to the matching endpoint implementation; unmatched paths yield 404. */
private HttpResponse handlePUT(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return allowArchiveAccess(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes POST requests to the matching endpoint implementation; unmatched paths yield 404. */
private HttpResponse handlePOST(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
// Application-level deploy paths use the "default" instance.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
// Legacy segment ordering of the instance-first paths above.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/**
 * Routes PATCH requests. Both variants delegate to patchApplication with tenant and
 * application only, so the {instance} path segment is accepted but not passed on.
 */
private HttpResponse handlePATCH(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes DELETE requests to the matching endpoint implementation; unmatched paths yield 404. */
private HttpResponse handleDELETE(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return removeArchiveAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
// Cancelling with no {choice} segment cancels "all".
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
// Legacy segment ordering of the instance-first paths above.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Answers OPTIONS with an empty body and the set of supported methods in the Allow header. */
private HttpResponse handleOPTIONS() {
    EmptyResponse optionsResponse = new EmptyResponse();
    optionsResponse.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
    return optionsResponse;
}
/** Builds the recursive root listing: one entry per tenant, each with that tenant's applications. */
private HttpResponse recursiveRoot(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantArray = slime.setArray();
    List<Application> applications = controller.applications().asList();
    for (Tenant tenant : controller.tenants().asList()) {
        List<Application> ownedByTenant = applications.stream()
                                                      .filter(app -> app.id().tenant().equals(tenant.name()))
                                                      .collect(toList());
        toSlime(tenantArray.addObject(), tenant, ownedByTenant, request);
    }
    return new SlimeJsonResponse(slime);
}
/** Serves the API root: a full recursive listing when requested, otherwise just resource links. */
private HttpResponse root(HttpRequest request) {
    if (recurseOverTenants(request)) {
        return recursiveRoot(request);
    }
    return new ResourceResponse(request, "tenant");
}
/** Lists all tenants as a JSON array. */
private HttpResponse tenants(HttpRequest request) {
    Slime slime = new Slime();
    Cursor response = slime.setArray();
    controller.tenants().asList()
              .forEach(tenant -> tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject()));
    return new SlimeJsonResponse(slime);
}
/** Serializes the named tenant, or responds 404 if it does not exist. */
private HttpResponse tenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    return tenant(tenant.get(), request);
}
/** Serializes the given tenant together with all its applications. */
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    toSlime(root, tenant, controller.applications().asList(tenant.name()), request);
    return new SlimeJsonResponse(slime);
}
/** Returns the tenant info for a cloud tenant; 404 for unknown or non-cloud tenants. */
private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isPresent() && tenant.get().type() == Tenant.Type.cloud)
        return tenantInfo(((CloudTenant) tenant.get()).info(), request);
    return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
}
/**
 * Serializes tenant info to JSON. An empty {@code info} yields an empty object.
 * The {@code request} parameter is currently unused.
 */
private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) {
Slime slime = new Slime();
Cursor infoCursor = slime.setObject();
if (!info.isEmpty()) {
infoCursor.setString("name", info.name());
infoCursor.setString("email", info.email());
infoCursor.setString("website", info.website());
infoCursor.setString("invoiceEmail", info.invoiceEmail());
infoCursor.setString("contactName", info.contactName());
infoCursor.setString("contactEmail", info.contactEmail());
// Nested "address" and "billingContact" objects, written only when non-empty.
toSlime(info.address(), infoCursor);
toSlime(info.billingContact(), infoCursor);
}
return new SlimeJsonResponse(slime);
}
/** Writes a nested "address" object under {@code parentCursor}; writes nothing for an empty address. */
private void toSlime(TenantInfoAddress address, Cursor parentCursor) {
if (address.isEmpty()) return;
Cursor addressCursor = parentCursor.setObject("address");
addressCursor.setString("addressLines", address.addressLines());
addressCursor.setString("postalCodeOrZip", address.postalCodeOrZip());
addressCursor.setString("city", address.city());
addressCursor.setString("stateRegionProvince", address.stateRegionProvince());
addressCursor.setString("country", address.country());
}
/** Writes a nested "billingContact" object under {@code parentCursor}; writes nothing when empty. */
private void toSlime(TenantInfoBillingContact billingContact, Cursor parentCursor) {
if (billingContact.isEmpty()) return;
Cursor addressCursor = parentCursor.setObject("billingContact");
addressCursor.setString("name", billingContact.name());
addressCursor.setString("email", billingContact.email());
addressCursor.setString("phone", billingContact.phone());
// The billing contact's own address becomes a nested "address" object (if non-empty).
toSlime(billingContact.address(), addressCursor);
}
/** Updates tenant info for a cloud tenant; 404 for unknown or non-cloud tenants. */
private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isPresent() && tenant.get().type() == Tenant.Type.cloud)
        return updateTenantInfo((CloudTenant) tenant.get(), request);
    return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
}
/** Returns the string value of {@code field} if it is a valid slime value, otherwise {@code defaultValue}. */
private String getString(Inspector field, String defaultValue) { // renamed from the typo 'defaultVale'
    return field.valid() ? field.asString() : defaultValue;
}
/**
 * Merges the tenant info JSON from the request body into the tenant's existing info and stores it.
 * Fields absent from the request keep their previous values.
 */
private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) {
    TenantInfo oldInfo = tenant.info();
    Inspector insp = toSlime(request.getData()).get();
    TenantInfo mergedInfo = TenantInfo.EMPTY
            .withName(getString(insp.field("name"), oldInfo.name()))
            .withEmail(getString(insp.field("email"), oldInfo.email()))
            .withWebsite(getString(insp.field("website"), oldInfo.website()))           // BUG FIX: fallback was oldInfo.email()
            .withInvoiceEmail(getString(insp.field("invoiceEmail"), oldInfo.invoiceEmail()))
            .withContactName(getString(insp.field("contactName"), oldInfo.contactName()))
            .withContactEmail(getString(insp.field("contactEmail"), oldInfo.contactEmail())) // BUG FIX: fallback was oldInfo.contactName()
            .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address()))
            .withBillingContact(updateTenantInfoBillingContact(insp.field("billingContact"), oldInfo.billingContact()));

    // Store under the tenant lock so concurrent updates cannot clobber each other.
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });

    return new MessageResponse("Tenant info updated");
}
/** Merges an "address" JSON object into the old address; returns the old address unchanged when the field is absent. */
private TenantInfoAddress updateTenantInfoAddress(Inspector insp, TenantInfoAddress oldAddress) {
    if ( ! insp.valid()) return oldAddress;
    TenantInfoAddress merged = TenantInfoAddress.EMPTY;
    merged = merged.withCountry(getString(insp.field("country"), oldAddress.country()));
    merged = merged.withStateRegionProvince(getString(insp.field("stateRegionProvince"), oldAddress.stateRegionProvince()));
    merged = merged.withCity(getString(insp.field("city"), oldAddress.city()));
    merged = merged.withPostalCodeOrZip(getString(insp.field("postalCodeOrZip"), oldAddress.postalCodeOrZip()));
    return merged.withAddressLines(getString(insp.field("addressLines"), oldAddress.addressLines()));
}
/** Merges a "billingContact" JSON object into the old contact; returns the old contact unchanged when absent. */
private TenantInfoBillingContact updateTenantInfoBillingContact(Inspector insp, TenantInfoBillingContact oldContact) {
    if ( ! insp.valid()) return oldContact;
    TenantInfoBillingContact merged = TenantInfoBillingContact.EMPTY;
    merged = merged.withName(getString(insp.field("name"), oldContact.name()));
    merged = merged.withEmail(getString(insp.field("email"), oldContact.email()));
    merged = merged.withPhone(getString(insp.field("phone"), oldContact.phone()));
    return merged.withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address()));
}
/**
 * Lists notifications for a tenant, optionally narrowed by the "application" and
 * "instance" query parameters, as a JSON object with a "notifications" array.
 */
private HttpResponse notifications(String tenantName, HttpRequest request) {
    Optional<ApplicationName> application = Optional.ofNullable(request.getProperty("application")).map(ApplicationName::from);
    Optional<InstanceName> instance = Optional.ofNullable(request.getProperty("instance")).map(InstanceName::from);
    NotificationSource source = new NotificationSource(TenantName.from(tenantName), application, instance,
                                                       Optional.empty(), Optional.empty(), Optional.empty(), OptionalLong.empty());

    Slime slime = new Slime();
    Cursor notificationsArray = slime.setObject().setArray("notifications");
    for (Notification notification : controller.notificationsDb().listNotifications(source, showOnlyProductionInstances(request)))
        toSlime(notificationsArray.addObject(), notification);
    return new SlimeJsonResponse(slime);
}
/** Serializes one notification; optional source fields are emitted only when present. */
private static void toSlime(Cursor cursor, Notification notification) {
cursor.setLong("at", notification.at().toEpochMilli());
cursor.setString("level", notificationLevelAsString(notification.level()));
cursor.setString("type", notificationTypeAsString(notification.type()));
Cursor messagesArray = cursor.setArray("messages");
notification.messages().forEach(messagesArray::addString);
notification.source().application().ifPresent(application -> cursor.setString("application", application.value()));
notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value()));
// A zone contributes two fields: environment and region.
notification.source().zoneId().ifPresent(zoneId -> {
cursor.setString("environment", zoneId.environment().value());
cursor.setString("region", zoneId.region().value());
});
notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value()));
notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName()));
notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber));
}
/** Maps a notification type to its wire-format name; throws for types without a defined serialization. */
private static String notificationTypeAsString(Notification.Type type) {
    if (type == Notification.Type.applicationPackage) return "applicationPackage";
    if (type == Notification.Type.deployment) return "deployment";
    if (type == Notification.Type.feedBlock) return "feedBlock";
    throw new IllegalArgumentException("No serialization defined for notification type " + type);
}
/** Maps a notification level to its wire-format name; throws for levels without a defined serialization. */
private static String notificationLevelAsString(Notification.Level level) {
    if (level == Notification.Level.warning) return "warning";
    if (level == Notification.Level.error) return "error";
    throw new IllegalArgumentException("No serialization defined for notification level " + level);
}
/**
 * Lists the tenant's applications (optionally restricted to one application name),
 * each with its instances and resource URLs. Responds 404 for unknown tenants.
 */
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
TenantName tenant = TenantName.from(tenantName);
if (controller.tenants().get(tenantName).isEmpty())
return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
Slime slime = new Slime();
Cursor applicationArray = slime.setArray();
for (Application application : controller.applications().asList(tenant)) {
// An empty filter (applicationName) includes every application.
if (applicationName.map(application.id().application().value()::equals).orElse(true)) {
Cursor applicationObject = applicationArray.addObject();
applicationObject.setString("tenant", application.id().tenant().value());
applicationObject.setString("application", application.id().application().value());
applicationObject.setString("url", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value(),
request.getUri()).toString());
Cursor instanceArray = applicationObject.setArray("instances");
// Production-only view is controlled by a request property.
for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
: application.instances().keySet()) {
Cursor instanceObject = instanceArray.addObject();
instanceObject.setString("instance", instance.value());
instanceObject.setString("url", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value() +
"/instance/" + instance.value(),
request.getUri()).toString());
}
}
}
return new SlimeJsonResponse(slime);
}
/** Streams the dev application package for a manually deployed zone as a zip download. */
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
    if ( ! type.environment().isManuallyDeployed())
        throw new IllegalArgumentException("Only manually deployed zones have dev packages");

    ZoneId zone = type.zone(controller.system());
    byte[] content = controller.applications().applicationStore().getDev(id, zone);
    String filename = id.toFullString() + "." + zone.value() + ".zip";
    return new ZipResponse(filename, content);
}
/**
 * Streams a submitted application package as a zip download.
 * The build is taken from the "build" query parameter, or defaults to the latest submitted build.
 */
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
var tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
var applicationId = ApplicationId.from(tenantName, applicationName, InstanceName.defaultName().value());
long buildNumber;
var requestedBuild = Optional.ofNullable(request.getProperty("build")).map(build -> {
try {
return Long.parseLong(build);
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Invalid build number", e);
}
});
if (requestedBuild.isEmpty()) {
// No explicit build requested: fall back to the latest submitted version.
var application = controller.applications().requireApplication(tenantAndApplication);
var latestBuild = application.latestVersion().map(ApplicationVersion::buildNumber).orElse(OptionalLong.empty());
if (latestBuild.isEmpty()) {
throw new NotExistsException("No application package has been submitted for '" + tenantAndApplication + "'");
}
buildNumber = latestBuild.getAsLong();
} else {
buildNumber = requestedBuild.get();
}
var applicationPackage = controller.applications().applicationStore().find(tenantAndApplication.tenant(), tenantAndApplication.application(), buildNumber);
var filename = tenantAndApplication + "-build" + buildNumber + ".zip";
if (applicationPackage.isEmpty()) {
throw new NotExistsException("No application package found for '" +
tenantAndApplication +
"' with build number " + buildNumber);
}
return new ZipResponse(filename, applicationPackage.get());
}
/** Serializes a single application; throws NotExistsException for unknown applications. */
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
    Application application = getApplication(tenantName, applicationName);
    Slime slime = new Slime();
    toSlime(slime.setObject(), application, request);
    return new SlimeJsonResponse(slime);
}
/** Returns the compile version for the application as {"compileVersion": "..."}. */
private HttpResponse compileVersion(String tenantName, String applicationName) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Slime slime = new Slime();
    slime.setObject().setString("compileVersion", compileVersion(id).toFullString());
    return new SlimeJsonResponse(slime);
}
/** Serializes a single application instance together with its deployment status. */
private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    // Resolve the instance before the application, so a missing instance is reported first.
    Instance instance = getInstance(tenantName, applicationName, instanceName);
    DeploymentStatus status = controller.jobController().deploymentStatus(getApplication(tenantName, applicationName));
    toSlime(root, instance, status, request);
    return new SlimeJsonResponse(slime);
}
/**
 * Registers the requesting user's public developer key (PEM, in the "key" body field)
 * on a cloud tenant, and returns the tenant's full key list.
 */
private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
Principal user = request.getJDiscRequest().getUserPrincipal();
String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
Slime root = new Slime();
// Mutate and serialize inside the lock so the response reflects exactly what was stored.
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
tenant = tenant.withDeveloperKey(developerKey, user);
toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
controller.tenants().store(tenant);
});
return new SlimeJsonResponse(root);
}
/**
 * Asks the config server of the given deployment to validate a tenant secret store,
 * and wraps the config server's JSON reply in {"target": ..., "result": ...}.
 * Falls back to a 500 with the raw reply if it is not parseable JSON.
 */
private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) {
var awsRegion = request.getProperty("aws-region");
var parameterName = request.getProperty("parameter-name");
var applicationId = ApplicationId.fromFullString(request.getProperty("application-id"));
var zoneId = ZoneId.from(request.getProperty("zone"));
var deploymentId = new DeploymentId(applicationId, zoneId);

// NOTE(review): the tenant is looked up via applicationId.tenant(), not the tenantName
// path parameter, while the 404 message below uses tenantName — confirm these always match.
var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class);

var tenantSecretStore = tenant.tenantSecretStores()
.stream()
.filter(secretStore -> secretStore.getName().equals(secretStoreName))
.findFirst();

if (tenantSecretStore.isEmpty())
return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'");

var response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName);
try {
var responseRoot = new Slime();
var responseCursor = responseRoot.setObject();
responseCursor.setString("target", deploymentId.toString());
var responseResultCursor = responseCursor.setObject("result");
var responseSlime = SlimeUtils.jsonToSlime(response);
SlimeUtils.copyObject(responseSlime.get(), responseResultCursor);
return new SlimeJsonResponse(responseRoot);
} catch (JsonParseException e) {
return ErrorResponse.internalServerError(response);
}
}
/**
 * Removes a developer key (PEM, in the "key" body field) from a cloud tenant,
 * and returns the tenant's remaining key list.
 */
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
// 'user' is looked up but intentionally unused beyond validation of the tenant type above.
Principal user = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class).developerKeys().get(developerKey);
Slime root = new Slime();
// Mutate and serialize inside the lock so the response reflects exactly what was stored.
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
tenant = tenant.withoutDeveloperKey(developerKey);
toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
controller.tenants().store(tenant);
});
return new SlimeJsonResponse(root);
}
/** Serializes each developer key as {"key": &lt;PEM&gt;, "user": &lt;principal name&gt;} into {@code keysArray}. */
private void toSlime(Cursor keysArray, Map<PublicKey, Principal> keys) {
keys.forEach((key, principal) -> {
Cursor keyObject = keysArray.addObject();
keyObject.setString("key", KeyUtils.toPem(key));
keyObject.setString("user", principal.getName());
});
}
/** Adds a deploy key (PEM, in the "key" body field) to the application and returns all its deploy keys. */
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
    String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
    Slime root = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        var updated = application.withDeployKey(deployKey);
        Cursor keysArray = root.setObject().setArray("keys");
        updated.get().deployKeys().stream().map(KeyUtils::toPem).forEach(keysArray::addString);
        controller.applications().store(updated);
    });
    return new SlimeJsonResponse(root);
}
/** Removes a deploy key (PEM, in the "key" body field) from the application and returns its remaining deploy keys. */
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
    String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
    Slime root = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        var updated = application.withoutDeployKey(deployKey);
        Cursor keysArray = root.setObject().setArray("keys");
        updated.get().deployKeys().stream().map(KeyUtils::toPem).forEach(keysArray::addString);
        controller.applications().store(updated);
    });
    return new SlimeJsonResponse(root);
}
/**
 * Adds a tenant secret store to a cloud tenant: validates it, creates the backing
 * policy and store, records it on the tenant, and returns the tenant's secret stores.
 * Body fields "awsId", "externalId" and "role" are mandatory.
 */
private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
var data = toSlime(request.getData()).get();
var awsId = mandatory("awsId", data).asString();
var externalId = mandatory("externalId", data).asString();
var role = mandatory("role", data).asString();
var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var tenantSecretStore = new TenantSecretStore(name, awsId, role);
if (!tenantSecretStore.isValid()) {
return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid");
}
if (tenant.tenantSecretStores().contains(tenantSecretStore)) {
return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured");
}
// Create external resources before recording the store on the tenant.
controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role);
controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId);
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withSecretStore(tenantSecretStore);
controller.tenants().store(lockedTenant);
});
// Re-read the tenant so the response reflects the stored state.
tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var slime = new Slime();
toSlime(slime.setObject(), tenant.tenantSecretStores());
return new SlimeJsonResponse(slime);
}
/**
 * Deletes the named tenant secret store: removes the backing store and policy,
 * removes it from the tenant, and returns the tenant's remaining secret stores.
 * The {@code request} parameter is currently unused.
 */
private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) {
var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var optionalSecretStore = tenant.tenantSecretStores().stream()
.filter(secretStore -> secretStore.getName().equals(name))
.findFirst();
if (optionalSecretStore.isEmpty())
return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found");
var tenantSecretStore = optionalSecretStore.get();
// Tear down external resources before removing the store from the tenant.
controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore);
controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole());
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withoutSecretStore(tenantSecretStore);
controller.tenants().store(lockedTenant);
});
// Re-read the tenant so the response reflects the stored state.
tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var slime = new Slime();
toSlime(slime.setObject(), tenant.tenantSecretStores());
return new SlimeJsonResponse(slime);
}
/** Sets the archive access role (mandatory, non-blank "role" body field) on a cloud tenant. */
private HttpResponse allowArchiveAccess(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

    var role = mandatory("role", toSlime(request.getData()).get()).asString();
    if (role.isBlank())
        return ErrorResponse.badRequest("Archive access role can't be whitespace only");

    controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant ->
            controller.tenants().store(lockedTenant.withArchiveAccessRole(Optional.of(role))));
    return new MessageResponse("Archive access role set to '" + role + "' for tenant " + tenantName + ".");
}
/** Clears the archive access role on a cloud tenant. */
private HttpResponse removeArchiveAccess(String tenantName) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

    controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant ->
            controller.tenants().store(lockedTenant.withArchiveAccessRole(Optional.empty())));
    return new MessageResponse("Archive access role removed for tenant " + tenantName + ".");
}
/**
 * Partially updates an application from the request body: "majorVersion" (0 clears it)
 * and "pemDeployKey" are each applied only when present. Returns a message listing
 * the applied changes, or "No applicable changes." when the body contained neither field.
 */
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
Inspector requestObject = toSlime(request.getData()).get();
StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
Inspector majorVersionField = requestObject.field("majorVersion");
if (majorVersionField.valid()) {
// A value of 0 means "unset the pinned major version".
Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
application = application.withMajorVersion(majorVersion);
messageBuilder.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
}
Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
if (pemDeployKeyField.valid()) {
String pemDeployKey = pemDeployKeyField.asString();
PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
application = application.withDeployKey(deployKey);
messageBuilder.add("Added deploy key " + pemDeployKey);
}
controller.applications().store(application);
});
return new MessageResponse(messageBuilder.toString());
}
/** Returns the application, or throws NotExistsException if it does not exist. */
private Application getApplication(String tenantName, String applicationName) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    Optional<Application> application = controller.applications().getApplication(applicationId);
    if (application.isEmpty()) throw new NotExistsException(applicationId + " not found");
    return application.get();
}
/** Returns the instance, or throws NotExistsException if it does not exist. */
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Optional<Instance> instance = controller.applications().getInstance(applicationId);
    if (instance.isEmpty()) throw new NotExistsException(applicationId + " not found");
    return instance.get();
}
/** Serializes the node repository's view of this deployment's nodes as {"nodes": [...]}. */
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, id);
Slime slime = new Slime();
Cursor nodesArray = slime.setObject().setArray("nodes");
for (Node node : nodes) {
Cursor nodeObject = nodesArray.addObject();
nodeObject.setString("hostname", node.hostname().value());
nodeObject.setString("state", valueOf(node.state()));
node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
nodeObject.setString("orchestration", valueOf(node.serviceState()));
nodeObject.setString("version", node.currentVersion().toString());
nodeObject.setString("flavor", node.flavor());
toSlime(node.resources(), nodeObject);
nodeObject.setString("clusterId", node.clusterId());
nodeObject.setString("clusterType", valueOf(node.clusterType()));
// A node is reported down if any history event is a "down" event.
nodeObject.setBool("down", node.history().stream().anyMatch(event -> "down".equals(event.getEvent())));
nodeObject.setBool("retired", node.retired() || node.wantToRetire());
// Restart/reboot are pending while the wanted generation is ahead of the current one.
nodeObject.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration());
nodeObject.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration());
}
return new SlimeJsonResponse(slime);
}
/** Serializes the cluster resource/autoscaling info for this deployment as {"clusters": {...}}. */
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);
Slime slime = new Slime();
Cursor clustersObject = slime.setObject().setObject("clusters");
for (Cluster cluster : application.clusters().values()) {
Cursor clusterObject = clustersObject.setObject(cluster.id().value());
clusterObject.setString("type", cluster.type().name());
toSlime(cluster.min(), clusterObject.setObject("min"));
toSlime(cluster.max(), clusterObject.setObject("max"));
toSlime(cluster.current(), clusterObject.setObject("current"));
// Only emit "target" when it differs from the current resources (numbers-wise).
if (cluster.target().isPresent()
&& ! cluster.target().get().justNumbers().equals(cluster.current().justNumbers()))
toSlime(cluster.target().get(), clusterObject.setObject("target"));
cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested")));
utilizationToSlime(cluster.utilization(), clusterObject.setObject("utilization"));
scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus());
clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis());
clusterObject.setDouble("maxQueryGrowthRate", cluster.maxQueryGrowthRate());
clusterObject.setDouble("currentQueryFractionOfMax", cluster.currentQueryFractionOfMax());
}
return new SlimeJsonResponse(slime);
}
/** Maps a node state to its wire-format name; throws for states without a defined serialization. */
private static String valueOf(Node.State state) {
    if (state == Node.State.failed) return "failed";
    if (state == Node.State.parked) return "parked";
    if (state == Node.State.dirty) return "dirty";
    if (state == Node.State.ready) return "ready";
    if (state == Node.State.active) return "active";
    if (state == Node.State.inactive) return "inactive";
    if (state == Node.State.reserved) return "reserved";
    if (state == Node.State.provisioned) return "provisioned";
    throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
}
/** Maps an orchestration state to its wire-format name, defaulting to "unknown". */
static String valueOf(Node.ServiceState state) {
    if (state == Node.ServiceState.expectedUp) return "expectedUp";
    if (state == Node.ServiceState.allowedDown) return "allowedDown";
    if (state == Node.ServiceState.permanentlyDown) return "permanentlyDown";
    if (state == Node.ServiceState.unorchestrated) return "unorchestrated";
    return "unknown";
}
/** Maps a node cluster type to its wire-format name; throws for types without a defined serialization. */
private static String valueOf(Node.ClusterType type) {
    if (type == Node.ClusterType.admin) return "admin";
    if (type == Node.ClusterType.content) return "content";
    if (type == Node.ClusterType.container) return "container";
    if (type == Node.ClusterType.combined) return "combined";
    throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
}
/** Maps a disk speed to its wire-format name; throws for values without a defined serialization. */
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
    if (diskSpeed == NodeResources.DiskSpeed.fast) return "fast";
    if (diskSpeed == NodeResources.DiskSpeed.slow) return "slow";
    if (diskSpeed == NodeResources.DiskSpeed.any) return "any";
    throw new IllegalArgumentException("Unknown disk speed '" + diskSpeed.name() + "'");
}
/** Maps a storage type to its wire-format name; throws for values without a defined serialization. */
private static String valueOf(NodeResources.StorageType storageType) {
    if (storageType == NodeResources.StorageType.remote) return "remote";
    if (storageType == NodeResources.StorageType.local) return "local";
    if (storageType == NodeResources.StorageType.any) return "any";
    throw new IllegalArgumentException("Unknown storage type '" + storageType.name() + "'");
}
/**
 * Streams deployment logs from the config server straight through to the client,
 * passing the query parameters (e.g. filters) along unchanged.
 */
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
DeploymentId deployment = new DeploymentId(application, zone);
InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
// Anonymous response that pipes the log stream when the container renders the response.
return new HttpResponse(200) {
@Override
public void render(OutputStream outputStream) throws IOException {
logStream.transferTo(outputStream);
}
};
}
/** Returns the support access state for a deployment. The queryParameters argument is currently unused. */
private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deployment = new DeploymentId(application, requireZone(environment, region));
    SupportAccess supportAccess = controller.supportAccess().forDeployment(deployment);
    Slime slime = SupportAccessSerializer.toSlime(supportAccess, false, Optional.ofNullable(controller.clock().instant()));
    return new SlimeJsonResponse(slime);
}
/** Disallows support access for a deployment, recorded against the requesting user, and returns the new state. */
private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deployment = new DeploymentId(application, requireZone(environment, region));
    Principal principal = requireUserPrincipal(request);
    SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName());
    Slime slime = SupportAccessSerializer.toSlime(disallowed, false, Optional.ofNullable(controller.clock().instant()));
    return new SlimeJsonResponse(slime);
}
/** Fetches proton metrics for a deployment from the config server and returns them as JSON. */
private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    return buildResponseFromProtonMetrics(controller.serviceRegistry().configServer().getProtonMetrics(deployment));
}
/** Wraps the metrics in a pretty-printed {"metrics": [...]} JSON response; 500 with an empty body on serialization failure. */
private JsonResponse buildResponseFromProtonMetrics(List<ProtonMetrics> protonMetrics) {
    try {
        var metricsArray = jsonMapper.createArrayNode();
        protonMetrics.forEach(metrics -> metricsArray.add(metrics.toJson()));
        var root = jsonMapper.createObjectNode();
        root.set("metrics", metricsArray);
        return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(root));
    } catch (JsonProcessingException e) {
        log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
        return new JsonResponse(500, "");
    }
}
/**
 * Triggers a deployment job. Body field "reTrigger" re-runs the last run; otherwise the job
 * is force-triggered, optionally skipping tests ("skipTests"). The response names the jobs
 * actually triggered, or says nothing was triggered.
 */
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
Inspector requestObject = toSlime(request.getData()).get();
boolean requireTests = ! requestObject.field("skipTests").asBool();
boolean reTrigger = requestObject.field("reTrigger").asBool();
// forceTrigger may trigger several jobs; reTrigger always yields exactly one.
String triggered = reTrigger
? controller.applications().deploymentTrigger()
.reTrigger(id, type).type().jobName()
: controller.applications().deploymentTrigger()
.forceTrigger(id, type, request.getJDiscRequest().getUserPrincipal().getName(), requireTests)
.stream().map(job -> job.type().jobName()).collect(joining(", "));
return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
: "Triggered " + triggered + " for " + id);
}
/** Pauses the given job for the maximum allowed pause duration from now. */
private HttpResponse pause(ApplicationId id, JobType type) {
    Instant pauseUntil = controller.clock().instant().plus(DeploymentTrigger.maxPause);
    controller.applications().deploymentTrigger().pauseJob(id, type, pauseUntil);
    String message = type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause;
    return new MessageResponse(message);
}
/** Resumes a previously paused job. */
private HttpResponse resume(ApplicationId id, JobType type) {
    controller.applications().deploymentTrigger().resumeJob(id, type);
    String message = type.jobName() + " for " + id + " resumed";
    return new MessageResponse(message);
}
/**
 * Serializes the given application to the given object: id, links, versions,
 * change in progress, instances, deploy keys, metrics, activity and issue ids.
 */
private void toSlime(Cursor object, Application application, HttpRequest request) {
    object.setString("tenant", application.id().tenant().value());
    object.setString("application", application.id().application().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + application.id().tenant().value() +
                                             "/application/" + application.id().application().value() +
                                             "/job/",
                                             request.getUri()).toString());

    DeploymentStatus status = controller.jobController().deploymentStatus(application);

    application.latestVersion().ifPresent(version -> toSlime(version, object.setObject("latestVersion")));
    application.projectId().ifPresent(id -> object.setLong("projectId", id));

    // Current and outstanding change are reported from the first instance only
    application.instances().values().stream().findFirst().ifPresent(instance -> {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
    });

    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));

    // Either all instances, or only production instances, depending on the request
    Cursor instancesArray = object.setArray("instances");
    for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                  : application.instances().values())
        toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);

    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);

    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());

    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/**
 * Serializes the given instance, in the context of its application's deployment status and spec,
 * to the given object. Used for the per-instance entries of the application-level response.
 */
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
    object.setString("instance", instance.name().value());
    if (deploymentSpec.instance(instance.name()).isPresent()) {
        // Change and change blockers are only reported when this instance is in the deployment spec.
        // (An unused, discarded job listing was previously computed here; removed.)
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
        Cursor changeBlockers = object.setArray("changeBlockers");
        deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    globalEndpointsToSlime(object, instance);

    // Deployments, sorted by the deployment spec when this instance is in it
    List<Deployment> deployments = deploymentSpec.instance(instance.name())
                                                 .map(spec -> new DeploymentSteps(spec, controller::system))
                                                 .map(steps -> steps.sortedDeployments(instance.deployments().values()))
                                                 .orElse(List.copyOf(instance.deployments().values()));
    Cursor deploymentsArray = object.setArray("deployments");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = deploymentsArray.addObject();
        // Rotation status only applies to production deployments with assigned rotations
        if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);

        if (recurseOverDeployments(request)) // Include full deployment information when recursive
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/instance/" + instance.name().value() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
}
/** Writes the instance's non-legacy, rotation-backed global endpoint URLs and its first rotation id. */
private void globalEndpointsToSlime(Cursor object, Instance instance) {
    Cursor globalRotationsArray = object.setArray("globalRotations");
    controller.routing().endpointsOf(instance.id())
              .requiresRotation()
              .not().legacy()
              .asList().stream()
              .map(endpoint -> endpoint.url().toString())
              .distinct()                       // de-duplicate while keeping encounter order
              .forEach(globalRotationsArray::addString);

    if ( ! instance.rotations().isEmpty())
        object.setString("rotationId", instance.rotations().get(0).rotationId().asString());
}
/**
 * Serializes the given instance, with its application's deployment status, to the given object.
 * Includes id, links, source version, change, change blockers, endpoints, deployments
 * (plus placeholders for production zones not yet deployed to), deploy keys, metrics and activity.
 */
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
    Application application = status.application();
    object.setString("tenant", instance.id().tenant().value());
    object.setString("application", instance.id().application().value());
    object.setString("instance", instance.id().instance().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + instance.id().tenant().value() +
                                             "/application/" + instance.id().application().value() +
                                             "/instance/" + instance.id().instance().value() + "/job/",
                                             request.getUri()).toString());

    application.latestVersion().ifPresent(version -> {
        sourceRevisionToSlime(version.source(), object.setObject("source"));
        version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
        version.commit().ifPresent(commit -> object.setString("commit", commit));
    });

    application.projectId().ifPresent(id -> object.setLong("projectId", id));

    if (application.deploymentSpec().instance(instance.name()).isPresent()) {
        // Change and change blockers are only reported when this instance is in the deployment spec.
        // (An unused, discarded job listing was previously computed here; removed.)
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));

    globalEndpointsToSlime(object, instance);

    // Deployments, sorted by the deployment spec when this instance is in it
    List<Deployment> deployments =
            application.deploymentSpec().instance(instance.name())
                       .map(spec -> new DeploymentSteps(spec, controller::system))
                       .map(steps -> steps.sortedDeployments(instance.deployments().values()))
                       .orElse(List.copyOf(instance.deployments().values()));

    Cursor instancesArray = object.setArray("instances");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = instancesArray.addObject();
        // Rotation status only applies to production deployments
        if (deployment.zone().environment() == Environment.prod) {
            if (instance.rotations().size() == 1) {
                toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment),
                        deploymentObject);
            }
            if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
            }
        }
        if (recurseOverDeployments(request)) // Include full deployment information when recursive
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", instance.id().instance().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }

    // Add empty entries for planned production deployment zones not yet deployed to
    status.jobSteps().keySet().stream()
          .filter(job -> job.application().instance().equals(instance.name()))
          .filter(job -> job.type().isProduction() && job.type().isDeployment())
          .map(job -> job.type().zone(controller.system()))
          .filter(zone -> ! instance.deployments().containsKey(zone))
          .forEach(zone -> {
              Cursor deploymentObject = instancesArray.addObject();
              deploymentObject.setString("environment", zone.environment().value());
              deploymentObject.setString("region", zone.region().value());
          });

    // Single-key field kept alongside the list form — presumably for older clients; TODO confirm
    application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);

    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());

    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/** Returns the full serialization of a single deployment, or 404 if the instance or deployment is unknown. */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment,
                                String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().getInstance(id)
                                  .orElseThrow(() -> new NotExistsException(id + " not found"));
    ZoneId zone = requireZone(environment, region);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " is not deployed in " + zone);

    Slime slime = new Slime();
    toSlime(slime.setObject(), new DeploymentId(instance.id(), zone), deployment, request);
    return new SlimeJsonResponse(slime);
}
/** Serializes the given change: platform version as "version", known application version as "revision". */
private void toSlime(Cursor object, Change change) {
    change.platform().ifPresent(version -> object.setString("version", version.toString()));
    change.application().ifPresent(version -> {
        if ( ! version.isUnknown())
            toSlime(version, object.setObject("revision"));
    });
}
/** Serializes the given endpoint's cluster, TLS flag, URL, scope, routing method and legacy flag. */
private void toSlime(Endpoint endpoint, Cursor object) {
    object.setString("cluster", endpoint.cluster().value());
    object.setBool("tls", endpoint.tls());
    object.setString("url", endpoint.url().toString());
    object.setString("scope", endpointScopeString(endpoint.scope()));
    object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
    object.setBool("legacy", endpoint.legacy());
}
/**
 * Serializes the given deployment to the given response object: ids, endpoints, related links,
 * versions, rotation status, job status, archive URI, activity and metrics.
 */
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
    response.setString("tenant", deploymentId.applicationId().tenant().value());
    response.setString("application", deploymentId.applicationId().application().value());
    response.setString("instance", deploymentId.applicationId().instance().value());
    response.setString("environment", deploymentId.zoneId().environment().value());
    response.setString("region", deploymentId.zoneId().region().value());
    var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));

    // Legacy endpoints are excluded unless the request explicitly asks for them
    boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints");
    var endpointArray = response.setArray("endpoints");
    // Zone-scoped endpoints of this deployment
    EndpointList zoneEndpoints = controller.routing().endpointsOf(deploymentId)
                                           .scope(Endpoint.Scope.zone);
    if (!legacyEndpoints) {
        zoneEndpoints = zoneEndpoints.not().legacy();
    }
    for (var endpoint : controller.routing().directEndpoints(zoneEndpoints, deploymentId.applicationId())) {
        toSlime(endpoint, endpointArray.addObject());
    }
    // Global endpoints targeting this deployment's zone
    EndpointList globalEndpoints = controller.routing().endpointsOf(application, deploymentId.applicationId().instance())
                                             .targets(deploymentId.zoneId());
    if (!legacyEndpoints) {
        globalEndpoints = globalEndpoints.not().legacy();
    }
    for (var endpoint : controller.routing().directEndpoints(globalEndpoints, deploymentId.applicationId())) {
        toSlime(endpoint, endpointArray.addObject());
    }

    response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
    response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
    response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
    response.setString("version", deployment.version().toFullString());
    response.setString("revision", deployment.applicationVersion().id());
    response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
    controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
              .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));

    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
    sourceRevisionToSlime(deployment.applicationVersion().source(), response);

    var instance = application.instances().get(deploymentId.applicationId().instance());
    if (instance != null) {
        // Rotation status only applies to production deployments with assigned rotations
        if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);

        JobType.from(controller.system(), deployment.zone())
               .map(type -> new JobId(instance.id(), type))
               .map(status.jobSteps()::get)
               .ifPresent(stepStatus -> {
                   JobControllerApiHandlerHelper.applicationVersionToSlime(
                           response.setObject("applicationVersion"), deployment.applicationVersion());
                   // "complete" if no run is scheduled, "pending" until the job is ready, then "running"
                   if (!status.jobsToRun().containsKey(stepStatus.job().get()))
                       response.setString("status", "complete");
                   else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true))
                       response.setString("status", "pending");
                   else response.setString("status", "running");
               });
    }

    controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant())
              .ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString()));

    Cursor activity = response.setObject("activity");
    deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

    DeploymentMetrics metrics = deployment.metrics();
    Cursor metricsObject = response.setObject("metrics");
    metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
    metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
    metricsObject.setDouble("documentCount", metrics.documentCount());
    metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
    metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
    metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}
/** Serializes the given application version's build number, hash, source and commit, if known. */
private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
    if (applicationVersion.isUnknown()) return; // Nothing to report for an unknown version

    object.setLong("buildNumber", applicationVersion.buildNumber().getAsLong());
    object.setString("hash", applicationVersion.id());
    sourceRevisionToSlime(applicationVersion.source(), object.setObject("source"));
    applicationVersion.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
    applicationVersion.commit().ifPresent(commit -> object.setString("commit", commit));
}
/** Serializes the given source revision's repository, branch and commit, if present. */
private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
    revision.ifPresent(source -> {
        object.setString("gitRepository", source.repository());
        object.setString("gitBranch", source.branch());
        object.setString("gitCommit", source.commit());
    });
}
/** Writes the given rotation state as a "bcpStatus" object with a "rotationStatus" field. */
private void toSlime(RotationState state, Cursor object) {
    object.setObject("bcpStatus").setString("rotationStatus", rotationStateString(state));
}
/** Serializes the status of each assigned rotation for the given deployment into an "endpointStatus" array. */
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
    var endpointStatusArray = object.setArray("endpointStatus");
    for (var assignedRotation : rotations) {
        var entry = endpointStatusArray.addObject();
        var targets = status.of(assignedRotation.rotationId());
        entry.setString("endpointId", assignedRotation.endpointId().id());
        entry.setString("rotationId", assignedRotation.rotationId().asString());
        entry.setString("clusterId", assignedRotation.clusterId().value());
        entry.setString("status", rotationStateString(status.of(assignedRotation.rotationId(), deployment)));
        entry.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
    }
}
/** Returns the URI of the monitoring system for the given deployment, as provided by the zone registry. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
    return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}
/**
 * Returns a non-broken, released version at least as old as the oldest platform the given application is on.
 *
 * If no known version is applicable, the newest version at least as old as the oldest platform is selected,
 * among all versions released for this system. If no such versions exists, throws an IllegalStateException.
 */
private Version compileVersion(TenantAndApplicationId id) {
    Version oldestPlatform = controller.applications().oldestInstalledPlatform(id);
    VersionStatus versionStatus = controller.readVersionStatus();
    return versionStatus.versions().stream()
                        .filter(version -> version.confidence().equalOrHigherThan(VespaVersion.Confidence.low))
                        .filter(VespaVersion::isReleased)
                        .map(VespaVersion::versionNumber)
                        .filter(version -> ! version.isAfter(oldestPlatform))
                        .max(Comparator.naturalOrder())
                        .orElseGet(() -> {
                            // Hoist the known-version set once: the previous code rebuilt it inside the
                            // filter, once per candidate version (accidental O(n^2)).
                            var knownVersions = versionStatus.versions().stream()
                                                             .map(VespaVersion::versionNumber)
                                                             .collect(Collectors.toSet());
                            return controller.mavenRepository().metadata().versions().stream()
                                             .filter(version -> ! version.isAfter(oldestPlatform))
                                             .filter(version -> ! knownVersions.contains(version))
                                             .max(Comparator.naturalOrder())
                                             .orElseThrow(() -> new IllegalStateException("No available releases of " +
                                                                                          controller.mavenRepository().artifactId()));
                        });
}
/** Sets the given deployment in or out of service for both rotation-backed and cloud-service global endpoints. */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().requireInstance(applicationId);
    ZoneId zone = requireZone(environment, region);
    if (instance.deployments().get(zone) == null) {
        throw new NotExistsException(instance + " has no deployment in " + zone);
    }

    DeploymentId deploymentId = new DeploymentId(instance.id(), zone);
    setGlobalRotationStatus(deploymentId, inService, request);
    setGlobalEndpointStatus(deploymentId, inService, request);

    return new MessageResponse(String.format("Successfully set %s in %s %s service",
                                             instance.id().toShortString(), zone, inService ? "in" : "out of"));
}
/** Set the global endpoint status for given deployment. This only applies to global endpoints backed by a cloud service */
private void setGlobalEndpointStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
    GlobalRouting.Agent agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant;
    GlobalRouting.Status status = inService ? GlobalRouting.Status.in : GlobalRouting.Status.out;
    controller.routing().policies().setGlobalRoutingStatus(deployment, status, agent);
}
/** Set the global rotation status for given deployment. This only applies to global endpoints backed by a rotation */
private void setGlobalRotationStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
    Inspector requestData = toSlime(request.getData()).get();
    String reason = mandatory("reason", requestData).asString();
    GlobalRouting.Agent agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant;
    long timestamp = controller.clock().instant().getEpochSecond();
    EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
    controller.routing().setGlobalRotationStatus(deployment, new EndpointStatus(status, reason, agent.name(), timestamp));
}
/** Returns the global rotation status overrides of the given deployment, one entry per endpoint. */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    Cursor overrides = slime.setObject().setArray("globalrotationoverride");
    controller.routing().globalRotationStatus(deploymentId).forEach((endpoint, status) -> {
        // Each endpoint contributes its upstream id followed by a status object
        overrides.addString(endpoint.upstreamIdOf(deploymentId));
        Cursor statusObject = overrides.addObject();
        statusObject.setString("status", status.getStatus().name());
        statusObject.setString("reason", status.getReason() == null ? "" : status.getReason());
        statusObject.setString("agent", status.getAgent() == null ? "" : status.getAgent());
        statusObject.setLong("timestamp", status.getEpoch());
    });
    return new SlimeJsonResponse(slime);
}
/** Returns the rotation status of the given deployment, optionally restricted to a named endpoint. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().requireInstance(applicationId);
    ZoneId zone = requireZone(environment, region);
    RotationId rotation = findRotationId(instance, endpointId);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null) {
        throw new NotExistsException(instance + " has no deployment in " + zone);
    }

    Slime slime = new Slime();
    toSlime(instance.rotationStatus().of(rotation, deployment), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/**
 * Returns resource metering data for the given tenant and application:
 * the current allocation rate, this and last month's totals, and a per-instance
 * cpu/mem/disk time series under "details".
 */
private HttpResponse metering(String tenant, String application, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();

    MeteringData meteringData = controller.serviceRegistry()
                                          .meteringService()
                                          .getMeteringData(TenantName.from(tenant), ApplicationName.from(application));

    resourceAllocationToSlime(root.setObject("currentrate"), meteringData.getCurrentSnapshot());
    resourceAllocationToSlime(root.setObject("thismonth"), meteringData.getThisMonth());
    resourceAllocationToSlime(root.setObject("lastmonth"), meteringData.getLastMonth());

    Map<ApplicationId, List<ResourceSnapshot>> history = meteringData.getSnapshotHistory();
    Cursor details = root.setObject("details");

    Cursor detailsCpu = details.setObject("cpu");
    Cursor detailsMem = details.setObject("mem");
    Cursor detailsDisk = details.setObject("disk");

    // One {unixms, value} series per instance, per resource dimension
    history.forEach((applicationId, resources) -> {
        String instanceName = applicationId.instance().value();
        Cursor detailsCpuData = detailsCpu.setObject(instanceName).setArray("data");
        Cursor detailsMemData = detailsMem.setObject(instanceName).setArray("data");
        Cursor detailsDiskData = detailsDisk.setObject(instanceName).setArray("data");

        resources.forEach(resourceSnapshot -> {
            Cursor cpu = detailsCpuData.addObject();
            cpu.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
            cpu.setDouble("value", resourceSnapshot.getCpuCores());

            Cursor mem = detailsMemData.addObject();
            mem.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
            mem.setDouble("value", resourceSnapshot.getMemoryGb());

            Cursor disk = detailsDiskData.addObject();
            disk.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
            disk.setDouble("value", resourceSnapshot.getDiskGb());
        });
    });

    return new SlimeJsonResponse(slime);
}

/** Writes the cpu/mem/disk fields of the given allocation to the given object. */
private static void resourceAllocationToSlime(Cursor object, ResourceAllocation allocation) {
    object.setDouble("cpu", allocation.getCpuCores());
    object.setDouble("mem", allocation.getMemoryGb());
    object.setDouble("disk", allocation.getDiskGb());
}
/** Returns the change currently being deployed for the given instance, if any. */
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    Change change = instance.change();
    if ( ! change.isEmpty()) {
        change.platform().ifPresent(version -> root.setString("platform", version.toString()));
        change.application().ifPresent(applicationVersion -> root.setString("application", applicationVersion.id()));
        root.setBool("pinned", change.isPinned());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns whether the given deployment is currently suspended. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    slime.setObject().setBool("suspended", controller.applications().isSuspended(deploymentId));
    return new SlimeJsonResponse(slime);
}
/** Returns the service view of the given deployment. */
private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
    ZoneId zone = requireZone(environment, region);
    var applicationId = new ApplicationId.Builder().tenant(tenantName)
                                                   .applicationName(applicationName)
                                                   .instanceName(instanceName)
                                                   .build();
    ServiceApiResponse response = new ServiceApiResponse(zone,
                                                         applicationId,
                                                         List.of(controller.zoneRegistry().getConfigServerVipUri(zone)),
                                                         request.getUri());
    response.setResponse(applicationView);
    return response;
}
/**
 * Proxies a service API request for the given deployment. Cluster controller status pages are
 * fetched directly and returned as HTML; all other requests are forwarded to the service API
 * and wrapped in a ServiceApiResponse.
 */
private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));

    if ("container-clustercontroller".equals(serviceName) && restPath.contains("/status/")) {
        String[] parts = restPath.split("/status/");
        if (parts.length < 2) // E.g. a path ending in "/status/": fail with 4xx instead of ArrayIndexOutOfBoundsException
            throw new IllegalArgumentException("Invalid cluster controller status path: '" + restPath + "'");
        String result = controller.serviceRegistry().configServer().getClusterControllerStatus(deploymentId, parts[0], parts[1]);
        return new HtmlResponse(result);
    }

    Map<?,?> result = controller.serviceRegistry().configServer().getServiceApiResponse(deploymentId, serviceName, restPath);
    ServiceApiResponse response = new ServiceApiResponse(deploymentId.zoneId(),
                                                         deploymentId.applicationId(),
                                                         List.of(controller.zoneRegistry().getConfigServerVipUri(deploymentId.zoneId())),
                                                         request.getUri());
    response.setResponse(result, serviceName, restPath);
    return response;
}
/** Fetches application package content at the given path for the given deployment. */
private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, String restPath, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deploymentId = new DeploymentId(applicationId, requireZone(environment, region));
    return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, "/" + restPath, request.getUri());
}
/**
 * Updates the given tenant from the request body and returns its new serialization.
 * Throws if the tenant does not exist. Reuses the parsed tenant name throughout
 * instead of re-parsing it for the final lookup.
 */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName); // 404 if unknown
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    return tenant(controller.tenants().require(tenant), request);
}
/**
 * Creates a tenant with the given name from the request body and returns its serialization.
 * Reuses the parsed tenant name instead of re-parsing it for the final lookup.
 */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().create(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    return tenant(controller.tenants().require(tenant), request);
}
/**
 * Creates an application with the given tenant and name, and returns its serialization.
 * The created Application is not needed for the response, so the previously unused
 * local binding of it is dropped (the creating call is kept for its side effect).
 */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    controller.applications().createApplication(id, credentials);
    Slime slime = new Slime();
    toSlime(id, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Creates an instance of the given application, creating the application first if it does not exist. */
private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    var instanceId = applicationId.instance(instanceName);
    if (controller.applications().getApplication(applicationId).isEmpty())
        createApplication(tenantName, applicationName, request);

    controller.applications().createInstance(instanceId);

    Slime slime = new Slime();
    toSlime(instanceId, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
    // The request body is the version string; the empty version means "current system version"
    String versionString = readToString(request.getData());
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Version version = Version.fromString(versionString);
        VersionStatus versionStatus = controller.readVersionStatus();
        if (version.equals(Version.emptyVersion))
            version = controller.systemVersion(versionStatus);
        // Refuse versions that are not currently active in this system
        if (!versionStatus.isActive(version))
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + versionStatus.versions()
                                                                                  .stream()
                                                                                  .map(VespaVersion::versionNumber)
                                                                                  .map(Version::toString)
                                                                                  .collect(joining(", ")));
        Change change = Change.of(version);
        if (pin)
            change = change.withPin();
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        // Fail with a descriptive client error, instead of an unchecked Optional.get(),
        // when no application package has ever been submitted for this application.
        Change change = Change.of(application.get().latestVersion()
                                             .orElseThrow(() -> new IllegalArgumentException(
                                                     "No application package has been submitted for " + id)));
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Change change = application.get().require(id.instance()).change();
        if (change.isEmpty()) {
            response.append("No deployment in progress for ").append(id).append(" at this time");
            return;
        }
        // "choice" is matched, upper-cased, against the ChangesToCancel constants; unknown values throw.
        ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase());
        controller.applications().deploymentTrigger().cancelChange(id, cancel);
        // Re-read the instance to report what remains of the change after cancellation.
        response.append("Changed deployment from '").append(change).append("' to '").append(controller.applications().requireInstance(id).change()).append("' for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */
private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    // "clusterId" and "documentType" are optional, comma-separated filters; blank entries are ignored.
    List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream()
                                        .flatMap(clusters -> Stream.of(clusters.split(",")))
                                        .filter(cluster -> ! cluster.isBlank())
                                        .collect(toUnmodifiableList());
    List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream()
                                         .flatMap(types -> Stream.of(types.split(",")))
                                         .filter(type -> ! type.isBlank())
                                         .collect(toUnmodifiableList());
    controller.applications().reindex(id, zone, clusterNames, documentTypes, request.getBooleanProperty("indexedOnly"));
    // Mention each filter independently: previously the types clause was nested inside the clusters
    // clause, so document types were omitted from the message whenever no clusters were given.
    return new MessageResponse("Requested reindexing of " + id + " in " + zone +
                               (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames)) +
                               (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)));
}
/** Gets reindexing status of an application in a zone. */
private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setBool("enabled", reindexing.enabled());
    // One entry per cluster, sorted by cluster name, each listing pending and ready document types.
    Cursor clustersArray = root.setArray("clusters");
    reindexing.clusters().entrySet().stream().sorted(comparingByKey())
              .forEach(cluster -> {
                  Cursor clusterObject = clustersArray.addObject();
                  clusterObject.setString("name", cluster.getKey());
                  // Pending: document types waiting on a required config generation, sorted by type name.
                  Cursor pendingArray = clusterObject.setArray("pending");
                  cluster.getValue().pending().entrySet().stream().sorted(comparingByKey())
                         .forEach(pending -> {
                             Cursor pendingObject = pendingArray.addObject();
                             pendingObject.setString("type", pending.getKey());
                             pendingObject.setLong("requiredGeneration", pending.getValue());
                         });
                  // Ready: document types with a reindexing status, serialized by setStatus, sorted by type name.
                  Cursor readyArray = clusterObject.setArray("ready");
                  cluster.getValue().ready().entrySet().stream().sorted(comparingByKey())
                         .forEach(ready -> {
                             Cursor readyObject = readyArray.addObject();
                             readyObject.setString("type", ready.getKey());
                             setStatus(readyObject, ready.getValue());
                         });
              });
    return new SlimeJsonResponse(slime);
}
/** Serializes the given reindexing status to the given cursor; absent values are simply omitted. */
void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
    status.readyAt().ifPresent(readyAt -> statusObject.setLong("readyAtMillis", readyAt.toEpochMilli()));
    status.startedAt().ifPresent(startedAt -> statusObject.setLong("startedAtMillis", startedAt.toEpochMilli()));
    status.endedAt().ifPresent(endedAt -> statusObject.setLong("endedAtMillis", endedAt.toEpochMilli()));
    // State is translated to its lower-case wire name; states mapping to null are omitted by the Optional chain.
    status.state().map(ApplicationApiHandler::toString).ifPresent(state -> statusObject.setString("state", state));
    status.message().ifPresent(message -> statusObject.setString("message", message));
    status.progress().ifPresent(progress -> statusObject.setDouble("progress", progress));
}
/** Returns the lower-case wire name of the given reindexing state, or null for states without one. */
private static String toString(ApplicationReindexing.State state) {
    switch (state) {
        case PENDING: return "pending";
        case RUNNING: return "running";
        case FAILED: return "failed";
        case SUCCESSFUL: return "successful";
    }
    return null; // unknown states end up omitted from the serialized status
}
/** Enables reindexing of an application in a zone. */
private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId requestedZone = requireZone(environment, region);
    controller.applications().enableReindexing(application, requestedZone);
    return new MessageResponse("Enabled reindexing of " + application + " in " + requestedZone);
}
/** Disables reindexing of an application in a zone. */
private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId requestedZone = requireZone(environment, region);
    controller.applications().disableReindexing(application, requestedZone);
    return new MessageResponse("Disabled reindexing of " + application + " in " + requestedZone);
}
/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    // Each filter property is optional; an empty filter restarts the whole deployment.
    Optional<HostName> hostName = Optional.ofNullable(request.getProperty("hostname")).map(HostName::from);
    Optional<ClusterSpec.Type> clusterType = Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from);
    Optional<ClusterSpec.Id> clusterId = Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from);
    controller.applications().restart(deployment, new RestartFilter().withHostName(hostName)
                                                                     .withClusterType(clusterType)
                                                                     .withClusterId(clusterId));
    return new MessageResponse("Requested restart of " + deployment);
}
/** Set suspension status of the given deployment. */
private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    controller.applications().setSuspension(deployment, suspend);
    String action = suspend ? "Suspended" : "Resumed";
    return new MessageResponse(action + " orchestration of " + deployment);
}
/** Deploys the posted application package directly to the zone of the given job type. */
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
    if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");

    Map<String, byte[]> dataParts = parseDataParts(request);
    // Check for the form part with the same constant used to read it below, instead of a duplicated string literal.
    if ( ! dataParts.containsKey(EnvironmentResource.APPLICATION_ZIP))
        throw new IllegalArgumentException("Missing required form part '" + EnvironmentResource.APPLICATION_ZIP + "'");

    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                     Optional.of(id.instance()),
                                                                     Optional.of(type.zone(controller.system())),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));

    // An explicit platform version may be given in the optional "deployOptions" JSON part.
    Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions"))
                                        .map(json -> SlimeUtils.jsonToSlime(json).get())
                                        .flatMap(options -> optional("vespaVersion", options))
                                        .map(Version::fromString);

    controller.jobController().deploy(id, type, version, applicationPackage);
    RunId runId = controller.jobController().last(id, type).get().id();
    Slime slime = new Slime();
    Cursor rootObject = slime.setObject();
    rootObject.setString("message", "Deployment started in " + runId +
                                    ". This may take about 15 minutes the first time.");
    rootObject.setLong("run", runId.number());
    return new SlimeJsonResponse(slime);
}
/** Deploys a system application to the given zone; regular applications are not supported through this API. */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    // The request is multipart; the "deployOptions" JSON part is required.
    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( ! dataParts.containsKey("deployOptions"))
        return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
    Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
    // Only system applications which carry an application package can be deployed here.
    Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId);
    if (systemApplication.isEmpty() || !systemApplication.get().hasApplicationPackage()) {
        return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API");
    }
    // The platform version is always the current system version; clients may not choose one.
    String vespaVersion = deployOptions.field("vespaVersion").asString();
    if (!vespaVersion.isEmpty() && !vespaVersion.equals("null")) {
        return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted");
    }
    VersionStatus versionStatus = controller.readVersionStatus();
    if (versionStatus.isUpgrading()) {
        throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
    }
    Optional<VespaVersion> systemVersion = versionStatus.systemVersion();
    if (systemVersion.isEmpty()) {
        throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
    }
    ActivateResult result = controller.applications()
            .deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.get().versionNumber());
    return new SlimeJsonResponse(toSlime(result));
}
/** Deletes the given tenant, if it exists, using credentials from the request body. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");

    TenantName name = tenant.get().name();
    Credentials credentials = accessControlRequests.credentials(name,
                                                                toSlime(request.getData()).get(),
                                                                request.getJDiscRequest());
    controller.tenants().delete(name, credentials);
    return tenant(tenant.get(), request);
}
/** Deletes the given application, using credentials from the request body. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.applications().deleteApplication(id, accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest()));
    return new MessageResponse("Deleted application " + id);
}
/** Deletes the given instance — and the application itself, when this removed its last instance. */
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    controller.applications().deleteInstance(id.instance(instanceName));
    // Deleting the last instance cascades to the application, which requires credentials from the request body.
    if (controller.applications().requireApplication(id).instances().isEmpty()) {
        Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
        controller.applications().deleteApplication(id, credentials);
    }
    return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString());
}
/** Deactivates the deployment of the given instance in the given zone. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deployment = new DeploymentId(application, requireZone(environment, region));
    controller.applications().deactivate(deployment.applicationId(), deployment.zoneId());
    return new MessageResponse("Deactivated " + deployment);
}
/** Returns test config for indicated job, with production deployments of the default instance. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
    // The config covers all production deployments of the application's default instance ...
    ApplicationId defaultInstanceId = TenantAndApplicationId.from(id).defaultInstance();
    HashSet<DeploymentId> deployments = controller.applications()
                                                  .getInstance(defaultInstanceId).stream()
                                                  .flatMap(instance -> instance.productionDeployments().keySet().stream())
                                                  .map(zone -> new DeploymentId(defaultInstanceId, zone))
                                                  .collect(Collectors.toCollection(HashSet::new));
    var testedZone = type.zone(controller.system());
    // ... plus the zone under test itself, unless this is a production job, whose deployment is already included above.
    if ( ! type.isProduction())
        deployments.add(new DeploymentId(id, testedZone));
    return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
                                                                  type,
                                                                  false,
                                                                  controller.routing().zoneEndpointsOf(deployments),
                                                                  controller.applications().reachableContentClustersByZone(deployments)));
}
/** Parses a source revision from the given object, requiring "repository", "branch" and "commit" fields. */
private static SourceRevision toSourceRevision(Inspector object) {
    Inspector repository = object.field("repository");
    Inspector branch = object.field("branch");
    Inspector commit = object.field("commit");
    if ( ! (repository.valid() && branch.valid() && commit.valid()))
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");

    return new SourceRevision(repository.asString(), branch.asString(), commit.asString());
}
/** Returns the tenant with the given name, or throws a NotExistsException. */
private Tenant getTenantOrThrow(String tenantName) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty())
        throw new NotExistsException(new TenantId(tenantName));

    return tenant.get();
}
/** Serializes the given tenant, with type-specific details and its applications, to the given cursor. */
private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) {
    object.setString("tenant", tenant.name().value());
    object.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz:
            // Athenz tenants carry their domain, property, and optional contact information.
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            object.setString("athensDomain", athenzTenant.domain().getName());
            object.setString("property", athenzTenant.property().id());
            athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
            athenzTenant.contact().ifPresent(c -> {
                object.setString("propertyUrl", c.propertyUrl().toString());
                object.setString("contactsUrl", c.url().toString());
                object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
                // Contacts is an array of arrays: one inner array of names per contact group.
                Cursor contactsArray = object.setArray("contacts");
                c.persons().forEach(persons -> {
                    Cursor personArray = contactsArray.addArray();
                    persons.forEach(personArray::addString);
                });
            });
            break;
        case cloud: {
            // Cloud tenants carry developer keys, secret stores, quota, and optional archive access.
            CloudTenant cloudTenant = (CloudTenant) tenant;
            cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName()));
            Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
            cloudTenant.developerKeys().forEach((key, user) -> {
                Cursor keyObject = pemDeveloperKeysArray.addObject();
                keyObject.setString("key", KeyUtils.toPem(key));
                keyObject.setString("user", user.getName());
            });
            toSlime(object, cloudTenant.tenantSecretStores());
            toSlime(object.setObject("integrations").setObject("aws"),
                    controller.serviceRegistry().roleService().getTenantRole(tenant.name()),
                    cloudTenant.tenantSecretStores());
            // Quota usage is summed over all of the tenant's applications.
            var tenantQuota = controller.serviceRegistry().billingController().getQuota(tenant.name());
            var usedQuota = applications.stream()
                    .map(Application::quotaUsage)
                    .reduce(QuotaUsage.none, QuotaUsage::add);
            toSlime(tenantQuota, usedQuota, object.setObject("quota"));
            cloudTenant.archiveAccessRole().ifPresent(role -> object.setString("archiveAccessRole", role));
            break;
        }
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    // Applications are serialized in full when recursion is requested, otherwise as id references only.
    Cursor applicationArray = object.setArray("applications");
    for (Application application : applications) {
        DeploymentStatus status = null;
        for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                      : application.instances().values())
            if (recurseOverApplications(request)) {
                // Deployment status is computed lazily, at most once per application.
                if (status == null) status = controller.jobController().deploymentStatus(application);
                toSlime(applicationArray.addObject(), instance, status, request);
            }
            else {
                toSlime(instance.id(), applicationArray.addObject(), request);
            }
    }
    tenantMetaDataToSlime(tenant, applications, object.setObject("metaData"));
}
/** Serializes quota limits and current usage to the given cursor; a missing budget is rendered as JSON null. */
private void toSlime(Quota quota, QuotaUsage usage, Cursor object) {
    if (quota.budget().isPresent())
        object.setDouble("budget", quota.budget().get().doubleValue());
    else
        object.setNix("budget");
    object.setDouble("budgetUsed", usage.rate());
    quota.maxClusterSize().ifPresent(size -> object.setLong("clusterSize", size));
}
/** Serializes the given cluster resources, including a cost estimate, to the given cursor. */
private void toSlime(ClusterResources resources, Cursor object) {
    object.setLong("nodes", resources.nodes());
    object.setLong("groups", resources.groups());
    toSlime(resources.nodeResources(), object.setObject("nodeResources"));
    // NOTE(review): non-public systems divide the cost by 3 — presumably an internal pricing adjustment; confirm before relying on it.
    double costDivisor = controller.zoneRegistry().system().isPublic() ? 1.0 : 3.0;
    // Total cost = nodes * per-node cost, rounded to two decimals.
    object.setDouble("cost", Math.round(resources.nodes() * resources.nodeResources().cost() * 100.0 / costDivisor) / 100.0);
}
/** Serializes the cluster's cpu, memory and disk utilization figures (actual, ideal and current) to the given cursor. */
private void utilizationToSlime(Cluster.Utilization utilization, Cursor utilizationObject) {
    utilizationObject.setDouble("cpu", utilization.cpu());
    utilizationObject.setDouble("idealCpu", utilization.idealCpu());
    utilizationObject.setDouble("currentCpu", utilization.currentCpu());
    utilizationObject.setDouble("memory", utilization.memory());
    utilizationObject.setDouble("idealMemory", utilization.idealMemory());
    utilizationObject.setDouble("currentMemory", utilization.currentMemory());
    utilizationObject.setDouble("disk", utilization.disk());
    utilizationObject.setDouble("idealDisk", utilization.idealDisk());
    utilizationObject.setDouble("currentDisk", utilization.currentDisk());
}
/** Serializes the given scaling events, each with its from/to resources and timestamps, to the given array. */
private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) {
    for (Cluster.ScalingEvent event : scalingEvents) {
        Cursor eventObject = scalingEventsArray.addObject();
        toSlime(event.from(), eventObject.setObject("from"));
        toSlime(event.to(), eventObject.setObject("to"));
        eventObject.setLong("at", event.at().toEpochMilli());
        event.completion().ifPresent(completedAt -> eventObject.setLong("completion", completedAt.toEpochMilli()));
    }
}
/** Serializes the given node resources to the given cursor. */
private void toSlime(NodeResources resources, Cursor object) {
    object.setDouble("vcpu", resources.vcpu());
    object.setDouble("memoryGb", resources.memoryGb());
    object.setDouble("diskGb", resources.diskGb());
    object.setDouble("bandwidthGbps", resources.bandwidthGbps());
    object.setString("diskSpeed", valueOf(resources.diskSpeed()));
    object.setString("storageType", valueOf(resources.storageType()));
}
/** Serializes a tenant list entry: name, type-dependent meta data, and a link to the tenant resource. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
    object.setString("tenant", tenant.name().value());
    Cursor metaData = object.setObject("metaData");
    metaData.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            metaData.setString("athensDomain", athenzTenant.domain().getName());
            metaData.setString("property", athenzTenant.property().id());
            break;
        case cloud: break; // no additional meta data for cloud tenants in this listing
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/** Serializes tenant activity meta data: creation time, last dev deployment, last submission, and last logins. */
private void tenantMetaDataToSlime(Tenant tenant, List<Application> applications, Cursor object) {
    // Last dev activity: prefer the timestamp of a still-active dev deployment; fall back to the
    // start time of the last recorded dev job run, since dev deployments may have expired.
    Optional<Instant> lastDev = applications.stream()
            .flatMap(application -> application.instances().values().stream())
            .flatMap(instance -> instance.deployments().values().stream())
            .filter(deployment -> deployment.zone().environment() == Environment.dev)
            .map(Deployment::at)
            .max(Comparator.naturalOrder())
            .or(() -> applications.stream()
                    .flatMap(application -> application.instances().values().stream())
                    .flatMap(instance -> JobType.allIn(controller.system()).stream()
                            .filter(job -> job.environment() == Environment.dev)
                            .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream()))
                    .map(Run::start)
                    .max(Comparator.naturalOrder()));
    // Last submission: the build time of the newest submitted application package, across all applications.
    Optional<Instant> lastSubmission = applications.stream()
            .flatMap(app -> app.latestVersion().flatMap(ApplicationVersion::buildTime).stream())
            .max(Comparator.naturalOrder());
    object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli());
    lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli()));
    lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli()));
    // Last login per user level; absent levels are omitted.
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user)
            .ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer)
            .ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator)
            .ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli()));
}
/** Returns a copy of the given URI with the path and query replaced by the given values (either may be null). */
private URI withPathAndQuery(String newPath, String newQuery, URI uri) {
    try {
        return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null);
    }
    catch (URISyntaxException e) {
        throw new RuntimeException("Will not happen", e);
    }
}
/** Returns a copy of the given URI with the path replaced by the given path, and the query removed. */
private URI withPath(String newPath, URI uri) {
    return withPathAndQuery(newPath, null, uri);
}
/** Returns the application/v4 API path of the given deployment. */
private String toPath(DeploymentId id) {
    return path("/application", "v4",
                "tenant", id.applicationId().tenant(),
                "application", id.applicationId().application(),
                "instance", id.applicationId().instance(),
                "environment", id.zoneId().environment(),
                "region", id.zoneId().region());
}
/** Returns the given string parsed as a long, or the given default when the string is null. */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null) return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    }
    catch (NumberFormatException e) {
        // Chain the cause for debuggability; the message is what API clients see.
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'", e);
    }
}
/** Serializes the essentials of the given job run to the given cursor. */
private void toSlime(Run run, Cursor object) {
    object.setLong("id", run.id().number());
    object.setString("version", run.versions().targetPlatform().toFullString());
    if ( ! run.versions().targetApplication().isUnknown())
        toSlime(run.versions().targetApplication(), object.setObject("revision"));
    // The actual trigger reason is not tracked here, so a fixed placeholder is emitted.
    object.setString("reason", "unknown reason");
    // Use the end time when the run has completed, and the start time otherwise.
    object.setLong("at", run.end().orElse(run.start()).toEpochMilli());
}
/** Reads the given stream (at most 1 MB) and parses it as JSON. */
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        // Chain the cause — previously thrown bare, which hid the original I/O failure entirely.
        throw new RuntimeException(e);
    }
}
/** Returns the user principal of the given request, or throws if the request has none. */
private static Principal requireUserPrincipal(HttpRequest request) {
    Principal user = request.getJDiscRequest().getUserPrincipal();
    if (user == null)
        throw new InternalServerErrorException("Expected a user principal");

    return user;
}
/** Returns the field with the given key in the given object, or throws if it is missing. */
private Inspector mandatory(String key, Inspector object) {
    Inspector field = object.field(key);
    if ( ! field.valid())
        throw new IllegalArgumentException("'" + key + "' is missing");

    return field;
}
/** Returns the string value of the given field, if it is present. */
private Optional<String> optional(String key, Inspector object) {
    return SlimeUtils.optionalString(object.field(key));
}
/** Joins the string representations of the given elements with '/'. */
private static String path(Object... elements) {
    return Joiner.on("/").join(elements);
}
/** Serializes the given application id, with a link to its API resource, to the given cursor. */
private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    String resourcePath = "/application/v4" +
                          "/tenant/" + id.tenant().value() +
                          "/application/" + id.application().value();
    object.setString("url", withPath(resourcePath, request.getUri()).toString());
}
/** Serializes the given instance id, with a link to its API resource, to the given cursor. */
private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    object.setString("instance", id.instance().value());
    String resourcePath = "/application/v4" +
                          "/tenant/" + id.tenant().value() +
                          "/application/" + id.application().value() +
                          "/instance/" + id.instance().value();
    object.setString("url", withPath(resourcePath, request.getUri()).toString());
}
/** Serializes the given prepare-and-activate result, including prepare log messages and config change actions. */
private Slime toSlime(ActivateResult result) {
    Slime slime = new Slime();
    Cursor object = slime.setObject();
    object.setString("revisionId", result.revisionId().id());
    object.setLong("applicationZipSize", result.applicationZipSizeBytes());
    // Log messages from the config server's prepare phase, if any were returned.
    Cursor logArray = object.setArray("prepareMessages");
    if (result.prepareResponse().log != null) {
        for (Log logMessage : result.prepareResponse().log) {
            Cursor logObject = logArray.addObject();
            logObject.setLong("time", logMessage.time);
            logObject.setString("level", logMessage.level);
            logObject.setString("message", logMessage.message);
        }
    }
    // Config change actions: services which must be restarted, and document types which must be re-fed.
    Cursor changeObject = object.setObject("configChangeActions");
    Cursor restartActionsArray = changeObject.setArray("restart");
    for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
        Cursor restartActionObject = restartActionsArray.addObject();
        restartActionObject.setString("clusterName", restartAction.clusterName);
        restartActionObject.setString("clusterType", restartAction.clusterType);
        restartActionObject.setString("serviceType", restartAction.serviceType);
        serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
        stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
    }
    Cursor refeedActionsArray = changeObject.setArray("refeed");
    for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
        Cursor refeedActionObject = refeedActionsArray.addObject();
        refeedActionObject.setString("name", refeedAction.name);
        refeedActionObject.setString("documentType", refeedAction.documentType);
        refeedActionObject.setString("clusterName", refeedAction.clusterName);
        serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
        stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
    }
    return slime;
}
/** Serializes the given service infos to the given array cursor. */
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
    for (ServiceInfo service : serviceInfoList) {
        Cursor serviceObject = array.addObject();
        serviceObject.setString("serviceName", service.serviceName);
        serviceObject.setString("serviceType", service.serviceType);
        serviceObject.setString("configId", service.configId);
        serviceObject.setString("hostName", service.hostName);
    }
}
/** Adds each of the given strings to the given array cursor. */
private void stringsToSlime(List<String> strings, Cursor array) {
    strings.forEach(array::addString);
}
/** Serializes the given secret stores under a "secretStores" array in the given cursor. */
private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) {
    Cursor stores = object.setArray("secretStores");
    for (TenantSecretStore store : tenantSecretStores)
        toSlime(stores.addObject(), store);
}
/** Serializes the tenant container role and its secret store accounts to the given cursor. */
private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) {
    object.setString("tenantRole", tenantRoles.containerRole());
    Cursor accounts = object.setArray("accounts");
    for (TenantSecretStore store : tenantSecretStores)
        toSlime(accounts.addObject(), store);
}
/** Serializes the given secret store to the given cursor. */
private void toSlime(Cursor object, TenantSecretStore secretStore) {
    object.setString("name", secretStore.getName());
    object.setString("awsId", secretStore.getAwsId());
    object.setString("role", secretStore.getRole());
}
/** Returns the contents of the given stream as a UTF-8 string, or null if the stream is empty. */
private String readToString(InputStream stream) {
    // Decode with an explicit charset: the no-charset Scanner constructor uses the platform default,
    // which is not necessarily UTF-8.
    Scanner scanner = new Scanner(stream, "UTF-8").useDelimiter("\\A");
    if ( ! scanner.hasNext()) return null;
    return scanner.next();
}
/** Returns whether the "recursive" property asks for recursion at the tenant level or deeper. */
private static boolean recurseOverTenants(HttpRequest request) {
    return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive"));
}
/** Returns whether the "recursive" property asks for recursion at the application level or deeper. */
private static boolean recurseOverApplications(HttpRequest request) {
    return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive"));
}
/** Returns whether the "recursive" property asks for recursion down to the deployment level. */
private static boolean recurseOverDeployments(HttpRequest request) {
    return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive"));
}
/** Returns whether the "production" property restricts listings to production instances only. */
private static boolean showOnlyProductionInstances(HttpRequest request) {
    return "true".equals(request.getProperty("production"));
}
/** Returns the API name of the given tenant's type. */
private static String tenantType(Tenant tenant) {
    switch (tenant.type()) {
        case athenz: return "ATHENS";
        case cloud: return "CLOUD";
        // Report the type value actually switched on — previously the class simple name,
        // inconsistent with the "Unexpected tenant type" messages elsewhere in this handler.
        default: throw new IllegalArgumentException("Unknown tenant type: " + tenant.type());
    }
}
/** Parses the application id from the "tenant", "application" and "instance" path segments. */
private static ApplicationId appIdFromPath(Path path) {
    return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
}
/** Parses the job type from the "jobtype" path segment. */
private static JobType jobTypeFromPath(Path path) {
    return JobType.fromJobName(path.get("jobtype"));
}
/** Parses the run id from the application, job type and "number" path segments. */
private static RunId runIdFromPath(Path path) {
    return new RunId(appIdFromPath(path), jobTypeFromPath(path), Long.parseLong(path.get("number")));
}
/** Registers a new submission of an application package (and test package) for the given application. */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
    Map<String, byte[]> dataParts = parseDataParts(request);
    Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
    long projectId = Math.max(1, submitOptions.field("projectId").asLong()); // clamped to at least 1
    Optional<String> repository = optional("repository", submitOptions);
    Optional<String> branch = optional("branch", submitOptions);
    Optional<String> commit = optional("commit", submitOptions);
    // A source revision is only recorded when repository, branch and commit are all given.
    Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
            ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
            : Optional.empty();
    Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
    Optional<String> authorEmail = optional("authorEmail", submitOptions);
    // A source URL, when given, must be absolute.
    sourceUrl.map(URI::create).ifPresent(url -> {
        if (url.getHost() == null || url.getScheme() == null)
            throw new IllegalArgumentException("Source URL must include scheme and host");
    });
    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
    controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
                                                                     Optional.empty(),
                                                                     Optional.empty(),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));
    return JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
                                                        tenant,
                                                        application,
                                                        sourceRevision,
                                                        authorEmail,
                                                        sourceUrl,
                                                        projectId,
                                                        applicationPackage,
                                                        dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP));
}
private HttpResponse removeAllProdDeployments(String tenant, String application) {
JobControllerApiHandlerHelper.submitResponse(controller.jobController(), tenant, application,
Optional.empty(), Optional.empty(), Optional.empty(), 1,
ApplicationPackage.deploymentRemoval(), new byte[0]);
return new MessageResponse("All deployments removed");
}
private ZoneId requireZone(String environment, String region) {
ZoneId zone = ZoneId.from(environment, region);
if (zone.environment() == Environment.prod && zone.region().value().equals("controller")) {
return zone;
}
if (!controller.zoneRegistry().hasZone(zone)) {
throw new IllegalArgumentException("Zone " + zone + " does not exist in this system");
}
return zone;
}
private static Map<String, byte[]> parseDataParts(HttpRequest request) {
String contentHash = request.getHeader("X-Content-Hash");
if (contentHash == null)
return new MultipartParser().parse(request);
DigestInputStream digester = Signatures.sha256Digester(request.getData());
var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
return dataParts;
}
private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
if (instance.rotations().isEmpty()) {
throw new NotExistsException("global rotation does not exist for " + instance);
}
if (endpointId.isPresent()) {
return instance.rotations().stream()
.filter(r -> r.endpointId().id().equals(endpointId.get()))
.map(AssignedRotation::rotationId)
.findFirst()
.orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() +
" does not exist for " + instance));
} else if (instance.rotations().size() > 1) {
throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
}
return instance.rotations().get(0).rotationId();
}
private static String rotationStateString(RotationState state) {
switch (state) {
case in: return "IN";
case out: return "OUT";
}
return "UNKNOWN";
}
private static String endpointScopeString(Endpoint.Scope scope) {
switch (scope) {
case region: return "region";
case global: return "global";
case zone: return "zone";
}
throw new IllegalArgumentException("Unknown endpoint scope " + scope);
}
private static String routingMethodString(RoutingMethod method) {
switch (method) {
case exclusive: return "exclusive";
case shared: return "shared";
case sharedLayer4: return "sharedLayer4";
}
throw new IllegalArgumentException("Unknown routing method " + method);
}
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName))
.filter(cls::isInstance)
.map(cls::cast)
.orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request"));
}
/** Returns whether given request is by an operator */
private static boolean isOperator(HttpRequest request) {
var securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
return securityContext.roles().stream()
.map(Role::definition)
.anyMatch(definition -> definition == RoleDefinition.hostedOperator);
}
} | class ApplicationApiHandler extends AuditLoggingRequestHandler {
    // Shared JSON mapper, cached to avoid per-request construction cost.
    private static final ObjectMapper jsonMapper = new ObjectMapper();
    // Central controller facade; all tenant/application/deployment operations go through it.
    private final Controller controller;
    // Parses access-control payloads from requests — presumably used by tenant CRUD handlers; see callers.
    private final AccessControlRequests accessControlRequests;
    // Serializes test configuration for the system-under-test zone.
    private final TestConfigSerializer testConfigSerializer;
    /**
     * Creates the handler.
     *
     * @param parentCtx             handler context supplied by the container
     * @param controller            the controller this handler operates on
     * @param accessControlRequests access-control request parser (injected)
     */
    @Inject
    public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx,
                                 Controller controller,
                                 AccessControlRequests accessControlRequests) {
        super(parentCtx, controller.auditLogger());
        this.controller = controller;
        this.accessControlRequests = accessControlRequests;
        this.testConfigSerializer = new TestConfigSerializer(controller.system());
    }
    /** Requests served here may be long-running, so allow up to 20 minutes before timing out. */
    @Override
    public Duration getTimeout() {
        return Duration.ofMinutes(20);
    }
    /**
     * Dispatches the request by HTTP method and translates known exception types into
     * appropriate HTTP error responses. Catch clauses are ordered most-specific first;
     * unexpected runtime exceptions are logged and become 500s.
     */
    @Override
    public HttpResponse auditAndHandle(HttpRequest request) {
        try {
            Path path = new Path(request.getUri());
            switch (request.getMethod()) {
                case GET: return handleGET(path, request);
                case PUT: return handlePUT(path, request);
                case POST: return handlePOST(path, request);
                case PATCH: return handlePATCH(path, request);
                case DELETE: return handleDELETE(path, request);
                case OPTIONS: return handleOPTIONS();
                default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
            }
        }
        catch (ForbiddenException e) {
            return ErrorResponse.forbidden(Exceptions.toMessageString(e));
        }
        catch (NotAuthorizedException e) {
            return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
        }
        catch (NotExistsException e) {
            return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
        }
        catch (IllegalArgumentException e) {
            return ErrorResponse.badRequest(Exceptions.toMessageString(e));
        }
        catch (ConfigServerException e) {
            // Map config server error codes onto HTTP statuses.
            switch (e.code()) {
                case NOT_FOUND:
                    return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
                case ACTIVATION_CONFLICT:
                    return new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e));
                case INTERNAL_SERVER_ERROR:
                    return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
                default:
                    return new ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e));
            }
        }
        catch (RuntimeException e) {
            // Unexpected error: log with stack trace so it can be diagnosed, but do not leak details beyond the message.
            log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
            return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
        }
    }
private HttpResponse handleGET(Path path, HttpRequest request) {
if (path.matches("/application/v4/")) return root(request);
if (path.matches("/application/v4/tenant")) return tenants(request);
if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request);
if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/metering")) return metering(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return metrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
return ErrorResponse.notFoundError("Nothing at " + path);
}
    /** Routes PUT requests under /application/v4 to the matching handler; 404 when nothing matches. */
    private HttpResponse handlePUT(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return allowArchiveAccess(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
        // Both path orders (instance-before-region and region-before-instance) are accepted for rotation override.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }
    /** Routes POST requests under /application/v4 to the matching handler; 404 when nothing matches. */
    private HttpResponse handlePOST(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
        // Application-level routes use the "default" instance.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        // Legacy path order with instance after region:
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }
    /** Routes PATCH requests under /application/v4; instance-level PATCH applies to the whole application. */
    private HttpResponse handlePATCH(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
        // The instance segment is accepted but ignored — the patch targets the application.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }
    /** Routes DELETE requests under /application/v4 to the matching handler; 404 when nothing matches. */
    private HttpResponse handleDELETE(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return removeArchiveAccess(path.get("tenant"));
        if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
        // Application-level routes use the "default" instance.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        // Legacy path order with instance after region:
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }
private HttpResponse handleOPTIONS() {
EmptyResponse response = new EmptyResponse();
response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
return response;
}
private HttpResponse recursiveRoot(HttpRequest request) {
Slime slime = new Slime();
Cursor tenantArray = slime.setArray();
List<Application> applications = controller.applications().asList();
for (Tenant tenant : controller.tenants().asList())
toSlime(tenantArray.addObject(),
tenant,
applications.stream().filter(app -> app.id().tenant().equals(tenant.name())).collect(toList()),
request);
return new SlimeJsonResponse(slime);
}
private HttpResponse root(HttpRequest request) {
return recurseOverTenants(request)
? recursiveRoot(request)
: new ResourceResponse(request, "tenant");
}
private HttpResponse tenants(HttpRequest request) {
Slime slime = new Slime();
Cursor response = slime.setArray();
for (Tenant tenant : controller.tenants().asList())
tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
return new SlimeJsonResponse(slime);
}
private HttpResponse tenant(String tenantName, HttpRequest request) {
return controller.tenants().get(TenantName.from(tenantName))
.map(tenant -> tenant(tenant, request))
.orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
}
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
Slime slime = new Slime();
toSlime(slime.setObject(), tenant, controller.applications().asList(tenant.name()), request);
return new SlimeJsonResponse(slime);
}
private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
return controller.tenants().get(TenantName.from(tenantName))
.filter(tenant -> tenant.type() == Tenant.Type.cloud)
.map(tenant -> tenantInfo(((CloudTenant)tenant).info(), request))
.orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
}
private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) {
Slime slime = new Slime();
Cursor infoCursor = slime.setObject();
if (!info.isEmpty()) {
infoCursor.setString("name", info.name());
infoCursor.setString("email", info.email());
infoCursor.setString("website", info.website());
infoCursor.setString("invoiceEmail", info.invoiceEmail());
infoCursor.setString("contactName", info.contactName());
infoCursor.setString("contactEmail", info.contactEmail());
toSlime(info.address(), infoCursor);
toSlime(info.billingContact(), infoCursor);
}
return new SlimeJsonResponse(slime);
}
private void toSlime(TenantInfoAddress address, Cursor parentCursor) {
if (address.isEmpty()) return;
Cursor addressCursor = parentCursor.setObject("address");
addressCursor.setString("addressLines", address.addressLines());
addressCursor.setString("postalCodeOrZip", address.postalCodeOrZip());
addressCursor.setString("city", address.city());
addressCursor.setString("stateRegionProvince", address.stateRegionProvince());
addressCursor.setString("country", address.country());
}
private void toSlime(TenantInfoBillingContact billingContact, Cursor parentCursor) {
if (billingContact.isEmpty()) return;
Cursor addressCursor = parentCursor.setObject("billingContact");
addressCursor.setString("name", billingContact.name());
addressCursor.setString("email", billingContact.email());
addressCursor.setString("phone", billingContact.phone());
toSlime(billingContact.address(), addressCursor);
}
/**
 * Updates the info of the given tenant from the request body, or returns 404
 * when the tenant does not exist or is not a cloud tenant.
 */
private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isPresent() && tenant.get().type() == Tenant.Type.cloud)
        return updateTenantInfo((CloudTenant) tenant.get(), request);
    return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
}
/**
 * Returns the string value of the given field, or the given default when the
 * field is absent/invalid in the request JSON.
 */
private String getString(Inspector field, String defaultValue) { // fixed typo: was "defaultVale"
    return field.valid() ? field.asString() : defaultValue;
}
/**
 * Merges the tenant info fields present in the request body into the tenant's
 * existing info (absent fields keep their old values) and stores the result.
 */
private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) {
    TenantInfo oldInfo = tenant.info();
    Inspector insp = toSlime(request.getData()).get();
    TenantInfo mergedInfo = TenantInfo.EMPTY
            .withName(getString(insp.field("name"), oldInfo.name()))
            .withEmail(getString(insp.field("email"), oldInfo.email()))
            .withWebsite(getString(insp.field("website"), oldInfo.website()))            // fixed: fell back to email()
            .withInvoiceEmail(getString(insp.field("invoiceEmail"), oldInfo.invoiceEmail()))
            .withContactName(getString(insp.field("contactName"), oldInfo.contactName()))
            .withContactEmail(getString(insp.field("contactEmail"), oldInfo.contactEmail())) // fixed: fell back to contactName()
            .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address()))
            .withBillingContact(updateTenantInfoBillingContact(insp.field("billingContact"), oldInfo.billingContact()));
    // Persist under the tenant lock so concurrent updates do not lose fields.
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}
/**
 * Merges the address fields present in the given JSON object into the old
 * address; returns the old address unchanged when no "address" object was sent.
 */
private TenantInfoAddress updateTenantInfoAddress(Inspector insp, TenantInfoAddress oldAddress) {
    if ( ! insp.valid()) return oldAddress;
    TenantInfoAddress merged = TenantInfoAddress.EMPTY;
    merged = merged.withCountry(getString(insp.field("country"), oldAddress.country()));
    merged = merged.withStateRegionProvince(getString(insp.field("stateRegionProvince"), oldAddress.stateRegionProvince()));
    merged = merged.withCity(getString(insp.field("city"), oldAddress.city()));
    merged = merged.withPostalCodeOrZip(getString(insp.field("postalCodeOrZip"), oldAddress.postalCodeOrZip()));
    return merged.withAddressLines(getString(insp.field("addressLines"), oldAddress.addressLines()));
}
/**
 * Merges the billing-contact fields present in the given JSON object into the
 * old contact; returns the old contact unchanged when no object was sent.
 */
private TenantInfoBillingContact updateTenantInfoBillingContact(Inspector insp, TenantInfoBillingContact oldContact) {
    if ( ! insp.valid()) return oldContact;
    TenantInfoBillingContact merged = TenantInfoBillingContact.EMPTY;
    merged = merged.withName(getString(insp.field("name"), oldContact.name()));
    merged = merged.withEmail(getString(insp.field("email"), oldContact.email()));
    merged = merged.withPhone(getString(insp.field("phone"), oldContact.phone()));
    return merged.withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address()));
}
/**
 * Lists notifications for the given tenant, optionally narrowed to an
 * application and/or instance via the "application"/"instance" query parameters.
 */
private HttpResponse notifications(String tenantName, HttpRequest request) {
    NotificationSource source = new NotificationSource(TenantName.from(tenantName),
                                                       Optional.ofNullable(request.getProperty("application")).map(ApplicationName::from),
                                                       Optional.ofNullable(request.getProperty("instance")).map(InstanceName::from),
                                                       Optional.empty(), Optional.empty(), Optional.empty(), OptionalLong.empty());
    Slime slime = new Slime();
    Cursor array = slime.setObject().setArray("notifications");
    for (Notification notification : controller.notificationsDb().listNotifications(source, showOnlyProductionInstances(request)))
        toSlime(array.addObject(), notification);
    return new SlimeJsonResponse(slime);
}
/**
 * Serializes a single notification: timestamp, level, type, messages, and the
 * parts of its source (application/instance/zone/cluster/job/run) that are set.
 */
private static void toSlime(Cursor cursor, Notification notification) {
cursor.setLong("at", notification.at().toEpochMilli());
cursor.setString("level", notificationLevelAsString(notification.level()));
cursor.setString("type", notificationTypeAsString(notification.type()));
Cursor messagesArray = cursor.setArray("messages");
notification.messages().forEach(messagesArray::addString);
// Optional source dimensions are emitted only when present.
notification.source().application().ifPresent(application -> cursor.setString("application", application.value()));
notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value()));
notification.source().zoneId().ifPresent(zoneId -> {
cursor.setString("environment", zoneId.environment().value());
cursor.setString("region", zoneId.region().value());
});
notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value()));
notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName()));
notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber));
}
/** Returns the wire name of the given notification type; throws for unserializable types. */
private static String notificationTypeAsString(Notification.Type type) {
    if (type == Notification.Type.applicationPackage) return "applicationPackage";
    if (type == Notification.Type.deployment) return "deployment";
    if (type == Notification.Type.feedBlock) return "feedBlock";
    throw new IllegalArgumentException("No serialization defined for notification type " + type);
}
/** Returns the wire name of the given notification level; throws for unserializable levels. */
private static String notificationLevelAsString(Notification.Level level) {
    if (level == Notification.Level.warning) return "warning";
    if (level == Notification.Level.error) return "error";
    throw new IllegalArgumentException("No serialization defined for notification level " + level);
}
/**
 * Lists the applications of a tenant (optionally filtered to one application
 * name), each with its instances and resource URLs. Returns 404 when the
 * tenant does not exist.
 */
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
TenantName tenant = TenantName.from(tenantName);
if (controller.tenants().get(tenantName).isEmpty())
return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
Slime slime = new Slime();
Cursor applicationArray = slime.setArray();
for (Application application : controller.applications().asList(tenant)) {
// No filter given (empty Optional) means include every application.
if (applicationName.map(application.id().application().value()::equals).orElse(true)) {
Cursor applicationObject = applicationArray.addObject();
applicationObject.setString("tenant", application.id().tenant().value());
applicationObject.setString("application", application.id().application().value());
applicationObject.setString("url", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value(),
request.getUri()).toString());
Cursor instanceArray = applicationObject.setArray("instances");
// Optionally restrict to production instances, per request parameter.
for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
: application.instances().keySet()) {
Cursor instanceObject = instanceArray.addObject();
instanceObject.setString("instance", instance.value());
instanceObject.setString("url", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value() +
"/instance/" + instance.value(),
request.getUri()).toString());
}
}
}
return new SlimeJsonResponse(slime);
}
/**
 * Returns the application package last deployed to the given manually deployed
 * (dev/perf) zone, as a zip download.
 */
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
    if ( ! type.environment().isManuallyDeployed())
        throw new IllegalArgumentException("Only manually deployed zones have dev packages");
    ZoneId zone = type.zone(controller.system());
    byte[] content = controller.applications().applicationStore().getDev(id, zone);
    String filename = id.toFullString() + "." + zone.value() + ".zip";
    return new ZipResponse(filename, content);
}
/**
 * Returns a submitted application package as a zip download. The build is taken
 * from the "build" query parameter when given, otherwise the latest submitted
 * build is used. Throws NotExistsException when no matching package exists.
 */
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
    var tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
    // Removed unused local: ApplicationId for the default instance was computed but never read.
    long buildNumber;
    var requestedBuild = Optional.ofNullable(request.getProperty("build")).map(build -> {
        try {
            return Long.parseLong(build);
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("Invalid build number", e);
        }
    });
    if (requestedBuild.isEmpty()) { // Use the latest build if not specified
        var application = controller.applications().requireApplication(tenantAndApplication);
        var latestBuild = application.latestVersion().map(ApplicationVersion::buildNumber).orElse(OptionalLong.empty());
        if (latestBuild.isEmpty()) {
            throw new NotExistsException("No application package has been submitted for '" + tenantAndApplication + "'");
        }
        buildNumber = latestBuild.getAsLong();
    } else {
        buildNumber = requestedBuild.get();
    }
    var applicationPackage = controller.applications().applicationStore().find(tenantAndApplication.tenant(), tenantAndApplication.application(), buildNumber);
    var filename = tenantAndApplication + "-build" + buildNumber + ".zip";
    if (applicationPackage.isEmpty()) {
        throw new NotExistsException("No application package found for '" +
                                     tenantAndApplication +
                                     "' with build number " + buildNumber);
    }
    return new ZipResponse(filename, applicationPackage.get());
}
/** Returns the full JSON view of a single application; throws NotExistsException when absent. */
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
    Slime response = new Slime();
    Application application = getApplication(tenantName, applicationName);
    toSlime(response.setObject(), application, request);
    return new SlimeJsonResponse(response);
}
/** Returns the Vespa version the given application should currently compile against. */
private HttpResponse compileVersion(String tenantName, String applicationName) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    String version = compileVersion(id).toFullString();
    Slime slime = new Slime();
    slime.setObject().setString("compileVersion", version);
    return new SlimeJsonResponse(slime);
}
/** Returns the full JSON view of a single instance, including its deployment status. */
private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Slime slime = new Slime();
    Instance instance = getInstance(tenantName, applicationName, instanceName);
    DeploymentStatus status = controller.jobController().deploymentStatus(getApplication(tenantName, applicationName));
    toSlime(slime.setObject(), instance, status, request);
    return new SlimeJsonResponse(slime);
}
/**
 * Registers the PEM-encoded public key in the request body as a developer key
 * for the requesting user on the given cloud tenant, and returns the tenant's
 * full key list. Only cloud tenants support developer keys.
 */
private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
Principal user = request.getJDiscRequest().getUserPrincipal();
String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
Slime root = new Slime();
// Mutate and serialize under the tenant lock so the response reflects the stored state.
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
tenant = tenant.withDeveloperKey(developerKey, user);
toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
controller.tenants().store(tenant);
});
return new SlimeJsonResponse(root);
}
private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) {
var awsRegion = request.getProperty("aws-region");
var parameterName = request.getProperty("parameter-name");
var applicationId = ApplicationId.fromFullString(request.getProperty("application-id"));
var zoneId = ZoneId.from(request.getProperty("zone"));
var deploymentId = new DeploymentId(applicationId, zoneId);
var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class);
var tenantSecretStore = tenant.tenantSecretStores()
.stream()
.filter(secretStore -> secretStore.getName().equals(secretStoreName))
.findFirst();
if (tenantSecretStore.isEmpty())
return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'");
var response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName);
try {
var responseRoot = new Slime();
var responseCursor = responseRoot.setObject();
responseCursor.setString("target", deploymentId.toString());
var responseResultCursor = responseCursor.setObject("result");
var responseSlime = SlimeUtils.jsonToSlime(response);
SlimeUtils.copyObject(responseSlime.get(), responseResultCursor);
return new SlimeJsonResponse(responseRoot);
} catch (JsonParseException e) {
return ErrorResponse.internalServerError(response);
}
}
/**
 * Removes the PEM-encoded developer key in the request body from the given
 * cloud tenant and returns the tenant's remaining key list.
 */
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    // Removed dead code: the key's owning Principal was looked up but never used.
    Slime root = new Slime();
    // Mutate and serialize under the tenant lock so the response reflects the stored state.
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withoutDeveloperKey(developerKey);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}
/** Serializes each (public key, owner) pair as an object with "key" (PEM) and "user" fields. */
private void toSlime(Cursor keysArray, Map<PublicKey, Principal> keys) {
    for (Map.Entry<PublicKey, Principal> entry : keys.entrySet()) {
        Cursor keyObject = keysArray.addObject();
        keyObject.setString("key", KeyUtils.toPem(entry.getKey()));
        keyObject.setString("user", entry.getValue().getName());
    }
}
/**
 * Adds the PEM-encoded public key in the request body as a deploy key for the
 * given application, and returns the application's full deploy-key list.
 */
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
Slime root = new Slime();
// Mutate and serialize under the application lock so the response reflects the stored state.
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
application = application.withDeployKey(deployKey);
application.get().deployKeys().stream()
.map(KeyUtils::toPem)
.forEach(root.setObject().setArray("keys")::addString);
controller.applications().store(application);
});
return new SlimeJsonResponse(root);
}
/**
 * Removes the PEM-encoded deploy key in the request body from the given
 * application, and returns the application's remaining deploy-key list.
 */
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
Slime root = new Slime();
// Mutate and serialize under the application lock so the response reflects the stored state.
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
application = application.withoutDeployKey(deployKey);
application.get().deployKeys().stream()
.map(KeyUtils::toPem)
.forEach(root.setObject().setArray("keys")::addString);
controller.applications().store(application);
});
return new SlimeJsonResponse(root);
}
/**
 * Configures a new secret store (name, AWS account id, IAM role, external id)
 * for a cloud tenant: creates the tenant policy, registers the store with the
 * secret service, persists it on the tenant, and returns the tenant's full
 * secret-store list. Rejects invalid or duplicate stores with 400.
 */
private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
var data = toSlime(request.getData()).get();
var awsId = mandatory("awsId", data).asString();
var externalId = mandatory("externalId", data).asString();
var role = mandatory("role", data).asString();
var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var tenantSecretStore = new TenantSecretStore(name, awsId, role);
if (!tenantSecretStore.isValid()) {
return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid");
}
if (tenant.tenantSecretStores().contains(tenantSecretStore)) {
return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured");
}
// External side effects first: IAM policy and secret-service registration.
controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role);
controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId);
// Then persist the store on the tenant under the tenant lock.
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withSecretStore(tenantSecretStore);
controller.tenants().store(lockedTenant);
});
// Re-read to serialize the stored state rather than the pre-update snapshot.
tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var slime = new Slime();
toSlime(slime.setObject(), tenant.tenantSecretStores());
return new SlimeJsonResponse(slime);
}
/**
 * Deletes the named secret store from a cloud tenant: removes it from the
 * secret service and its tenant policy, removes it from the stored tenant, and
 * returns the tenant's remaining secret-store list. Returns 404 when no store
 * with the given name exists.
 * NOTE(review): the request parameter is currently unused; kept for handler-signature symmetry.
 */
private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) {
var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var optionalSecretStore = tenant.tenantSecretStores().stream()
.filter(secretStore -> secretStore.getName().equals(name))
.findFirst();
if (optionalSecretStore.isEmpty())
return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found");
var tenantSecretStore = optionalSecretStore.get();
// External side effects first: secret-service deregistration and IAM policy removal.
controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore);
controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole());
// Then remove the store from the tenant under the tenant lock.
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withoutSecretStore(tenantSecretStore);
controller.tenants().store(lockedTenant);
});
// Re-read to serialize the stored state rather than the pre-update snapshot.
tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var slime = new Slime();
toSlime(slime.setObject(), tenant.tenantSecretStores());
return new SlimeJsonResponse(slime);
}
/**
 * Sets the archive-access role for a cloud tenant from the mandatory "role"
 * field in the request body. Rejects blank roles with 400.
 */
private HttpResponse allowArchiveAccess(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
var data = toSlime(request.getData()).get();
var role = mandatory("role", data).asString();
if (role.isBlank()) {
return ErrorResponse.badRequest("Archive access role can't be whitespace only");
}
// Persist the role under the tenant lock.
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withArchiveAccessRole(Optional.of(role));
controller.tenants().store(lockedTenant);
});
return new MessageResponse("Archive access role set to '" + role + "' for tenant " + tenantName + ".");
}
/** Clears the archive-access role of a cloud tenant. */
private HttpResponse removeArchiveAccess(String tenantName) {
    TenantName name = TenantName.from(tenantName);
    if (controller.tenants().require(name).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    controller.tenants().lockOrThrow(name, LockedTenant.Cloud.class, locked -> {
        locked = locked.withArchiveAccessRole(Optional.empty());
        controller.tenants().store(locked);
    });
    return new MessageResponse("Archive access role removed for tenant " + tenantName + ".");
}
/**
 * Partially updates an application from the request body. Supported fields:
 * "majorVersion" (0 clears the pinned major) and "pemDeployKey" (adds a deploy
 * key). Returns a message describing the changes applied, or "No applicable
 * changes." when the body contains neither field.
 */
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
Inspector requestObject = toSlime(request.getData()).get();
StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
Inspector majorVersionField = requestObject.field("majorVersion");
if (majorVersionField.valid()) {
// A value of 0 means "unpin": store null to clear the major version.
Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
application = application.withMajorVersion(majorVersion);
messageBuilder.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
}
Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
if (pemDeployKeyField.valid()) {
String pemDeployKey = pemDeployKeyField.asString();
PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
application = application.withDeployKey(deployKey);
messageBuilder.add("Added deploy key " + pemDeployKey);
}
controller.applications().store(application);
});
return new MessageResponse(messageBuilder.toString());
}
/** Returns the given application, or throws NotExistsException when it does not exist. */
private Application getApplication(String tenantName, String applicationName) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Optional<Application> application = controller.applications().getApplication(id);
    return application.orElseThrow(() -> new NotExistsException(id + " not found"));
}
/** Returns the given instance, or throws NotExistsException when it does not exist. */
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Optional<Instance> instance = controller.applications().getInstance(id);
    return instance.orElseThrow(() -> new NotExistsException(id + " not found"));
}
/**
 * Lists the nodes allocated to the given deployment, with state, version,
 * resources, cluster membership and maintenance flags, as reported by the node
 * repository of the target zone.
 */
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, id);
Slime slime = new Slime();
Cursor nodesArray = slime.setObject().setArray("nodes");
for (Node node : nodes) {
Cursor nodeObject = nodesArray.addObject();
nodeObject.setString("hostname", node.hostname().value());
nodeObject.setString("state", valueOf(node.state()));
nodeObject.setString("orchestration", valueOf(node.serviceState()));
nodeObject.setString("version", node.currentVersion().toString());
nodeObject.setString("flavor", node.flavor());
toSlime(node.resources(), nodeObject);
nodeObject.setString("clusterId", node.clusterId());
nodeObject.setString("clusterType", valueOf(node.clusterType()));
// "down" reflects whether the node's history contains any "down" event.
nodeObject.setBool("down", node.history().stream().anyMatch(event -> "down".equals(event.getEvent())));
nodeObject.setBool("retired", node.retired() || node.wantToRetire());
// Restart/reboot are pending while the wanted generation exceeds the current one.
nodeObject.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration());
nodeObject.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration());
}
return new SlimeJsonResponse(slime);
}
/**
 * Returns the clusters of the given deployment with their min/max/current
 * resources, autoscaling target and suggestion, utilization, scaling events
 * and autoscaling metrics, as reported by the node repository.
 */
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);
Slime slime = new Slime();
Cursor clustersObject = slime.setObject().setObject("clusters");
for (Cluster cluster : application.clusters().values()) {
Cursor clusterObject = clustersObject.setObject(cluster.id().value());
clusterObject.setString("type", cluster.type().name());
toSlime(cluster.min(), clusterObject.setObject("min"));
toSlime(cluster.max(), clusterObject.setObject("max"));
toSlime(cluster.current(), clusterObject.setObject("current"));
// Only emit a target when one is present and differs from the current resources.
if (cluster.target().isPresent()
&& ! cluster.target().get().justNumbers().equals(cluster.current().justNumbers()))
toSlime(cluster.target().get(), clusterObject.setObject("target"));
cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested")));
utilizationToSlime(cluster.utilization(), clusterObject.setObject("utilization"));
scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus());
clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis());
clusterObject.setDouble("maxQueryGrowthRate", cluster.maxQueryGrowthRate());
clusterObject.setDouble("currentQueryFractionOfMax", cluster.currentQueryFractionOfMax());
}
return new SlimeJsonResponse(slime);
}
/**
 * Returns the wire name of a node state. For every serializable state the wire
 * name equals the enum constant's name; unexpected states are rejected.
 */
private static String valueOf(Node.State state) {
    switch (state) {
        case failed:
        case parked:
        case dirty:
        case ready:
        case active:
        case inactive:
        case reserved:
        case provisioned:
            return state.name();
        default:
            throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
    }
}
/**
 * Returns the wire name of a node's orchestration state. The wire name equals
 * the enum constant's name for all known states; anything else maps to "unknown".
 */
static String valueOf(Node.ServiceState state) {
    switch (state) {
        case expectedUp:
        case allowedDown:
        case permanentlyDown:
        case unorchestrated:
            return state.name();
        default:
            return "unknown";
    }
}
/**
 * Returns the wire name of a node cluster type. For every serializable type the
 * wire name equals the enum constant's name; unexpected types are rejected.
 */
private static String valueOf(Node.ClusterType type) {
    switch (type) {
        case admin:
        case content:
        case container:
        case combined:
            return type.name();
        default:
            throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
    }
}
/**
 * Returns the wire name of a disk speed. For every serializable value the wire
 * name equals the enum constant's name; unknown values are rejected.
 */
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
    switch (diskSpeed) {
        case fast:
        case slow:
        case any:
            return diskSpeed.name();
        default:
            throw new IllegalArgumentException("Unknown disk speed '" + diskSpeed.name() + "'");
    }
}
/**
 * Returns the wire name of a storage type. For every serializable value the
 * wire name equals the enum constant's name; unknown values are rejected.
 */
private static String valueOf(NodeResources.StorageType storageType) {
    switch (storageType) {
        case remote:
        case local:
        case any:
            return storageType.name();
        default:
            throw new IllegalArgumentException("Unknown storage type '" + storageType.name() + "'");
    }
}
/**
 * Streams Vespa logs for the given deployment from the config server to the
 * client. Query parameters (e.g. time range, hostname filters) are forwarded
 * to the config server untouched.
 */
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
DeploymentId deployment = new DeploymentId(application, zone);
InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
// Pipe the config server's log stream straight through instead of buffering it.
return new HttpResponse(200) {
@Override
public void render(OutputStream outputStream) throws IOException {
logStream.transferTo(outputStream);
}
};
}
/** Returns the current support-access state for the given deployment. */
private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deployment = new DeploymentId(application, requireZone(environment, region));
    SupportAccess supportAccess = controller.supportAccess().forDeployment(deployment);
    Optional<Instant> now = Optional.of(controller.clock().instant());
    return new SlimeJsonResponse(SupportAccessSerializer.toSlime(supportAccess, false, now));
}
/** Revokes support access for the given deployment on behalf of the requesting user. */
private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deployment = new DeploymentId(application, requireZone(environment, region));
    Principal principal = requireUserPrincipal(request);
    SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName());
    Optional<Instant> now = Optional.of(controller.clock().instant());
    return new SlimeJsonResponse(SupportAccessSerializer.toSlime(disallowed, false, now));
}
/** Returns proton (search-core) metrics for the given deployment as JSON. */
private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    return buildResponseFromProtonMetrics(controller.serviceRegistry().configServer().getProtonMetrics(deployment));
}
/**
 * Wraps the given proton metrics in a {"metrics": [...]} JSON response.
 * Serialization failures are logged and reported as an empty 500 response.
 */
private JsonResponse buildResponseFromProtonMetrics(List<ProtonMetrics> protonMetrics) {
try {
var jsonObject = jsonMapper.createObjectNode();
var jsonArray = jsonMapper.createArrayNode();
for (ProtonMetrics metrics : protonMetrics) {
jsonArray.add(metrics.toJson());
}
jsonObject.set("metrics", jsonArray);
return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonObject));
} catch (JsonProcessingException e) {
log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
return new JsonResponse(500, "");
}
}
/**
 * Triggers the given job for the given instance. With "reTrigger" in the body
 * the currently failing run is re-triggered as-is; otherwise the job is force
 * triggered (optionally skipping tests via "skipTests") attributed to the
 * requesting user. Returns a message naming what was (not) triggered.
 */
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
Inspector requestObject = toSlime(request.getData()).get();
boolean requireTests = ! requestObject.field("skipTests").asBool();
boolean reTrigger = requestObject.field("reTrigger").asBool();
String triggered = reTrigger
? controller.applications().deploymentTrigger()
.reTrigger(id, type).type().jobName()
: controller.applications().deploymentTrigger()
.forceTrigger(id, type, request.getJDiscRequest().getUserPrincipal().getName(), requireTests)
.stream().map(job -> job.type().jobName()).collect(joining(", "));
// Force-trigger may legitimately trigger nothing, e.g. when the job is already running.
return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
: "Triggered " + triggered + " for " + id);
}
/** Pauses the given job for the given instance for the maximum allowed duration. */
private HttpResponse pause(ApplicationId id, JobType type) {
    Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause);
    controller.applications().deploymentTrigger().pauseJob(id, type, until);
    String message = type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause;
    return new MessageResponse(message);
}
/** Resumes a previously paused job for the given instance. */
private HttpResponse resume(ApplicationId id, JobType type) {
    controller.applications().deploymentTrigger().resumeJob(id, type);
    String message = type.jobName() + " for " + id + " resumed";
    return new MessageResponse(message);
}
/**
 * Serializes an application: identity, job URL, latest submitted version,
 * current/outstanding change, instances (optionally production only), deploy
 * keys, quality metrics, activity, and ownership/deployment issue references.
 */
private void toSlime(Cursor object, Application application, HttpRequest request) {
object.setString("tenant", application.id().tenant().value());
object.setString("application", application.id().application().value());
object.setString("deployments", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value() +
"/job/",
request.getUri()).toString());
DeploymentStatus status = controller.jobController().deploymentStatus(application);
application.latestVersion().ifPresent(version -> toSlime(version, object.setObject("latestVersion")));
application.projectId().ifPresent(id -> object.setLong("projectId", id));
// Top-level change info is taken from the first instance, when one exists.
application.instances().values().stream().findFirst().ifPresent(instance -> {
if ( ! instance.change().isEmpty())
toSlime(object.setObject("deploying"), instance.change());
if ( ! status.outstandingChange(instance.name()).isEmpty())
toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
});
application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
Cursor instancesArray = object.setArray("instances");
// Optionally restrict to production instances, per request parameter.
for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
: application.instances().values())
toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);
application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
Cursor metricsObject = object.setObject("metrics");
metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
Cursor activity = object.setObject("activity");
application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
object.setString("instance", instance.name().value());
if (deploymentSpec.instance(instance.name()).isPresent()) {
List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
.steps(deploymentSpec.requireInstance(instance.name()))
.sortedJobs(status.instanceJobs(instance.name()).values());
if ( ! instance.change().isEmpty())
toSlime(object.setObject("deploying"), instance.change());
if ( ! status.outstandingChange(instance.name()).isEmpty())
toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
Cursor changeBlockers = object.setArray("changeBlockers");
deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
Cursor changeBlockerObject = changeBlockers.addObject();
changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
Cursor days = changeBlockerObject.setArray("days");
changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
Cursor hours = changeBlockerObject.setArray("hours");
changeBlocker.window().hours().forEach(hours::addLong);
}));
}
globalEndpointsToSlime(object, instance);
List<Deployment> deployments = deploymentSpec.instance(instance.name())
.map(spec -> new DeploymentSteps(spec, controller::system))
.map(steps -> steps.sortedDeployments(instance.deployments().values()))
.orElse(List.copyOf(instance.deployments().values()));
Cursor deploymentsArray = object.setArray("deployments");
for (Deployment deployment : deployments) {
Cursor deploymentObject = deploymentsArray.addObject();
if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
if (recurseOverDeployments(request))
toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
else {
deploymentObject.setString("environment", deployment.zone().environment().value());
deploymentObject.setString("region", deployment.zone().region().value());
deploymentObject.setString("url", withPath(request.getUri().getPath() +
"/instance/" + instance.name().value() +
"/environment/" + deployment.zone().environment().value() +
"/region/" + deployment.zone().region().value(),
request.getUri()).toString());
}
}
}
private void globalEndpointsToSlime(Cursor object, Instance instance) {
var globalEndpointUrls = new LinkedHashSet<String>();
controller.routing().endpointsOf(instance.id())
.requiresRotation()
.not().legacy()
.asList().stream()
.map(Endpoint::url)
.map(URI::toString)
.forEach(globalEndpointUrls::add);
var globalRotationsArray = object.setArray("globalRotations");
globalEndpointUrls.forEach(globalRotationsArray::addString);
instance.rotations().stream()
.map(AssignedRotation::rotationId)
.findFirst()
.ifPresent(rotation -> object.setString("rotationId", rotation.asString()));
}
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
Application application = status.application();
object.setString("tenant", instance.id().tenant().value());
object.setString("application", instance.id().application().value());
object.setString("instance", instance.id().instance().value());
object.setString("deployments", withPath("/application/v4" +
"/tenant/" + instance.id().tenant().value() +
"/application/" + instance.id().application().value() +
"/instance/" + instance.id().instance().value() + "/job/",
request.getUri()).toString());
application.latestVersion().ifPresent(version -> {
sourceRevisionToSlime(version.source(), object.setObject("source"));
version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
version.commit().ifPresent(commit -> object.setString("commit", commit));
});
application.projectId().ifPresent(id -> object.setLong("projectId", id));
if (application.deploymentSpec().instance(instance.name()).isPresent()) {
List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
.steps(application.deploymentSpec().requireInstance(instance.name()))
.sortedJobs(status.instanceJobs(instance.name()).values());
if ( ! instance.change().isEmpty())
toSlime(object.setObject("deploying"), instance.change());
if ( ! status.outstandingChange(instance.name()).isEmpty())
toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
Cursor changeBlockers = object.setArray("changeBlockers");
application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
Cursor changeBlockerObject = changeBlockers.addObject();
changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
Cursor days = changeBlockerObject.setArray("days");
changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
Cursor hours = changeBlockerObject.setArray("hours");
changeBlocker.window().hours().forEach(hours::addLong);
}));
}
application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
globalEndpointsToSlime(object, instance);
List<Deployment> deployments =
application.deploymentSpec().instance(instance.name())
.map(spec -> new DeploymentSteps(spec, controller::system))
.map(steps -> steps.sortedDeployments(instance.deployments().values()))
.orElse(List.copyOf(instance.deployments().values()));
Cursor instancesArray = object.setArray("instances");
for (Deployment deployment : deployments) {
Cursor deploymentObject = instancesArray.addObject();
if (deployment.zone().environment() == Environment.prod) {
if (instance.rotations().size() == 1) {
toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment),
deploymentObject);
}
if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
}
}
if (recurseOverDeployments(request))
toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
else {
deploymentObject.setString("environment", deployment.zone().environment().value());
deploymentObject.setString("region", deployment.zone().region().value());
deploymentObject.setString("instance", instance.id().instance().value());
deploymentObject.setString("url", withPath(request.getUri().getPath() +
"/environment/" + deployment.zone().environment().value() +
"/region/" + deployment.zone().region().value(),
request.getUri()).toString());
}
}
status.jobSteps().keySet().stream()
.filter(job -> job.application().instance().equals(instance.name()))
.filter(job -> job.type().isProduction() && job.type().isDeployment())
.map(job -> job.type().zone(controller.system()))
.filter(zone -> ! instance.deployments().containsKey(zone))
.forEach(zone -> {
Cursor deploymentObject = instancesArray.addObject();
deploymentObject.setString("environment", zone.environment().value());
deploymentObject.setString("region", zone.region().value());
});
application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
Cursor metricsObject = object.setObject("metrics");
metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
Cursor activity = object.setObject("activity");
application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment,
String region, HttpRequest request) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
Instance instance = controller.applications().getInstance(id)
.orElseThrow(() -> new NotExistsException(id + " not found"));
DeploymentId deploymentId = new DeploymentId(instance.id(),
requireZone(environment, region));
Deployment deployment = instance.deployments().get(deploymentId.zoneId());
if (deployment == null)
throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId());
Slime slime = new Slime();
toSlime(slime.setObject(), deploymentId, deployment, request);
return new SlimeJsonResponse(slime);
}
private void toSlime(Cursor object, Change change) {
change.platform().ifPresent(version -> object.setString("version", version.toString()));
change.application()
.filter(version -> !version.isUnknown())
.ifPresent(version -> toSlime(version, object.setObject("revision")));
}
    /** Serializes the given endpoint: cluster, TLS flag, URL, scope, routing method and legacy flag. */
    private void toSlime(Endpoint endpoint, Cursor object) {
        object.setString("cluster", endpoint.cluster().value());
        object.setBool("tls", endpoint.tls());
        object.setString("url", endpoint.url().toString());
        object.setString("scope", endpointScopeString(endpoint.scope()));
        object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
        object.setBool("legacy", endpoint.legacy());
    }
    /**
     * Serializes the full view of a single deployment: identity, endpoints, links, versions,
     * rotation and job status, archive URI, activity and metrics.
     */
    private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
        response.setString("tenant", deploymentId.applicationId().tenant().value());
        response.setString("application", deploymentId.applicationId().application().value());
        response.setString("instance", deploymentId.applicationId().instance().value());
        response.setString("environment", deploymentId.zoneId().environment().value());
        response.setString("region", deploymentId.zoneId().region().value());
        var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));

        // Legacy endpoints are excluded unless explicitly requested.
        boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints");
        var endpointArray = response.setArray("endpoints");
        // Zone-scoped endpoints of this deployment.
        EndpointList zoneEndpoints = controller.routing().endpointsOf(deploymentId)
                                               .scope(Endpoint.Scope.zone);
        if (!legacyEndpoints) {
            zoneEndpoints = zoneEndpoints.not().legacy();
        }
        for (var endpoint : controller.routing().directEndpoints(zoneEndpoints, deploymentId.applicationId())) {
            toSlime(endpoint, endpointArray.addObject());
        }
        // Global endpoints which include this deployment as a target.
        EndpointList globalEndpoints = controller.routing().endpointsOf(application, deploymentId.applicationId().instance())
                                                 .targets(deploymentId.zoneId());
        if (!legacyEndpoints) {
            globalEndpoints = globalEndpoints.not().legacy();
        }
        for (var endpoint : controller.routing().directEndpoints(globalEndpoints, deploymentId.applicationId())) {
            toSlime(endpoint, endpointArray.addObject());
        }

        // Links to related resources: clusters, node repository and monitoring.
        response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
        response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
        response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
        response.setString("version", deployment.version().toFullString());
        response.setString("revision", deployment.applicationVersion().id());
        response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
        // Expiry only applies to zones with a deployment time-to-live (e.g. dev/test).
        controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
                  .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));

        DeploymentStatus status = controller.jobController().deploymentStatus(application);
        application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
        sourceRevisionToSlime(deployment.applicationVersion().source(), response);

        var instance = application.instances().get(deploymentId.applicationId().instance());
        if (instance != null) {
            // Rotation status only applies to production deployments with assigned rotations.
            if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);
            // Derive a coarse job status (complete / pending / running) for the job deploying to this zone.
            JobType.from(controller.system(), deployment.zone())
                   .map(type -> new JobId(instance.id(), type))
                   .map(status.jobSteps()::get)
                   .ifPresent(stepStatus -> {
                       JobControllerApiHandlerHelper.applicationVersionToSlime(
                               response.setObject("applicationVersion"), deployment.applicationVersion());
                       if (!status.jobsToRun().containsKey(stepStatus.job().get()))
                           response.setString("status", "complete");
                       else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true))
                           response.setString("status", "pending");
                       else response.setString("status", "running");
                   });
        }

        controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant())
                  .ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString()));

        // Recent read/write activity of this deployment, where known.
        Cursor activity = response.setObject("activity");
        deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
                                                                                  instant.toEpochMilli()));
        deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
                                                                                  instant.toEpochMilli()));
        deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
        deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

        // Service metrics for this deployment.
        DeploymentMetrics metrics = deployment.metrics();
        Cursor metricsObject = response.setObject("metrics");
        metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
        metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
        metricsObject.setDouble("documentCount", metrics.documentCount());
        metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
        metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
        metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
    }
private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
if ( ! applicationVersion.isUnknown()) {
object.setLong("buildNumber", applicationVersion.buildNumber().getAsLong());
object.setString("hash", applicationVersion.id());
sourceRevisionToSlime(applicationVersion.source(), object.setObject("source"));
applicationVersion.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
applicationVersion.commit().ifPresent(commit -> object.setString("commit", commit));
}
}
private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
if (revision.isEmpty()) return;
object.setString("gitRepository", revision.get().repository());
object.setString("gitBranch", revision.get().branch());
object.setString("gitCommit", revision.get().commit());
}
    /** Writes the given rotation state as a "bcpStatus" object on the given cursor. */
    private void toSlime(RotationState state, Cursor object) {
        Cursor bcpStatus = object.setObject("bcpStatus");
        bcpStatus.setString("rotationStatus", rotationStateString(state));
    }
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
var array = object.setArray("endpointStatus");
for (var rotation : rotations) {
var statusObject = array.addObject();
var targets = status.of(rotation.rotationId());
statusObject.setString("endpointId", rotation.endpointId().id());
statusObject.setString("rotationId", rotation.rotationId().asString());
statusObject.setString("clusterId", rotation.clusterId().value());
statusObject.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment)));
statusObject.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
}
}
    /** Returns the monitoring-system URI for the given deployment, as configured in the zone registry. */
    private URI monitoringSystemUri(DeploymentId deploymentId) {
        return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
    }
/**
* Returns a non-broken, released version at least as old as the oldest platform the given application is on.
*
* If no known version is applicable, the newest version at least as old as the oldest platform is selected,
* among all versions released for this system. If no such versions exists, throws an IllegalStateException.
*/
private Version compileVersion(TenantAndApplicationId id) {
Version oldestPlatform = controller.applications().oldestInstalledPlatform(id);
VersionStatus versionStatus = controller.readVersionStatus();
return versionStatus.versions().stream()
.filter(version -> version.confidence().equalOrHigherThan(VespaVersion.Confidence.low))
.filter(VespaVersion::isReleased)
.map(VespaVersion::versionNumber)
.filter(version -> ! version.isAfter(oldestPlatform))
.max(Comparator.naturalOrder())
.orElseGet(() -> controller.mavenRepository().metadata().versions().stream()
.filter(version -> ! version.isAfter(oldestPlatform))
.filter(version -> ! versionStatus.versions().stream()
.map(VespaVersion::versionNumber)
.collect(Collectors.toSet()).contains(version))
.max(Comparator.naturalOrder())
.orElseThrow(() -> new IllegalStateException("No available releases of " +
controller.mavenRepository().artifactId())));
}
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
ZoneId zone = requireZone(environment, region);
Deployment deployment = instance.deployments().get(zone);
if (deployment == null) {
throw new NotExistsException(instance + " has no deployment in " + zone);
}
var deploymentId = new DeploymentId(instance.id(), zone);
setGlobalRotationStatus(deploymentId, inService, request);
setGlobalEndpointStatus(deploymentId, inService, request);
return new MessageResponse(String.format("Successfully set %s in %s %s service",
instance.id().toShortString(), zone, inService ? "in" : "out of"));
}
/** Set the global endpoint status for given deployment. This only applies to global endpoints backed by a cloud service */
private void setGlobalEndpointStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
var agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant;
var status = inService ? GlobalRouting.Status.in : GlobalRouting.Status.out;
controller.routing().policies().setGlobalRoutingStatus(deployment, status, agent);
}
/** Set the global rotation status for given deployment. This only applies to global endpoints backed by a rotation */
private void setGlobalRotationStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
var requestData = toSlime(request.getData()).get();
var reason = mandatory("reason", requestData).asString();
var agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant;
long timestamp = controller.clock().instant().getEpochSecond();
var status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
var endpointStatus = new EndpointStatus(status, reason, agent.name(), timestamp);
controller.routing().setGlobalRotationStatus(deployment, endpointStatus);
}
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
requireZone(environment, region));
Slime slime = new Slime();
Cursor array = slime.setObject().setArray("globalrotationoverride");
controller.routing().globalRotationStatus(deploymentId)
.forEach((endpoint, status) -> {
array.addString(endpoint.upstreamIdOf(deploymentId));
Cursor statusObject = array.addObject();
statusObject.setString("status", status.getStatus().name());
statusObject.setString("reason", status.getReason() == null ? "" : status.getReason());
statusObject.setString("agent", status.getAgent() == null ? "" : status.getAgent());
statusObject.setLong("timestamp", status.getEpoch());
});
return new SlimeJsonResponse(slime);
}
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
Instance instance = controller.applications().requireInstance(applicationId);
ZoneId zone = requireZone(environment, region);
RotationId rotation = findRotationId(instance, endpointId);
Deployment deployment = instance.deployments().get(zone);
if (deployment == null) {
throw new NotExistsException(instance + " has no deployment in " + zone);
}
Slime slime = new Slime();
Cursor response = slime.setObject();
toSlime(instance.rotationStatus().of(rotation, deployment), response);
return new SlimeJsonResponse(slime);
}
private HttpResponse metering(String tenant, String application, HttpRequest request) {
Slime slime = new Slime();
Cursor root = slime.setObject();
MeteringData meteringData = controller.serviceRegistry()
.meteringService()
.getMeteringData(TenantName.from(tenant), ApplicationName.from(application));
ResourceAllocation currentSnapshot = meteringData.getCurrentSnapshot();
Cursor currentRate = root.setObject("currentrate");
currentRate.setDouble("cpu", currentSnapshot.getCpuCores());
currentRate.setDouble("mem", currentSnapshot.getMemoryGb());
currentRate.setDouble("disk", currentSnapshot.getDiskGb());
ResourceAllocation thisMonth = meteringData.getThisMonth();
Cursor thismonth = root.setObject("thismonth");
thismonth.setDouble("cpu", thisMonth.getCpuCores());
thismonth.setDouble("mem", thisMonth.getMemoryGb());
thismonth.setDouble("disk", thisMonth.getDiskGb());
ResourceAllocation lastMonth = meteringData.getLastMonth();
Cursor lastmonth = root.setObject("lastmonth");
lastmonth.setDouble("cpu", lastMonth.getCpuCores());
lastmonth.setDouble("mem", lastMonth.getMemoryGb());
lastmonth.setDouble("disk", lastMonth.getDiskGb());
Map<ApplicationId, List<ResourceSnapshot>> history = meteringData.getSnapshotHistory();
Cursor details = root.setObject("details");
Cursor detailsCpu = details.setObject("cpu");
Cursor detailsMem = details.setObject("mem");
Cursor detailsDisk = details.setObject("disk");
history.forEach((applicationId, resources) -> {
String instanceName = applicationId.instance().value();
Cursor detailsCpuApp = detailsCpu.setObject(instanceName);
Cursor detailsMemApp = detailsMem.setObject(instanceName);
Cursor detailsDiskApp = detailsDisk.setObject(instanceName);
Cursor detailsCpuData = detailsCpuApp.setArray("data");
Cursor detailsMemData = detailsMemApp.setArray("data");
Cursor detailsDiskData = detailsDiskApp.setArray("data");
resources.forEach(resourceSnapshot -> {
Cursor cpu = detailsCpuData.addObject();
cpu.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
cpu.setDouble("value", resourceSnapshot.getCpuCores());
Cursor mem = detailsMemData.addObject();
mem.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
mem.setDouble("value", resourceSnapshot.getMemoryGb());
Cursor disk = detailsDiskData.addObject();
disk.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
disk.setDouble("value", resourceSnapshot.getDiskGb());
});
});
return new SlimeJsonResponse(slime);
}
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
Slime slime = new Slime();
Cursor root = slime.setObject();
if ( ! instance.change().isEmpty()) {
instance.change().platform().ifPresent(version -> root.setString("platform", version.toString()));
instance.change().application().ifPresent(applicationVersion -> root.setString("application", applicationVersion.id()));
root.setBool("pinned", instance.change().isPinned());
}
return new SlimeJsonResponse(slime);
}
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
requireZone(environment, region));
boolean suspended = controller.applications().isSuspended(deploymentId);
Slime slime = new Slime();
Cursor response = slime.setObject();
response.setBool("suspended", suspended);
return new SlimeJsonResponse(slime);
}
private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
ZoneId zone = requireZone(environment, region);
ServiceApiResponse response = new ServiceApiResponse(zone,
new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(),
List.of(controller.zoneRegistry().getConfigServerVipUri(zone)),
request.getUri());
response.setResponse(applicationView);
return response;
}
private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
if ("container-clustercontroller".equals((serviceName)) && restPath.contains("/status/")) {
String[] parts = restPath.split("/status/");
String result = controller.serviceRegistry().configServer().getClusterControllerStatus(deploymentId, parts[0], parts[1]);
return new HtmlResponse(result);
}
Map<?,?> result = controller.serviceRegistry().configServer().getServiceApiResponse(deploymentId, serviceName, restPath);
ServiceApiResponse response = new ServiceApiResponse(deploymentId.zoneId(),
deploymentId.applicationId(),
List.of(controller.zoneRegistry().getConfigServerVipUri(deploymentId.zoneId())),
request.getUri());
response.setResponse(result, serviceName, restPath);
return response;
}
    /** Proxies application-package content at the given path from the config server for the given deployment. */
    private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, String restPath, HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
        return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, "/" + restPath, request.getUri());
    }
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
getTenantOrThrow(tenantName);
TenantName tenant = TenantName.from(tenantName);
Inspector requestObject = toSlime(request.getData()).get();
controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
return tenant(controller.tenants().require(TenantName.from(tenantName)), request);
}
private HttpResponse createTenant(String tenantName, HttpRequest request) {
TenantName tenant = TenantName.from(tenantName);
Inspector requestObject = toSlime(request.getData()).get();
controller.tenants().create(accessControlRequests.specification(tenant, requestObject),
accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
return tenant(controller.tenants().require(TenantName.from(tenantName)), request);
}
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
Inspector requestObject = toSlime(request.getData()).get();
TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
Application application = controller.applications().createApplication(id, credentials);
Slime slime = new Slime();
toSlime(id, slime.setObject(), request);
return new SlimeJsonResponse(slime);
}
private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
if (controller.applications().getApplication(applicationId).isEmpty())
createApplication(tenantName, applicationName, request);
controller.applications().createInstance(applicationId.instance(instanceName));
Slime slime = new Slime();
toSlime(applicationId.instance(instanceName), slime.setObject(), request);
return new SlimeJsonResponse(slime);
}
    /** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
    private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
        // The request body is the bare version string.
        String versionString = readToString(request.getData());
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        StringBuilder response = new StringBuilder();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            Version version = Version.fromString(versionString);
            VersionStatus versionStatus = controller.readVersionStatus();
            // An empty version means "current system version".
            if (version.equals(Version.emptyVersion))
                version = controller.systemVersion(versionStatus);
            // Only versions active in this system may be deployed to.
            if (!versionStatus.isActive(version))
                throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                                   "Version is not active in this system. " +
                                                   "Active versions: " + versionStatus.versions()
                                                                                      .stream()
                                                                                      .map(VespaVersion::versionNumber)
                                                                                      .map(Version::toString)
                                                                                      .collect(joining(", ")));
            Change change = Change.of(version);
            if (pin) // Pinning prevents automatic upgrades away from this version.
                change = change.withPin();
            controller.applications().deploymentTrigger().forceChange(id, change);
            response.append("Triggered ").append(change).append(" for ").append(id);
        });
        return new MessageResponse(response.toString());
    }
/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
StringBuilder response = new StringBuilder();
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
Change change = Change.of(application.get().latestVersion().get());
controller.applications().deploymentTrigger().forceChange(id, change);
response.append("Triggered ").append(change).append(" for ").append(id);
});
return new MessageResponse(response.toString());
}
    /** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
    private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        StringBuilder response = new StringBuilder();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            Change change = application.get().require(id.instance()).change();
            // Nothing to cancel: report that rather than failing.
            if (change.isEmpty()) {
                response.append("No deployment in progress for ").append(id).append(" at this time");
                return;
            }
            // choice must match a ChangesToCancel constant (case-insensitively); otherwise valueOf throws.
            ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase());
            controller.applications().deploymentTrigger().cancelChange(id, cancel);
            // Re-read the instance to report the change remaining after cancellation.
            response.append("Changed deployment from '").append(change).append("' to '").append(controller.applications().requireInstance(id).change()).append("' for ").append(id);
        });
        return new MessageResponse(response.toString());
    }
/** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */
private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream()
.flatMap(clusters -> Stream.of(clusters.split(",")))
.filter(cluster -> ! cluster.isBlank())
.collect(toUnmodifiableList());
List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream()
.flatMap(types -> Stream.of(types.split(",")))
.filter(type -> ! type.isBlank())
.collect(toUnmodifiableList());
controller.applications().reindex(id, zone, clusterNames, documentTypes, request.getBooleanProperty("indexedOnly"));
return new MessageResponse("Requested reindexing of " + id + " in " + zone +
(clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames) +
(documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes))));
}
    /**
     * Gets reindexing status of an application in a zone, as JSON:
     * an "enabled" flag plus one entry per cluster with its "pending" and "ready" document types.
     */
    private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);
        Slime slime = new Slime();
        Cursor root = slime.setObject();
        root.setBool("enabled", reindexing.enabled());
        Cursor clustersArray = root.setArray("clusters");
        // Sort by cluster name (and below by type name) for a stable response.
        reindexing.clusters().entrySet().stream().sorted(comparingByKey())
                  .forEach(cluster -> {
                      Cursor clusterObject = clustersArray.addObject();
                      clusterObject.setString("name", cluster.getKey());
                      Cursor pendingArray = clusterObject.setArray("pending");
                      cluster.getValue().pending().entrySet().stream().sorted(comparingByKey())
                             .forEach(pending -> {
                                 Cursor pendingObject = pendingArray.addObject();
                                 pendingObject.setString("type", pending.getKey());
                                 pendingObject.setLong("requiredGeneration", pending.getValue());
                             });
                      Cursor readyArray = clusterObject.setArray("ready");
                      cluster.getValue().ready().entrySet().stream().sorted(comparingByKey())
                             .forEach(ready -> {
                                 Cursor readyObject = readyArray.addObject();
                                 readyObject.setString("type", ready.getKey());
                                 setStatus(readyObject, ready.getValue());
                             });
                  });
        return new SlimeJsonResponse(slime);
    }
    /** Serializes the optional fields of a reindexing status into the given object; absent fields are omitted. */
    void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
        status.readyAt().ifPresent(readyAt -> statusObject.setLong("readyAtMillis", readyAt.toEpochMilli()));
        status.startedAt().ifPresent(startedAt -> statusObject.setLong("startedAtMillis", startedAt.toEpochMilli()));
        status.endedAt().ifPresent(endedAt -> statusObject.setLong("endedAtMillis", endedAt.toEpochMilli()));
        status.state().map(ApplicationApiHandler::toString).ifPresent(state -> statusObject.setString("state", state));
        status.message().ifPresent(message -> statusObject.setString("message", message));
        status.progress().ifPresent(progress -> statusObject.setDouble("progress", progress));
    }
private static String toString(ApplicationReindexing.State state) {
switch (state) {
case PENDING: return "pending";
case RUNNING: return "running";
case FAILED: return "failed";
case SUCCESSFUL: return "successful";
default: return null;
}
}
/** Enables reindexing of an application in a zone. */
private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
controller.applications().enableReindexing(id, zone);
return new MessageResponse("Enabled reindexing of " + id + " in " + zone);
}
/** Disables reindexing of an application in a zone. */
private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
controller.applications().disableReindexing(id, zone);
return new MessageResponse("Disabled reindexing of " + id + " in " + zone);
}
/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
requireZone(environment, region));
RestartFilter restartFilter = new RestartFilter()
.withHostName(Optional.ofNullable(request.getProperty("hostname")).map(HostName::from))
.withClusterType(Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from))
.withClusterId(Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from));
controller.applications().restart(deploymentId, restartFilter);
return new MessageResponse("Requested restart of " + deploymentId);
}
/** Set suspension status of the given deployment. */
private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
requireZone(environment, region));
controller.applications().setSuspension(deploymentId, suspend);
return new MessageResponse((suspend ? "Suspended" : "Resumed") + " orchestration of " + deploymentId);
}
    /**
     * Deploys an uploaded application package directly to the zone of the given job type.
     * Only allowed for manually deployed environments, unless the caller is an operator.
     * Expects a multipart body with an "applicationZip" part and an optional "deployOptions"
     * JSON part that may carry a "vespaVersion" override.
     */
    private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
        if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
            throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");
        Map<String, byte[]> dataParts = parseDataParts(request);
        // NOTE(review): the presence check uses the literal "applicationZip" while the lookup uses
        // EnvironmentResource.APPLICATION_ZIP — presumably the same value; confirm and unify.
        if ( ! dataParts.containsKey("applicationZip"))
            throw new IllegalArgumentException("Missing required form part 'applicationZip'");
        ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
        controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                         Optional.of(id.instance()),
                                                                         Optional.of(type.zone(controller.system())),
                                                                         applicationPackage,
                                                                         Optional.of(requireUserPrincipal(request)));
        // Optional platform version override from the "deployOptions" JSON part.
        Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions"))
                                            .map(json -> SlimeUtils.jsonToSlime(json).get())
                                            .flatMap(options -> optional("vespaVersion", options))
                                            .map(Version::fromString);
        controller.jobController().deploy(id, type, version, applicationPackage);
        RunId runId = controller.jobController().last(id, type).get().id();
        Slime slime = new Slime();
        Cursor rootObject = slime.setObject();
        rootObject.setString("message", "Deployment started in " + runId +
                                        ". This may take about 15 minutes the first time.");
        rootObject.setLong("run", runId.number());
        return new SlimeJsonResponse(slime);
    }
    /**
     * Deploys a system application to the given zone.
     * Only system applications with an application package are accepted; specifying a Vespa
     * version is rejected, and deployment is blocked while the system itself is upgrading.
     */
    private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        Map<String, byte[]> dataParts = parseDataParts(request);
        if ( ! dataParts.containsKey("deployOptions"))
            return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
        Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
        // This endpoint only serves system applications; everything else must go through regular jobs.
        Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId);
        if (systemApplication.isEmpty() || !systemApplication.get().hasApplicationPackage()) {
            return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API");
        }
        // System applications always deploy on the current system version; a client-chosen version is rejected.
        String vespaVersion = deployOptions.field("vespaVersion").asString();
        if (!vespaVersion.isEmpty() && !vespaVersion.equals("null")) {
            return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted");
        }
        VersionStatus versionStatus = controller.readVersionStatus();
        if (versionStatus.isUpgrading()) {
            throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
        }
        Optional<VespaVersion> systemVersion = versionStatus.systemVersion();
        if (systemVersion.isEmpty()) {
            throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
        }
        ActivateResult result = controller.applications()
                                          .deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.get().versionNumber());
        return new SlimeJsonResponse(toSlime(result));
    }
    /**
     * Deletes the given tenant, authorizing via credentials derived from the request body,
     * and returns the (now deleted) tenant's serialized form.
     */
    private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
        Optional<Tenant> tenant = controller.tenants().get(tenantName);
        if (tenant.isEmpty())
            return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");
        controller.tenants().delete(tenant.get().name(),
                                    accessControlRequests.credentials(tenant.get().name(),
                                                                      toSlime(request.getData()).get(),
                                                                      request.getJDiscRequest()));
        // Respond with the tenant as it was before deletion.
        return tenant(tenant.get(), request);
    }
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
controller.applications().deleteApplication(id, credentials);
return new MessageResponse("Deleted application " + id);
}
    /**
     * Deletes the given instance, and the whole application as well if this was its last instance.
     */
    private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
        TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
        controller.applications().deleteInstance(id.instance(instanceName));
        // If that was the last instance, remove the application itself; this requires credentials.
        if (controller.applications().requireApplication(id).instances().isEmpty()) {
            Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
            controller.applications().deleteApplication(id, credentials);
        }
        return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString());
    }
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
requireZone(environment, region));
controller.applications().deactivate(id.applicationId(), id.zoneId());
return new MessageResponse("Deactivated " + id);
}
    /** Returns test config for indicated job, with production deployments of the default instance. */
    private HttpResponse testConfig(ApplicationId id, JobType type) {
        // Tests need the endpoints of all production deployments of the default instance …
        ApplicationId defaultInstanceId = TenantAndApplicationId.from(id).defaultInstance();
        HashSet<DeploymentId> deployments = controller.applications()
                                                      .getInstance(defaultInstanceId).stream()
                                                      .flatMap(instance -> instance.productionDeployments().keySet().stream())
                                                      .map(zone -> new DeploymentId(defaultInstanceId, zone))
                                                      .collect(Collectors.toCollection(HashSet::new));
        // … plus the zone actually under test, unless it is itself a production zone (then it is already included).
        var testedZone = type.zone(controller.system());
        if ( ! type.isProduction())
            deployments.add(new DeploymentId(id, testedZone));
        return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
                                                                      type,
                                                                      false,
                                                                      controller.routing().zoneEndpointsOf(deployments),
                                                                      controller.applications().reachableContentClustersByZone(deployments)));
    }
private static SourceRevision toSourceRevision(Inspector object) {
if (!object.field("repository").valid() ||
!object.field("branch").valid() ||
!object.field("commit").valid()) {
throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
}
return new SourceRevision(object.field("repository").asString(),
object.field("branch").asString(),
object.field("commit").asString());
}
private Tenant getTenantOrThrow(String tenantName) {
return controller.tenants().get(tenantName)
.orElseThrow(() -> new NotExistsException(new TenantId(tenantName)));
}
    /**
     * Serializes a tenant, with type-specific fields (Athenz vs cloud), followed by its
     * applications — recursively when requested, and restricted to production instances
     * when the "production" property is set.
     */
    private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) {
        object.setString("tenant", tenant.name().value());
        object.setString("type", tenantType(tenant));
        switch (tenant.type()) {
            case athenz:
                AthenzTenant athenzTenant = (AthenzTenant) tenant;
                object.setString("athensDomain", athenzTenant.domain().getName());
                object.setString("property", athenzTenant.property().id());
                athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
                athenzTenant.contact().ifPresent(c -> {
                    object.setString("propertyUrl", c.propertyUrl().toString());
                    object.setString("contactsUrl", c.url().toString());
                    object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
                    Cursor contactsArray = object.setArray("contacts");
                    c.persons().forEach(persons -> {
                        Cursor personArray = contactsArray.addArray();
                        persons.forEach(personArray::addString);
                    });
                });
                break;
            case cloud: {
                CloudTenant cloudTenant = (CloudTenant) tenant;
                cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName()));
                Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
                cloudTenant.developerKeys().forEach((key, user) -> {
                    Cursor keyObject = pemDeveloperKeysArray.addObject();
                    keyObject.setString("key", KeyUtils.toPem(key));
                    keyObject.setString("user", user.getName());
                });
                toSlime(object, cloudTenant.tenantSecretStores());
                toSlime(object.setObject("integrations").setObject("aws"),
                        controller.serviceRegistry().roleService().getTenantRole(tenant.name()),
                        cloudTenant.tenantSecretStores());
                // Quota: the tenant's total allowance versus the sum of all applications' usage.
                var tenantQuota = controller.serviceRegistry().billingController().getQuota(tenant.name());
                var usedQuota = applications.stream()
                                            .map(Application::quotaUsage)
                                            .reduce(QuotaUsage.none, QuotaUsage::add);
                toSlime(tenantQuota, usedQuota, object.setObject("quota"));
                cloudTenant.archiveAccessRole().ifPresent(role -> object.setString("archiveAccessRole", role));
                break;
            }
            default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
        }
        Cursor applicationArray = object.setArray("applications");
        for (Application application : applications) {
            DeploymentStatus status = null;
            for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                          : application.instances().values())
                if (recurseOverApplications(request)) {
                    // Compute deployment status lazily, once per application, only when recursing.
                    if (status == null) status = controller.jobController().deploymentStatus(application);
                    toSlime(applicationArray.addObject(), instance, status, request);
                }
                else {
                    toSlime(instance.id(), applicationArray.addObject(), request);
                }
        }
        tenantMetaDataToSlime(tenant, applications, object.setObject("metaData"));
    }
private void toSlime(Quota quota, QuotaUsage usage, Cursor object) {
quota.budget().ifPresentOrElse(
budget -> object.setDouble("budget", budget.doubleValue()),
() -> object.setNix("budget")
);
object.setDouble("budgetUsed", usage.rate());
quota.maxClusterSize().ifPresent(maxClusterSize -> object.setLong("clusterSize", maxClusterSize));
}
private void toSlime(ClusterResources resources, Cursor object) {
object.setLong("nodes", resources.nodes());
object.setLong("groups", resources.groups());
toSlime(resources.nodeResources(), object.setObject("nodeResources"));
double costDivisor = controller.zoneRegistry().system().isPublic() ? 1.0 : 3.0;
object.setDouble("cost", Math.round(resources.nodes() * resources.nodeResources().cost() * 100.0 / costDivisor) / 100.0);
}
    /** Serializes measured, ideal and current utilization for cpu, memory and disk. */
    private void utilizationToSlime(Cluster.Utilization utilization, Cursor utilizationObject) {
        utilizationObject.setDouble("cpu", utilization.cpu());
        utilizationObject.setDouble("idealCpu", utilization.idealCpu());
        utilizationObject.setDouble("currentCpu", utilization.currentCpu());
        utilizationObject.setDouble("memory", utilization.memory());
        utilizationObject.setDouble("idealMemory", utilization.idealMemory());
        utilizationObject.setDouble("currentMemory", utilization.currentMemory());
        utilizationObject.setDouble("disk", utilization.disk());
        utilizationObject.setDouble("idealDisk", utilization.idealDisk());
        utilizationObject.setDouble("currentDisk", utilization.currentDisk());
    }
private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) {
for (Cluster.ScalingEvent scalingEvent : scalingEvents) {
Cursor scalingEventObject = scalingEventsArray.addObject();
toSlime(scalingEvent.from(), scalingEventObject.setObject("from"));
toSlime(scalingEvent.to(), scalingEventObject.setObject("to"));
scalingEventObject.setLong("at", scalingEvent.at().toEpochMilli());
scalingEvent.completion().ifPresent(completion -> scalingEventObject.setLong("completion", completion.toEpochMilli()));
}
}
    /** Serializes the per-node resource specification. */
    private void toSlime(NodeResources resources, Cursor object) {
        object.setDouble("vcpu", resources.vcpu());
        object.setDouble("memoryGb", resources.memoryGb());
        object.setDouble("diskGb", resources.diskGb());
        object.setDouble("bandwidthGbps", resources.bandwidthGbps());
        object.setString("diskSpeed", valueOf(resources.diskSpeed()));
        object.setString("storageType", valueOf(resources.storageType()));
    }
    /** Serializes the brief form of a tenant used in the tenant list: name, type metadata and a detail URL. */
    private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
        object.setString("tenant", tenant.name().value());
        Cursor metaData = object.setObject("metaData");
        metaData.setString("type", tenantType(tenant));
        switch (tenant.type()) {
            case athenz:
                AthenzTenant athenzTenant = (AthenzTenant) tenant;
                metaData.setString("athensDomain", athenzTenant.domain().getName());
                metaData.setString("property", athenzTenant.property().id());
                break;
            case cloud: break; // cloud tenants carry no extra list metadata
            default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
        }
        object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
    }
    /**
     * Serializes tenant activity metadata: creation time, last dev deployment, last production
     * submission, and last login per user level. Absent values are omitted.
     */
    private void tenantMetaDataToSlime(Tenant tenant, List<Application> applications, Cursor object) {
        // Last dev activity: newest dev deployment, or — if none remain deployed — newest dev job run.
        Optional<Instant> lastDev = applications.stream()
                                                .flatMap(application -> application.instances().values().stream())
                                                .flatMap(instance -> instance.deployments().values().stream())
                                                .filter(deployment -> deployment.zone().environment() == Environment.dev)
                                                .map(Deployment::at)
                                                .max(Comparator.naturalOrder())
                                                .or(() -> applications.stream()
                                                                      .flatMap(application -> application.instances().values().stream())
                                                                      .flatMap(instance -> JobType.allIn(controller.system()).stream()
                                                                                                  .filter(job -> job.environment() == Environment.dev)
                                                                                                  .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream()))
                                                                      .map(Run::start)
                                                                      .max(Comparator.naturalOrder()));
        Optional<Instant> lastSubmission = applications.stream()
                                                       .flatMap(app -> app.latestVersion().flatMap(ApplicationVersion::buildTime).stream())
                                                       .max(Comparator.naturalOrder());
        object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli());
        lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli()));
        lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli()));
        tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user)
              .ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli()));
        tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer)
              .ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli()));
        tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator)
              .ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli()));
    }
    /** Returns a copy of the given URI with the path set to the given path and the query set to the given query (fragment dropped). */
    private URI withPathAndQuery(String newPath, String newQuery, URI uri) {
        try {
            return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null);
        }
        catch (URISyntaxException e) {
            // All components come from an already-valid URI plus our own paths, so this cannot occur.
            throw new RuntimeException("Will not happen", e);
        }
    }

    /** Returns a copy of the given URI with the path set to the given path and no query. */
    private URI withPath(String newPath, URI uri) {
        return withPathAndQuery(newPath, null, uri);
    }
private String toPath(DeploymentId id) {
return path("/application", "v4",
"tenant", id.applicationId().tenant(),
"application", id.applicationId().application(),
"instance", id.applicationId().instance(),
"environment", id.zoneId().environment(),
"region", id.zoneId().region());
}
private long asLong(String valueOrNull, long defaultWhenNull) {
if (valueOrNull == null) return defaultWhenNull;
try {
return Long.parseLong(valueOrNull);
}
catch (NumberFormatException e) {
throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'");
}
}
    /** Serializes the brief form of a job run: number, platform, revision (when known), reason and timestamp. */
    private void toSlime(Run run, Cursor object) {
        object.setLong("id", run.id().number());
        object.setString("version", run.versions().targetPlatform().toFullString());
        if ( ! run.versions().targetApplication().isUnknown())
            toSlime(run.versions().targetApplication(), object.setObject("revision"));
        // No trigger reason is recorded for runs; emit a fixed placeholder.
        object.setString("reason", "unknown reason");
        // End time when finished, otherwise start time.
        object.setLong("at", run.end().orElse(run.start()).toEpochMilli());
    }
private Slime toSlime(InputStream jsonStream) {
try {
byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
return SlimeUtils.jsonToSlime(jsonBytes);
} catch (IOException e) {
throw new RuntimeException();
}
}
private static Principal requireUserPrincipal(HttpRequest request) {
Principal principal = request.getJDiscRequest().getUserPrincipal();
if (principal == null) throw new InternalServerErrorException("Expected a user principal");
return principal;
}
private Inspector mandatory(String key, Inspector object) {
if ( ! object.field(key).valid())
throw new IllegalArgumentException("'" + key + "' is missing");
return object.field(key);
}
    /** Returns the named field of the given object as a string, or empty if it is missing. */
    private Optional<String> optional(String key, Inspector object) {
        return SlimeUtils.optionalString(object.field(key));
    }
private static String path(Object... elements) {
return Joiner.on("/").join(elements);
}
private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
object.setString("tenant", id.tenant().value());
object.setString("application", id.application().value());
object.setString("url", withPath("/application/v4" +
"/tenant/" + id.tenant().value() +
"/application/" + id.application().value(),
request.getUri()).toString());
}
private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
object.setString("tenant", id.tenant().value());
object.setString("application", id.application().value());
object.setString("instance", id.instance().value());
object.setString("url", withPath("/application/v4" +
"/tenant/" + id.tenant().value() +
"/application/" + id.application().value() +
"/instance/" + id.instance().value(),
request.getUri()).toString());
}
    /**
     * Serializes the result of a deployment activation: revision, package size, prepare log
     * messages, and the config change actions (restarts and refeeds) it requires.
     */
    private Slime toSlime(ActivateResult result) {
        Slime slime = new Slime();
        Cursor object = slime.setObject();
        object.setString("revisionId", result.revisionId().id());
        object.setLong("applicationZipSize", result.applicationZipSizeBytes());
        Cursor logArray = object.setArray("prepareMessages");
        // The prepare log may legitimately be absent.
        if (result.prepareResponse().log != null) {
            for (Log logMessage : result.prepareResponse().log) {
                Cursor logObject = logArray.addObject();
                logObject.setLong("time", logMessage.time);
                logObject.setString("level", logMessage.level);
                logObject.setString("message", logMessage.message);
            }
        }
        Cursor changeObject = object.setObject("configChangeActions");
        Cursor restartActionsArray = changeObject.setArray("restart");
        for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
            Cursor restartActionObject = restartActionsArray.addObject();
            restartActionObject.setString("clusterName", restartAction.clusterName);
            restartActionObject.setString("clusterType", restartAction.clusterType);
            restartActionObject.setString("serviceType", restartAction.serviceType);
            serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
            stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
        }
        Cursor refeedActionsArray = changeObject.setArray("refeed");
        for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
            Cursor refeedActionObject = refeedActionsArray.addObject();
            refeedActionObject.setString("name", refeedAction.name);
            refeedActionObject.setString("documentType", refeedAction.documentType);
            refeedActionObject.setString("clusterName", refeedAction.clusterName);
            serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
            stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
        }
        return slime;
    }
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
for (ServiceInfo serviceInfo : serviceInfoList) {
Cursor serviceInfoObject = array.addObject();
serviceInfoObject.setString("serviceName", serviceInfo.serviceName);
serviceInfoObject.setString("serviceType", serviceInfo.serviceType);
serviceInfoObject.setString("configId", serviceInfo.configId);
serviceInfoObject.setString("hostName", serviceInfo.hostName);
}
}
private void stringsToSlime(List<String> strings, Cursor array) {
for (String string : strings)
array.addString(string);
}
private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) {
Cursor secretStore = object.setArray("secretStores");
tenantSecretStores.forEach(store -> {
toSlime(secretStore.addObject(), store);
});
}
private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) {
object.setString("tenantRole", tenantRoles.containerRole());
var stores = object.setArray("accounts");
tenantSecretStores.forEach(secretStore -> {
toSlime(stores.addObject(), secretStore);
});
}
    /** Serializes a single secret store: name, AWS account id and role. */
    private void toSlime(Cursor object, TenantSecretStore secretStore) {
        object.setString("name", secretStore.getName());
        object.setString("awsId", secretStore.getAwsId());
        object.setString("role", secretStore.getRole());
    }
private String readToString(InputStream stream) {
Scanner scanner = new Scanner(stream).useDelimiter("\\A");
if ( ! scanner.hasNext()) return null;
return scanner.next();
}
private static boolean recurseOverTenants(HttpRequest request) {
return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive"));
}
private static boolean recurseOverApplications(HttpRequest request) {
return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive"));
}
private static boolean recurseOverDeployments(HttpRequest request) {
return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive"));
}
private static boolean showOnlyProductionInstances(HttpRequest request) {
return "true".equals(request.getProperty("production"));
}
    /** Returns the wire name of the given tenant's type ("ATHENS" or "CLOUD"). */
    private static String tenantType(Tenant tenant) {
        switch (tenant.type()) {
            case athenz: return "ATHENS";
            case cloud: return "CLOUD";
            default: throw new IllegalArgumentException("Unknown tenant type: " + tenant.getClass().getSimpleName());
        }
    }
    /** Extracts the application id from the "tenant", "application" and "instance" path segments. */
    private static ApplicationId appIdFromPath(Path path) {
        return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
    }

    /** Extracts the job type from the "jobtype" path segment. */
    private static JobType jobTypeFromPath(Path path) {
        return JobType.fromJobName(path.get("jobtype"));
    }

    /** Extracts a full run id (application, job type, run number) from the path. */
    private static RunId runIdFromPath(Path path) {
        long number = Long.parseLong(path.get("number"));
        return new RunId(appIdFromPath(path), jobTypeFromPath(path), number);
    }
    /**
     * Submits an application package (and its test package) for deployment.
     * Expects multipart data with submit options (project id, source info) and the zip parts.
     * The source revision is only recorded when repository, branch and commit are all given.
     */
    private HttpResponse submit(String tenant, String application, HttpRequest request) {
        Map<String, byte[]> dataParts = parseDataParts(request);
        Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
        // Project id is clamped to at least 1 (0 is the "absent" value from asLong).
        long projectId = Math.max(1, submitOptions.field("projectId").asLong());
        Optional<String> repository = optional("repository", submitOptions);
        Optional<String> branch = optional("branch", submitOptions);
        Optional<String> commit = optional("commit", submitOptions);
        Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
                                                  ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
                                                  : Optional.empty();
        Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
        Optional<String> authorEmail = optional("authorEmail", submitOptions);
        sourceUrl.map(URI::create).ifPresent(url -> {
            if (url.getHost() == null || url.getScheme() == null)
                throw new IllegalArgumentException("Source URL must include scheme and host");
        });
        ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
        controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
                                                                         Optional.empty(),
                                                                         Optional.empty(),
                                                                         applicationPackage,
                                                                         Optional.of(requireUserPrincipal(request)));
        return JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
                                                            tenant,
                                                            application,
                                                            sourceRevision,
                                                            authorEmail,
                                                            sourceUrl,
                                                            projectId,
                                                            applicationPackage,
                                                            dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP));
    }
    /**
     * Removes all production deployments of the given application by submitting a synthetic
     * deployment-removal package; the submit response itself is deliberately discarded.
     */
    private HttpResponse removeAllProdDeployments(String tenant, String application) {
        JobControllerApiHandlerHelper.submitResponse(controller.jobController(), tenant, application,
                                                     Optional.empty(), Optional.empty(), Optional.empty(), 1,
                                                     ApplicationPackage.deploymentRemoval(), new byte[0]);
        return new MessageResponse("All deployments removed");
    }
private ZoneId requireZone(String environment, String region) {
ZoneId zone = ZoneId.from(environment, region);
if (zone.environment() == Environment.prod && zone.region().value().equals("controller")) {
return zone;
}
if (!controller.zoneRegistry().hasZone(zone)) {
throw new IllegalArgumentException("Zone " + zone + " does not exist in this system");
}
return zone;
}
    /**
     * Parses the multipart body of the given request.
     * When an X-Content-Hash header is present, the body is digested while parsing and the
     * SHA-256 digest must match the header's base64 value, or an exception is thrown.
     */
    private static Map<String, byte[]> parseDataParts(HttpRequest request) {
        String contentHash = request.getHeader("X-Content-Hash");
        if (contentHash == null)
            return new MultipartParser().parse(request);

        DigestInputStream digester = Signatures.sha256Digester(request.getData());
        var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
        if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
            throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");

        return dataParts;
    }
    /**
     * Resolves the rotation to operate on for the given instance.
     * With an endpoint id, the matching rotation is returned; without one, the instance must
     * have exactly one rotation, which is returned.
     *
     * @throws NotExistsException if the instance has no rotations, or no rotation matches the endpoint id
     * @throws IllegalArgumentException if no endpoint id is given and the instance has several rotations
     */
    private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
        if (instance.rotations().isEmpty()) {
            throw new NotExistsException("global rotation does not exist for " + instance);
        }
        if (endpointId.isPresent()) {
            return instance.rotations().stream()
                           .filter(r -> r.endpointId().id().equals(endpointId.get()))
                           .map(AssignedRotation::rotationId)
                           .findFirst()
                           .orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() +
                                                                     " does not exist for " + instance));
        } else if (instance.rotations().size() > 1) {
            throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
        }
        return instance.rotations().get(0).rotationId();
    }
private static String rotationStateString(RotationState state) {
switch (state) {
case in: return "IN";
case out: return "OUT";
}
return "UNKNOWN";
}
private static String endpointScopeString(Endpoint.Scope scope) {
switch (scope) {
case region: return "region";
case global: return "global";
case zone: return "zone";
}
throw new IllegalArgumentException("Unknown endpoint scope " + scope);
}
private static String routingMethodString(RoutingMethod method) {
switch (method) {
case exclusive: return "exclusive";
case shared: return "shared";
case sharedLayer4: return "sharedLayer4";
}
throw new IllegalArgumentException("Unknown routing method " + method);
}
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName))
.filter(cls::isInstance)
.map(cls::cast)
.orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request"));
}
/** Returns whether given request is by an operator */
private static boolean isOperator(HttpRequest request) {
var securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
return securityContext.roles().stream()
.map(Role::definition)
.anyMatch(definition -> definition == RoleDefinition.hostedOperator);
}
} |
Use SlimeUtils to throw if not exists? | private HttpResponse approveMembership(HttpRequest request, String user) {
AthenzUser athenzUser = AthenzUser.fromUserId(user);
byte[] jsonBytes = toJsonBytes(request.getData());
Inspector inspector = SlimeUtils.jsonToSlime(jsonBytes).get();
ApplicationId applicationId = ApplicationId.fromSerializedForm(inspector.field("applicationId").asString());
ZoneId zone = ZoneId.from(inspector.field("zone").asString());
if(controller.supportAccess().allowDataplaneMembership(athenzUser, new DeploymentId(applicationId, zone))) {
return new AccessRequestResponse(controller.serviceRegistry().accessControlService().listMembers());
} else {
return new MessageResponse(400, "Unable to approve membership request");
}
} | ApplicationId applicationId = ApplicationId.fromSerializedForm(inspector.field("applicationId").asString()); | private HttpResponse approveMembership(HttpRequest request, String user) {
AthenzUser athenzUser = AthenzUser.fromUserId(user);
byte[] jsonBytes = toJsonBytes(request.getData());
Inspector inspector = SlimeUtils.jsonToSlime(jsonBytes).get();
ApplicationId applicationId = requireField(inspector, "applicationId", ApplicationId::fromSerializedForm);
ZoneId zone = requireField(inspector, "zone", ZoneId::from);
if(controller.supportAccess().allowDataplaneMembership(athenzUser, new DeploymentId(applicationId, zone))) {
return new AccessRequestResponse(controller.serviceRegistry().accessControlService().listMembers());
} else {
return new MessageResponse(400, "Unable to approve membership request");
}
} | class ControllerApiHandler extends AuditLoggingRequestHandler {
private final ControllerMaintenance maintenance;
private final Controller controller;
public ControllerApiHandler(LoggingRequestHandler.Context parentCtx, Controller controller, ControllerMaintenance maintenance) {
super(parentCtx, controller.auditLogger());
this.controller = controller;
this.maintenance = maintenance;
}
@Override
public HttpResponse auditAndHandle(HttpRequest request) {
try {
switch (request.getMethod()) {
case GET: return get(request);
case POST: return post(request);
case DELETE: return delete(request);
case PATCH: return patch(request);
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
private HttpResponse get(HttpRequest request) {
Path path = new Path(request.getUri());
if (path.matches("/controller/v1/")) return root(request);
if (path.matches("/controller/v1/auditlog/")) return new AuditLogResponse(controller.auditLogger().readLog());
if (path.matches("/controller/v1/maintenance/")) return new JobsResponse(controller.jobControl());
if (path.matches("/controller/v1/stats")) return new StatsResponse(controller);
if (path.matches("/controller/v1/jobs/upgrader")) return new UpgraderResponse(maintenance.upgrader());
if (path.matches("/controller/v1/metering/tenant/{tenant}/month/{month}")) return new MeteringResponse(controller.serviceRegistry().meteringService(), path.get("tenant"), path.get("month"));
return notFound(path);
}
private HttpResponse post(HttpRequest request) {
Path path = new Path(request.getUri());
if (path.matches("/controller/v1/jobs/upgrader/confidence/{version}")) return overrideConfidence(request, path.get("version"));
if (path.matches("/controller/v1/access/requests/{user}")) return approveMembership(request, path.get("user"));
if (path.matches("/controller/v1/access/grants/{user}")) return grantAccess(request, path.get("user"));
return notFound(path);
}
private HttpResponse grantAccess(HttpRequest request, String user) {
Principal principal = requireUserPrincipal(request);
Instant now = controller.clock().instant();
byte[] jsonBytes = toJsonBytes(request.getData());
Inspector requestObject = SlimeUtils.jsonToSlime(jsonBytes).get();
X509Certificate certificate = X509CertificateUtils.fromPem(requestObject.field("certificate").asString());
ApplicationId applicationId = ApplicationId.fromSerializedForm(requestObject.field("applicationId").asString());
ZoneId zone = ZoneId.from(requestObject.field("zone").asString());
DeploymentId deployment = new DeploymentId(applicationId, zone);
SupportAccess supportAccess = controller.supportAccess().registerGrant(deployment, principal.getName(), certificate);
JobType jobType = JobType.from(controller.system(), deployment.zoneId())
.orElseThrow(() -> new IllegalStateException("No job found to trigger for " + deployment.toUserFriendlyString()));
String jobName = controller.applications().deploymentTrigger()
.reTrigger(deployment.applicationId(), jobType).type().jobName();
return new MessageResponse(String.format("Operator %s granted access and job %s triggered", principal.getName(), jobName));
}
private HttpResponse delete(HttpRequest request) {
Path path = new Path(request.getUri());
if (path.matches("/controller/v1/jobs/upgrader/confidence/{version}")) return removeConfidenceOverride(path.get("version"));
return notFound(path);
}
private HttpResponse patch(HttpRequest request) {
Path path = new Path(request.getUri());
if (path.matches("/controller/v1/jobs/upgrader")) return configureUpgrader(request);
return notFound(path);
}
private HttpResponse notFound(Path path) { return ErrorResponse.notFoundError("Nothing at " + path); }
private HttpResponse root(HttpRequest request) {
return new ResourceResponse(request, "auditlog", "maintenance", "stats", "jobs/upgrader", "metering/tenant");
}
private HttpResponse configureUpgrader(HttpRequest request) {
String upgradesPerMinuteField = "upgradesPerMinute";
String targetMajorVersionField = "targetMajorVersion";
byte[] jsonBytes = toJsonBytes(request.getData());
Inspector inspect = SlimeUtils.jsonToSlime(jsonBytes).get();
Upgrader upgrader = maintenance.upgrader();
if (inspect.field(upgradesPerMinuteField).valid()) {
upgrader.setUpgradesPerMinute(inspect.field(upgradesPerMinuteField).asDouble());
} else if (inspect.field(targetMajorVersionField).valid()) {
int target = (int)inspect.field(targetMajorVersionField).asLong();
upgrader.setTargetMajorVersion(Optional.ofNullable(target == 0 ? null : target));
} else {
return ErrorResponse.badRequest("No such modifiable field(s)");
}
return new UpgraderResponse(maintenance.upgrader());
}
private HttpResponse removeConfidenceOverride(String version) {
maintenance.upgrader().removeConfidenceOverride(Version.fromString(version));
return new UpgraderResponse(maintenance.upgrader());
}
private HttpResponse overrideConfidence(HttpRequest request, String version) {
Confidence confidence = Confidence.valueOf(asString(request.getData()));
maintenance.upgrader().overrideConfidence(Version.fromString(version), confidence);
return new UpgraderResponse(maintenance.upgrader());
}
private static String asString(InputStream in) {
Scanner scanner = new Scanner(in).useDelimiter("\\A");
if (scanner.hasNext()) {
return scanner.next();
}
return "";
}
private static byte[] toJsonBytes(InputStream jsonStream) {
try {
return IOUtils.readBytes(jsonStream, 1000 * 1000);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
private static Principal requireUserPrincipal(HttpRequest request) {
Principal principal = request.getJDiscRequest().getUserPrincipal();
if (principal == null) throw new InternalServerErrorException("Expected a user principal");
return principal;
}
} | class ControllerApiHandler extends AuditLoggingRequestHandler {
private final ControllerMaintenance maintenance;
private final Controller controller;
public ControllerApiHandler(LoggingRequestHandler.Context parentCtx, Controller controller, ControllerMaintenance maintenance) {
super(parentCtx, controller.auditLogger());
this.controller = controller;
this.maintenance = maintenance;
}
@Override
public HttpResponse auditAndHandle(HttpRequest request) {
try {
switch (request.getMethod()) {
case GET: return get(request);
case POST: return post(request);
case DELETE: return delete(request);
case PATCH: return patch(request);
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
private HttpResponse get(HttpRequest request) {
Path path = new Path(request.getUri());
if (path.matches("/controller/v1/")) return root(request);
if (path.matches("/controller/v1/auditlog/")) return new AuditLogResponse(controller.auditLogger().readLog());
if (path.matches("/controller/v1/maintenance/")) return new JobsResponse(controller.jobControl());
if (path.matches("/controller/v1/stats")) return new StatsResponse(controller);
if (path.matches("/controller/v1/jobs/upgrader")) return new UpgraderResponse(maintenance.upgrader());
if (path.matches("/controller/v1/metering/tenant/{tenant}/month/{month}")) return new MeteringResponse(controller.serviceRegistry().meteringService(), path.get("tenant"), path.get("month"));
return notFound(path);
}
private HttpResponse post(HttpRequest request) {
Path path = new Path(request.getUri());
if (path.matches("/controller/v1/jobs/upgrader/confidence/{version}")) return overrideConfidence(request, path.get("version"));
if (path.matches("/controller/v1/access/requests/{user}")) return approveMembership(request, path.get("user"));
if (path.matches("/controller/v1/access/grants/{user}")) return grantAccess(request, path.get("user"));
return notFound(path);
}
private HttpResponse grantAccess(HttpRequest request, String user) {
Principal principal = requireUserPrincipal(request);
Instant now = controller.clock().instant();
byte[] jsonBytes = toJsonBytes(request.getData());
Inspector requestObject = SlimeUtils.jsonToSlime(jsonBytes).get();
X509Certificate certificate = requireField(requestObject, "certificate", X509CertificateUtils::fromPem);
ApplicationId applicationId = requireField(requestObject, "applicationId", ApplicationId::fromSerializedForm);
ZoneId zone = requireField(requestObject, "zone", ZoneId::from);
DeploymentId deployment = new DeploymentId(applicationId, zone);
SupportAccess supportAccess = controller.supportAccess().registerGrant(deployment, principal.getName(), certificate);
JobType jobType = JobType.from(controller.system(), deployment.zoneId())
.orElseThrow(() -> new IllegalStateException("No job found to trigger for " + deployment.toUserFriendlyString()));
String jobName = controller.applications().deploymentTrigger()
.reTrigger(deployment.applicationId(), jobType).type().jobName();
return new MessageResponse(String.format("Operator %s granted access and job %s triggered", principal.getName(), jobName));
}
private <T> T requireField(Inspector inspector, String field, Function<String, T> mapper) {
return SlimeUtils.optionalString(inspector.field(field))
.map(mapper::apply)
.orElseThrow(() -> new IllegalArgumentException("Expected field \"" + field + "\" in request"));
}
private HttpResponse delete(HttpRequest request) {
Path path = new Path(request.getUri());
if (path.matches("/controller/v1/jobs/upgrader/confidence/{version}")) return removeConfidenceOverride(path.get("version"));
return notFound(path);
}
private HttpResponse patch(HttpRequest request) {
Path path = new Path(request.getUri());
if (path.matches("/controller/v1/jobs/upgrader")) return configureUpgrader(request);
return notFound(path);
}
private HttpResponse notFound(Path path) { return ErrorResponse.notFoundError("Nothing at " + path); }
private HttpResponse root(HttpRequest request) {
return new ResourceResponse(request, "auditlog", "maintenance", "stats", "jobs/upgrader", "metering/tenant");
}
private HttpResponse configureUpgrader(HttpRequest request) {
String upgradesPerMinuteField = "upgradesPerMinute";
String targetMajorVersionField = "targetMajorVersion";
byte[] jsonBytes = toJsonBytes(request.getData());
Inspector inspect = SlimeUtils.jsonToSlime(jsonBytes).get();
Upgrader upgrader = maintenance.upgrader();
if (inspect.field(upgradesPerMinuteField).valid()) {
upgrader.setUpgradesPerMinute(inspect.field(upgradesPerMinuteField).asDouble());
} else if (inspect.field(targetMajorVersionField).valid()) {
int target = (int)inspect.field(targetMajorVersionField).asLong();
upgrader.setTargetMajorVersion(Optional.ofNullable(target == 0 ? null : target));
} else {
return ErrorResponse.badRequest("No such modifiable field(s)");
}
return new UpgraderResponse(maintenance.upgrader());
}
private HttpResponse removeConfidenceOverride(String version) {
maintenance.upgrader().removeConfidenceOverride(Version.fromString(version));
return new UpgraderResponse(maintenance.upgrader());
}
private HttpResponse overrideConfidence(HttpRequest request, String version) {
Confidence confidence = Confidence.valueOf(asString(request.getData()));
maintenance.upgrader().overrideConfidence(Version.fromString(version), confidence);
return new UpgraderResponse(maintenance.upgrader());
}
private static String asString(InputStream in) {
Scanner scanner = new Scanner(in).useDelimiter("\\A");
if (scanner.hasNext()) {
return scanner.next();
}
return "";
}
private static byte[] toJsonBytes(InputStream jsonStream) {
try {
return IOUtils.readBytes(jsonStream, 1000 * 1000);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
private static Principal requireUserPrincipal(HttpRequest request) {
Principal principal = request.getJDiscRequest().getUserPrincipal();
if (principal == null) throw new InternalServerErrorException("Expected a user principal");
return principal;
}
} |
SlimeUtils | private HttpResponse approveMembership(HttpRequest request, String user) {
AthenzUser athenzUser = AthenzUser.fromUserId(user);
byte[] jsonBytes = toJsonBytes(request.getData());
Inspector inspector = SlimeUtils.jsonToSlime(jsonBytes).get();
ApplicationId applicationId = ApplicationId.fromSerializedForm(inspector.field("applicationId").asString());
ZoneId zone = ZoneId.from(inspector.field("zone").asString());
if(controller.supportAccess().allowDataplaneMembership(athenzUser, new DeploymentId(applicationId, zone))) {
return new AccessRequestResponse(controller.serviceRegistry().accessControlService().listMembers());
} else {
return new MessageResponse(400, "Unable to approve membership request");
}
} | ZoneId zone = ZoneId.from(inspector.field("zone").asString()); | private HttpResponse approveMembership(HttpRequest request, String user) {
AthenzUser athenzUser = AthenzUser.fromUserId(user);
byte[] jsonBytes = toJsonBytes(request.getData());
Inspector inspector = SlimeUtils.jsonToSlime(jsonBytes).get();
ApplicationId applicationId = requireField(inspector, "applicationId", ApplicationId::fromSerializedForm);
ZoneId zone = requireField(inspector, "zone", ZoneId::from);
if(controller.supportAccess().allowDataplaneMembership(athenzUser, new DeploymentId(applicationId, zone))) {
return new AccessRequestResponse(controller.serviceRegistry().accessControlService().listMembers());
} else {
return new MessageResponse(400, "Unable to approve membership request");
}
} | class ControllerApiHandler extends AuditLoggingRequestHandler {
private final ControllerMaintenance maintenance;
private final Controller controller;
public ControllerApiHandler(LoggingRequestHandler.Context parentCtx, Controller controller, ControllerMaintenance maintenance) {
super(parentCtx, controller.auditLogger());
this.controller = controller;
this.maintenance = maintenance;
}
@Override
public HttpResponse auditAndHandle(HttpRequest request) {
try {
switch (request.getMethod()) {
case GET: return get(request);
case POST: return post(request);
case DELETE: return delete(request);
case PATCH: return patch(request);
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
private HttpResponse get(HttpRequest request) {
Path path = new Path(request.getUri());
if (path.matches("/controller/v1/")) return root(request);
if (path.matches("/controller/v1/auditlog/")) return new AuditLogResponse(controller.auditLogger().readLog());
if (path.matches("/controller/v1/maintenance/")) return new JobsResponse(controller.jobControl());
if (path.matches("/controller/v1/stats")) return new StatsResponse(controller);
if (path.matches("/controller/v1/jobs/upgrader")) return new UpgraderResponse(maintenance.upgrader());
if (path.matches("/controller/v1/metering/tenant/{tenant}/month/{month}")) return new MeteringResponse(controller.serviceRegistry().meteringService(), path.get("tenant"), path.get("month"));
return notFound(path);
}
private HttpResponse post(HttpRequest request) {
Path path = new Path(request.getUri());
if (path.matches("/controller/v1/jobs/upgrader/confidence/{version}")) return overrideConfidence(request, path.get("version"));
if (path.matches("/controller/v1/access/requests/{user}")) return approveMembership(request, path.get("user"));
if (path.matches("/controller/v1/access/grants/{user}")) return grantAccess(request, path.get("user"));
return notFound(path);
}
private HttpResponse grantAccess(HttpRequest request, String user) {
Principal principal = requireUserPrincipal(request);
Instant now = controller.clock().instant();
byte[] jsonBytes = toJsonBytes(request.getData());
Inspector requestObject = SlimeUtils.jsonToSlime(jsonBytes).get();
X509Certificate certificate = X509CertificateUtils.fromPem(requestObject.field("certificate").asString());
ApplicationId applicationId = ApplicationId.fromSerializedForm(requestObject.field("applicationId").asString());
ZoneId zone = ZoneId.from(requestObject.field("zone").asString());
DeploymentId deployment = new DeploymentId(applicationId, zone);
SupportAccess supportAccess = controller.supportAccess().registerGrant(deployment, principal.getName(), certificate);
JobType jobType = JobType.from(controller.system(), deployment.zoneId())
.orElseThrow(() -> new IllegalStateException("No job found to trigger for " + deployment.toUserFriendlyString()));
String jobName = controller.applications().deploymentTrigger()
.reTrigger(deployment.applicationId(), jobType).type().jobName();
return new MessageResponse(String.format("Operator %s granted access and job %s triggered", principal.getName(), jobName));
}
private HttpResponse delete(HttpRequest request) {
Path path = new Path(request.getUri());
if (path.matches("/controller/v1/jobs/upgrader/confidence/{version}")) return removeConfidenceOverride(path.get("version"));
return notFound(path);
}
private HttpResponse patch(HttpRequest request) {
Path path = new Path(request.getUri());
if (path.matches("/controller/v1/jobs/upgrader")) return configureUpgrader(request);
return notFound(path);
}
private HttpResponse notFound(Path path) { return ErrorResponse.notFoundError("Nothing at " + path); }
private HttpResponse root(HttpRequest request) {
return new ResourceResponse(request, "auditlog", "maintenance", "stats", "jobs/upgrader", "metering/tenant");
}
private HttpResponse configureUpgrader(HttpRequest request) {
String upgradesPerMinuteField = "upgradesPerMinute";
String targetMajorVersionField = "targetMajorVersion";
byte[] jsonBytes = toJsonBytes(request.getData());
Inspector inspect = SlimeUtils.jsonToSlime(jsonBytes).get();
Upgrader upgrader = maintenance.upgrader();
if (inspect.field(upgradesPerMinuteField).valid()) {
upgrader.setUpgradesPerMinute(inspect.field(upgradesPerMinuteField).asDouble());
} else if (inspect.field(targetMajorVersionField).valid()) {
int target = (int)inspect.field(targetMajorVersionField).asLong();
upgrader.setTargetMajorVersion(Optional.ofNullable(target == 0 ? null : target));
} else {
return ErrorResponse.badRequest("No such modifiable field(s)");
}
return new UpgraderResponse(maintenance.upgrader());
}
private HttpResponse removeConfidenceOverride(String version) {
maintenance.upgrader().removeConfidenceOverride(Version.fromString(version));
return new UpgraderResponse(maintenance.upgrader());
}
private HttpResponse overrideConfidence(HttpRequest request, String version) {
Confidence confidence = Confidence.valueOf(asString(request.getData()));
maintenance.upgrader().overrideConfidence(Version.fromString(version), confidence);
return new UpgraderResponse(maintenance.upgrader());
}
private static String asString(InputStream in) {
Scanner scanner = new Scanner(in).useDelimiter("\\A");
if (scanner.hasNext()) {
return scanner.next();
}
return "";
}
private static byte[] toJsonBytes(InputStream jsonStream) {
try {
return IOUtils.readBytes(jsonStream, 1000 * 1000);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
private static Principal requireUserPrincipal(HttpRequest request) {
Principal principal = request.getJDiscRequest().getUserPrincipal();
if (principal == null) throw new InternalServerErrorException("Expected a user principal");
return principal;
}
} | class ControllerApiHandler extends AuditLoggingRequestHandler {
private final ControllerMaintenance maintenance;
private final Controller controller;
public ControllerApiHandler(LoggingRequestHandler.Context parentCtx, Controller controller, ControllerMaintenance maintenance) {
super(parentCtx, controller.auditLogger());
this.controller = controller;
this.maintenance = maintenance;
}
@Override
public HttpResponse auditAndHandle(HttpRequest request) {
try {
switch (request.getMethod()) {
case GET: return get(request);
case POST: return post(request);
case DELETE: return delete(request);
case PATCH: return patch(request);
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
private HttpResponse get(HttpRequest request) {
Path path = new Path(request.getUri());
if (path.matches("/controller/v1/")) return root(request);
if (path.matches("/controller/v1/auditlog/")) return new AuditLogResponse(controller.auditLogger().readLog());
if (path.matches("/controller/v1/maintenance/")) return new JobsResponse(controller.jobControl());
if (path.matches("/controller/v1/stats")) return new StatsResponse(controller);
if (path.matches("/controller/v1/jobs/upgrader")) return new UpgraderResponse(maintenance.upgrader());
if (path.matches("/controller/v1/metering/tenant/{tenant}/month/{month}")) return new MeteringResponse(controller.serviceRegistry().meteringService(), path.get("tenant"), path.get("month"));
return notFound(path);
}
private HttpResponse post(HttpRequest request) {
Path path = new Path(request.getUri());
if (path.matches("/controller/v1/jobs/upgrader/confidence/{version}")) return overrideConfidence(request, path.get("version"));
if (path.matches("/controller/v1/access/requests/{user}")) return approveMembership(request, path.get("user"));
if (path.matches("/controller/v1/access/grants/{user}")) return grantAccess(request, path.get("user"));
return notFound(path);
}
private HttpResponse grantAccess(HttpRequest request, String user) {
Principal principal = requireUserPrincipal(request);
Instant now = controller.clock().instant();
byte[] jsonBytes = toJsonBytes(request.getData());
Inspector requestObject = SlimeUtils.jsonToSlime(jsonBytes).get();
X509Certificate certificate = requireField(requestObject, "certificate", X509CertificateUtils::fromPem);
ApplicationId applicationId = requireField(requestObject, "applicationId", ApplicationId::fromSerializedForm);
ZoneId zone = requireField(requestObject, "zone", ZoneId::from);
DeploymentId deployment = new DeploymentId(applicationId, zone);
SupportAccess supportAccess = controller.supportAccess().registerGrant(deployment, principal.getName(), certificate);
JobType jobType = JobType.from(controller.system(), deployment.zoneId())
.orElseThrow(() -> new IllegalStateException("No job found to trigger for " + deployment.toUserFriendlyString()));
String jobName = controller.applications().deploymentTrigger()
.reTrigger(deployment.applicationId(), jobType).type().jobName();
return new MessageResponse(String.format("Operator %s granted access and job %s triggered", principal.getName(), jobName));
}
private <T> T requireField(Inspector inspector, String field, Function<String, T> mapper) {
return SlimeUtils.optionalString(inspector.field(field))
.map(mapper::apply)
.orElseThrow(() -> new IllegalArgumentException("Expected field \"" + field + "\" in request"));
}
private HttpResponse delete(HttpRequest request) {
Path path = new Path(request.getUri());
if (path.matches("/controller/v1/jobs/upgrader/confidence/{version}")) return removeConfidenceOverride(path.get("version"));
return notFound(path);
}
private HttpResponse patch(HttpRequest request) {
Path path = new Path(request.getUri());
if (path.matches("/controller/v1/jobs/upgrader")) return configureUpgrader(request);
return notFound(path);
}
private HttpResponse notFound(Path path) { return ErrorResponse.notFoundError("Nothing at " + path); }
private HttpResponse root(HttpRequest request) {
return new ResourceResponse(request, "auditlog", "maintenance", "stats", "jobs/upgrader", "metering/tenant");
}
private HttpResponse configureUpgrader(HttpRequest request) {
String upgradesPerMinuteField = "upgradesPerMinute";
String targetMajorVersionField = "targetMajorVersion";
byte[] jsonBytes = toJsonBytes(request.getData());
Inspector inspect = SlimeUtils.jsonToSlime(jsonBytes).get();
Upgrader upgrader = maintenance.upgrader();
if (inspect.field(upgradesPerMinuteField).valid()) {
upgrader.setUpgradesPerMinute(inspect.field(upgradesPerMinuteField).asDouble());
} else if (inspect.field(targetMajorVersionField).valid()) {
int target = (int)inspect.field(targetMajorVersionField).asLong();
upgrader.setTargetMajorVersion(Optional.ofNullable(target == 0 ? null : target));
} else {
return ErrorResponse.badRequest("No such modifiable field(s)");
}
return new UpgraderResponse(maintenance.upgrader());
}
private HttpResponse removeConfidenceOverride(String version) {
maintenance.upgrader().removeConfidenceOverride(Version.fromString(version));
return new UpgraderResponse(maintenance.upgrader());
}
private HttpResponse overrideConfidence(HttpRequest request, String version) {
Confidence confidence = Confidence.valueOf(asString(request.getData()));
maintenance.upgrader().overrideConfidence(Version.fromString(version), confidence);
return new UpgraderResponse(maintenance.upgrader());
}
private static String asString(InputStream in) {
Scanner scanner = new Scanner(in).useDelimiter("\\A");
if (scanner.hasNext()) {
return scanner.next();
}
return "";
}
private static byte[] toJsonBytes(InputStream jsonStream) {
try {
return IOUtils.readBytes(jsonStream, 1000 * 1000);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
private static Principal requireUserPrincipal(HttpRequest request) {
Principal principal = request.getJDiscRequest().getUserPrincipal();
if (principal == null) throw new InternalServerErrorException("Expected a user principal");
return principal;
}
} |
Consider simplifying by checking if min is specified first ```suggestion if (isEmpty() || !min.nodeResources().isUnspecified()) return this; NodeResources defaultResources = new CapacityPolicies(nodeRepository).defaultNodeResources(type); return new Limits(min.with(defaultResources), max.with(defaultResources)); ``` | public Limits fullySpecified(ClusterSpec.Type type, NodeRepository nodeRepository) {
if (this.isEmpty()) return this;
CapacityPolicies capacityPolicies = new CapacityPolicies(nodeRepository);
var specifiedMin = min.with(min.nodeResources().isUnspecified() ?
capacityPolicies.defaultNodeResources(type) : min.nodeResources());
var specifiedMax = max.with(max.nodeResources().isUnspecified() ?
capacityPolicies.defaultNodeResources(type) : max.nodeResources());
return new Limits(specifiedMin, specifiedMax);
} | return new Limits(specifiedMin, specifiedMax); | public Limits fullySpecified(ClusterSpec.Type type, NodeRepository nodeRepository) {
if (this.isEmpty()) return this;
CapacityPolicies capacityPolicies = new CapacityPolicies(nodeRepository);
var specifiedMin = min.with(min.nodeResources().isUnspecified() ?
capacityPolicies.defaultNodeResources(type) : min.nodeResources());
var specifiedMax = max.with(max.nodeResources().isUnspecified() ?
capacityPolicies.defaultNodeResources(type) : max.nodeResources());
return new Limits(specifiedMin, specifiedMax);
} | class Limits {
private static final Limits empty = new Limits(null, null);
private final ClusterResources min, max;
private Limits(ClusterResources min, ClusterResources max) {
this.min = min;
this.max = max;
}
public static Limits empty() { return empty; }
public boolean isEmpty() { return this == empty; }
public ClusterResources min() {
if (isEmpty()) throw new IllegalStateException("Empty: No min");
return min;
}
public ClusterResources max() {
if (isEmpty()) throw new IllegalStateException("Empty: No max");
return max;
}
public Limits withMin(ClusterResources min) {
return new Limits(min, max);
}
public Limits withMax(ClusterResources max) {
return new Limits(min, max);
}
/** Caps the given resources at the limits of this. If it is empty the node resources are returned as-is */
public NodeResources cap(NodeResources resources) {
if (isEmpty()) return resources;
if (min.nodeResources().isUnspecified()) return resources;
resources = resources.withVcpu(between(min.nodeResources().vcpu(), max.nodeResources().vcpu(), resources.vcpu()));
resources = resources.withMemoryGb(between(min.nodeResources().memoryGb(), max.nodeResources().memoryGb(), resources.memoryGb()));
resources = resources.withDiskGb(between(min.nodeResources().diskGb(), max.nodeResources().diskGb(), resources.diskGb()));
return resources;
}
private double between(double min, double max, double value) {
value = Math.max(min, value);
value = Math.min(max, value);
return value;
}
public static Limits of(Cluster cluster) {
return new Limits(cluster.minResources(), cluster.maxResources());
}
public static Limits of(Capacity capacity) {
return new Limits(capacity.minResources(), capacity.maxResources());
}
public static Limits of(ClusterResources min, ClusterResources max) {
return new Limits(Objects.requireNonNull(min, "min"), Objects.requireNonNull(max, "max"));
}
@Override
public String toString() {
if (isEmpty()) return "no limits";
return "limits: from " + min + " to " + max;
}
} | class Limits {
private static final Limits empty = new Limits(null, null);
private final ClusterResources min, max;
private Limits(ClusterResources min, ClusterResources max) {
this.min = min;
this.max = max;
}
public static Limits empty() { return empty; }
public boolean isEmpty() { return this == empty; }
public ClusterResources min() {
if (isEmpty()) throw new IllegalStateException("Empty: No min");
return min;
}
public ClusterResources max() {
if (isEmpty()) throw new IllegalStateException("Empty: No max");
return max;
}
public Limits withMin(ClusterResources min) {
return new Limits(min, max);
}
public Limits withMax(ClusterResources max) {
return new Limits(min, max);
}
/** Caps the given resources at the limits of this. If it is empty the node resources are returned as-is */
public NodeResources cap(NodeResources resources) {
if (isEmpty()) return resources;
if (min.nodeResources().isUnspecified()) return resources;
resources = resources.withVcpu(between(min.nodeResources().vcpu(), max.nodeResources().vcpu(), resources.vcpu()));
resources = resources.withMemoryGb(between(min.nodeResources().memoryGb(), max.nodeResources().memoryGb(), resources.memoryGb()));
resources = resources.withDiskGb(between(min.nodeResources().diskGb(), max.nodeResources().diskGb(), resources.diskGb()));
return resources;
}
private double between(double min, double max, double value) {
value = Math.max(min, value);
value = Math.min(max, value);
return value;
}
public static Limits of(Cluster cluster) {
return new Limits(cluster.minResources(), cluster.maxResources());
}
public static Limits of(Capacity capacity) {
return new Limits(capacity.minResources(), capacity.maxResources());
}
public static Limits of(ClusterResources min, ClusterResources max) {
return new Limits(Objects.requireNonNull(min, "min"), Objects.requireNonNull(max, "max"));
}
@Override
public String toString() {
if (isEmpty()) return "no limits";
return "limits: from " + min + " to " + max;
}
} |
Same here | private static int getStatusCode(Throwable t) {
if (t instanceof BindingNotFoundException) {
return HttpServletResponse.SC_NOT_FOUND;
} else if (t instanceof BindingSetNotFoundException) {
return HttpServletResponse.SC_NOT_FOUND;
} else if (t instanceof RequestException) {
return ((RequestException)t).getResponseStatus();
} else if (t instanceof TimeoutException) {
return HttpServletResponse.SC_SERVICE_UNAVAILABLE;
} else {
return HttpServletResponse.SC_INTERNAL_SERVER_ERROR;
}
} | return HttpServletResponse.SC_SERVICE_UNAVAILABLE; | private static int getStatusCode(Throwable t) {
if (t instanceof BindingNotFoundException) {
return HttpServletResponse.SC_NOT_FOUND;
} else if (t instanceof BindingSetNotFoundException) {
return HttpServletResponse.SC_NOT_FOUND;
} else if (t instanceof RequestException) {
return ((RequestException)t).getResponseStatus();
} else if (t instanceof TimeoutException) {
return HttpServletResponse.SC_SERVICE_UNAVAILABLE;
} else {
return HttpServletResponse.SC_INTERNAL_SERVER_ERROR;
}
} | class ServletResponseController {
private static Logger log = Logger.getLogger(ServletResponseController.class.getName());
/**
* The servlet spec does not require (Http)ServletResponse nor ServletOutputStream to be thread-safe. Therefore,
* we must provide our own synchronization, since we may attempt to access these objects simultaneously from
* different threads. (The typical cause of this is when one thread is writing a response while another thread
* throws an exception, causing the request to fail with an error response).
*/
private final Object monitor = new Object();
private final HttpServletRequest servletRequest;
private final HttpServletResponse servletResponse;
private final boolean developerMode;
private final ErrorResponseContentCreator errorResponseContentCreator = new ErrorResponseContentCreator();
private final ServletOutputStreamWriter servletOutputStreamWriter;
private boolean responseCommitted = false;
public ServletResponseController(
HttpServletRequest servletRequest,
HttpServletResponse servletResponse,
Janitor janitor,
RequestMetricReporter metricReporter,
boolean developerMode) throws IOException {
this.servletRequest = servletRequest;
this.servletResponse = servletResponse;
this.developerMode = developerMode;
this.servletOutputStreamWriter =
new ServletOutputStreamWriter(servletResponse.getOutputStream(), janitor, metricReporter);
}
private static String getReasonPhrase(Throwable t, boolean developerMode) {
if (developerMode) {
final StringWriter out = new StringWriter();
t.printStackTrace(new PrintWriter(out));
return out.toString();
} else if (t.getMessage() != null) {
return t.getMessage();
} else {
return t.toString();
}
}
public void trySendError(Throwable t) {
final boolean responseWasCommitted;
try {
synchronized (monitor) {
String reasonPhrase = getReasonPhrase(t, developerMode);
int statusCode = getStatusCode(t);
responseWasCommitted = responseCommitted;
if (!responseCommitted) {
responseCommitted = true;
sendErrorAsync(statusCode, reasonPhrase);
}
}
} catch (Throwable e) {
servletOutputStreamWriter.fail(t);
return;
}
if (responseWasCommitted) {
RuntimeException exceptionWithStackTrace = new RuntimeException(t);
log.log(Level.FINE, "Response already committed, can't change response code", exceptionWithStackTrace);
servletOutputStreamWriter.close();
}
}
/**
* Async version of {@link org.eclipse.jetty.server.Response
*/
private void sendErrorAsync(int statusCode, String reasonPhrase) {
servletResponse.setHeader(HttpHeaders.Names.EXPIRES, null);
servletResponse.setHeader(HttpHeaders.Names.LAST_MODIFIED, null);
servletResponse.setHeader(HttpHeaders.Names.CACHE_CONTROL, null);
servletResponse.setHeader(HttpHeaders.Names.CONTENT_TYPE, null);
servletResponse.setHeader(HttpHeaders.Names.CONTENT_LENGTH, null);
setStatus(servletResponse, statusCode, Optional.of(reasonPhrase));
if (statusCode != HttpServletResponse.SC_NO_CONTENT &&
statusCode != HttpServletResponse.SC_NOT_MODIFIED &&
statusCode != HttpServletResponse.SC_PARTIAL_CONTENT &&
statusCode >= HttpServletResponse.SC_OK) {
servletResponse.setHeader(HttpHeaders.Names.CACHE_CONTROL, "must-revalidate,no-cache,no-store");
servletResponse.setContentType(MimeTypes.Type.TEXT_HTML_8859_1.toString());
byte[] errorContent = errorResponseContentCreator
.createErrorContent(servletRequest.getRequestURI(), statusCode, Optional.ofNullable(reasonPhrase));
servletResponse.setContentLength(errorContent.length);
servletOutputStreamWriter.sendErrorContentAndCloseAsync(ByteBuffer.wrap(errorContent));
} else {
servletResponse.setContentLength(0);
servletOutputStreamWriter.close();
}
}
/**
* When this future completes there will be no more calls against the servlet output stream or servlet response.
* The framework is still allowed to invoke us though.
*
* The future might complete in the servlet framework thread, user thread or executor thread.
*/
public CompletableFuture<Void> finishedFuture() {
return servletOutputStreamWriter.finishedFuture;
}
private void setResponse(Response jdiscResponse) {
synchronized (monitor) {
servletRequest.setAttribute(HttpResponseStatisticsCollector.requestTypeAttribute, jdiscResponse.getRequestType());
if (responseCommitted) {
log.log(Level.FINE,
jdiscResponse.getError(),
() -> "Response already committed, can't change response code. " +
"From: " + servletResponse.getStatus() + ", To: " + jdiscResponse.getStatus());
servletOutputStreamWriter.close();
return;
}
setStatus_holdingLock(jdiscResponse, servletResponse);
setHeaders_holdingLock(jdiscResponse, servletResponse);
}
}
private static void setHeaders_holdingLock(Response jdiscResponse, HttpServletResponse servletResponse) {
for (final Map.Entry<String, String> entry : jdiscResponse.headers().entries()) {
servletResponse.addHeader(entry.getKey(), entry.getValue());
}
if (servletResponse.getContentType() == null) {
servletResponse.setContentType("text/plain;charset=utf-8");
}
}
private static void setStatus_holdingLock(Response jdiscResponse, HttpServletResponse servletResponse) {
if (jdiscResponse instanceof HttpResponse) {
setStatus(servletResponse, jdiscResponse.getStatus(), Optional.ofNullable(((HttpResponse) jdiscResponse).getMessage()));
} else {
setStatus(servletResponse, jdiscResponse.getStatus(), getErrorMessage(jdiscResponse));
}
}
@SuppressWarnings("deprecation")
private static void setStatus(HttpServletResponse response, int statusCode, Optional<String> reasonPhrase) {
if (reasonPhrase.isPresent()) {
response.setStatus(statusCode, reasonPhrase.get());
} else {
response.setStatus(statusCode);
}
}
private static Optional<String> getErrorMessage(Response jdiscResponse) {
return Optional.ofNullable(jdiscResponse.getError()).flatMap(
error -> Optional.ofNullable(error.getMessage()));
}
private void commitResponse() {
synchronized (monitor) {
responseCommitted = true;
}
}
public final ResponseHandler responseHandler = new ResponseHandler() {
@Override
public ContentChannel handleResponse(Response response) {
setResponse(response);
return responseContentChannel;
}
};
public final ContentChannel responseContentChannel = new ContentChannel() {
@Override
public void write(ByteBuffer buf, CompletionHandler handler) {
commitResponse();
servletOutputStreamWriter.writeBuffer(buf, handlerOrNoopHandler(handler));
}
@Override
public void close(CompletionHandler handler) {
commitResponse();
servletOutputStreamWriter.close(handlerOrNoopHandler(handler));
}
private CompletionHandler handlerOrNoopHandler(CompletionHandler handler) {
return handler != null ? handler : NOOP_COMPLETION_HANDLER;
}
};
} | class ServletResponseController {
private static Logger log = Logger.getLogger(ServletResponseController.class.getName());
/**
* The servlet spec does not require (Http)ServletResponse nor ServletOutputStream to be thread-safe. Therefore,
* we must provide our own synchronization, since we may attempt to access these objects simultaneously from
* different threads. (The typical cause of this is when one thread is writing a response while another thread
* throws an exception, causing the request to fail with an error response).
*/
private final Object monitor = new Object();
private final HttpServletRequest servletRequest;
private final HttpServletResponse servletResponse;
private final boolean developerMode;
private final ErrorResponseContentCreator errorResponseContentCreator = new ErrorResponseContentCreator();
private final ServletOutputStreamWriter servletOutputStreamWriter;
private boolean responseCommitted = false;
public ServletResponseController(
HttpServletRequest servletRequest,
HttpServletResponse servletResponse,
Janitor janitor,
RequestMetricReporter metricReporter,
boolean developerMode) throws IOException {
this.servletRequest = servletRequest;
this.servletResponse = servletResponse;
this.developerMode = developerMode;
this.servletOutputStreamWriter =
new ServletOutputStreamWriter(servletResponse.getOutputStream(), janitor, metricReporter);
}
private static String getReasonPhrase(Throwable t, boolean developerMode) {
if (developerMode) {
final StringWriter out = new StringWriter();
t.printStackTrace(new PrintWriter(out));
return out.toString();
} else if (t.getMessage() != null) {
return t.getMessage();
} else {
return t.toString();
}
}
public void trySendError(Throwable t) {
final boolean responseWasCommitted;
try {
synchronized (monitor) {
String reasonPhrase = getReasonPhrase(t, developerMode);
int statusCode = getStatusCode(t);
responseWasCommitted = responseCommitted;
if (!responseCommitted) {
responseCommitted = true;
sendErrorAsync(statusCode, reasonPhrase);
}
}
} catch (Throwable e) {
servletOutputStreamWriter.fail(t);
return;
}
if (responseWasCommitted) {
RuntimeException exceptionWithStackTrace = new RuntimeException(t);
log.log(Level.FINE, "Response already committed, can't change response code", exceptionWithStackTrace);
servletOutputStreamWriter.close();
}
}
/**
* Async version of {@link org.eclipse.jetty.server.Response
*/
private void sendErrorAsync(int statusCode, String reasonPhrase) {
servletResponse.setHeader(HttpHeaders.Names.EXPIRES, null);
servletResponse.setHeader(HttpHeaders.Names.LAST_MODIFIED, null);
servletResponse.setHeader(HttpHeaders.Names.CACHE_CONTROL, null);
servletResponse.setHeader(HttpHeaders.Names.CONTENT_TYPE, null);
servletResponse.setHeader(HttpHeaders.Names.CONTENT_LENGTH, null);
setStatus(servletResponse, statusCode, Optional.of(reasonPhrase));
if (statusCode != HttpServletResponse.SC_NO_CONTENT &&
statusCode != HttpServletResponse.SC_NOT_MODIFIED &&
statusCode != HttpServletResponse.SC_PARTIAL_CONTENT &&
statusCode >= HttpServletResponse.SC_OK) {
servletResponse.setHeader(HttpHeaders.Names.CACHE_CONTROL, "must-revalidate,no-cache,no-store");
servletResponse.setContentType(MimeTypes.Type.TEXT_HTML_8859_1.toString());
byte[] errorContent = errorResponseContentCreator
.createErrorContent(servletRequest.getRequestURI(), statusCode, Optional.ofNullable(reasonPhrase));
servletResponse.setContentLength(errorContent.length);
servletOutputStreamWriter.sendErrorContentAndCloseAsync(ByteBuffer.wrap(errorContent));
} else {
servletResponse.setContentLength(0);
servletOutputStreamWriter.close();
}
}
/**
* When this future completes there will be no more calls against the servlet output stream or servlet response.
* The framework is still allowed to invoke us though.
*
* The future might complete in the servlet framework thread, user thread or executor thread.
*/
public CompletableFuture<Void> finishedFuture() {
return servletOutputStreamWriter.finishedFuture;
}
private void setResponse(Response jdiscResponse) {
synchronized (monitor) {
servletRequest.setAttribute(HttpResponseStatisticsCollector.requestTypeAttribute, jdiscResponse.getRequestType());
if (responseCommitted) {
log.log(Level.FINE,
jdiscResponse.getError(),
() -> "Response already committed, can't change response code. " +
"From: " + servletResponse.getStatus() + ", To: " + jdiscResponse.getStatus());
servletOutputStreamWriter.close();
return;
}
setStatus_holdingLock(jdiscResponse, servletResponse);
setHeaders_holdingLock(jdiscResponse, servletResponse);
}
}
private static void setHeaders_holdingLock(Response jdiscResponse, HttpServletResponse servletResponse) {
for (final Map.Entry<String, String> entry : jdiscResponse.headers().entries()) {
servletResponse.addHeader(entry.getKey(), entry.getValue());
}
if (servletResponse.getContentType() == null) {
servletResponse.setContentType("text/plain;charset=utf-8");
}
}
private static void setStatus_holdingLock(Response jdiscResponse, HttpServletResponse servletResponse) {
if (jdiscResponse instanceof HttpResponse) {
setStatus(servletResponse, jdiscResponse.getStatus(), Optional.ofNullable(((HttpResponse) jdiscResponse).getMessage()));
} else {
setStatus(servletResponse, jdiscResponse.getStatus(), getErrorMessage(jdiscResponse));
}
}
@SuppressWarnings("deprecation")
private static void setStatus(HttpServletResponse response, int statusCode, Optional<String> reasonPhrase) {
if (reasonPhrase.isPresent()) {
response.setStatus(statusCode, reasonPhrase.get());
} else {
response.setStatus(statusCode);
}
}
private static Optional<String> getErrorMessage(Response jdiscResponse) {
return Optional.ofNullable(jdiscResponse.getError()).flatMap(
error -> Optional.ofNullable(error.getMessage()));
}
private void commitResponse() {
synchronized (monitor) {
responseCommitted = true;
}
}
public final ResponseHandler responseHandler = new ResponseHandler() {
@Override
public ContentChannel handleResponse(Response response) {
setResponse(response);
return responseContentChannel;
}
};
public final ContentChannel responseContentChannel = new ContentChannel() {
@Override
public void write(ByteBuffer buf, CompletionHandler handler) {
commitResponse();
servletOutputStreamWriter.writeBuffer(buf, handlerOrNoopHandler(handler));
}
@Override
public void close(CompletionHandler handler) {
commitResponse();
servletOutputStreamWriter.close(handlerOrNoopHandler(handler));
}
private CompletionHandler handlerOrNoopHandler(CompletionHandler handler) {
return handler != null ? handler : NOOP_COMPLETION_HANDLER;
}
};
} |
Here is was easier to count successes than failures. | protected double maintain() {
if ( ! nodeRepository().zone().environment().isProduction()) return 1.0;
int attempts = 0;
int successes = 0;
for (var application : activeNodesByApplication().entrySet()) {
attempts++;
successes += suggest(application.getKey(), application.getValue());
}
return attempts == 0 ? 1.0 : ((double)successes / attempts);
} | return attempts == 0 ? 1.0 : ((double)successes / attempts); | protected double maintain() {
if ( ! nodeRepository().zone().environment().isProduction()) return 1.0;
int attempts = 0;
int successes = 0;
for (var application : activeNodesByApplication().entrySet()) {
attempts++;
successes += suggest(application.getKey(), application.getValue());
}
return attempts == 0 ? 1.0 : ((double)successes / attempts);
} | class ScalingSuggestionsMaintainer extends NodeRepositoryMaintainer {
private final Autoscaler autoscaler;
public ScalingSuggestionsMaintainer(NodeRepository nodeRepository,
Duration interval,
Metric metric) {
super(nodeRepository, interval, metric);
this.autoscaler = new Autoscaler(nodeRepository);
}
@Override
private int suggest(ApplicationId application, NodeList applicationNodes) {
int successes = 0;
for (var cluster : nodesByCluster(applicationNodes).entrySet())
successes += suggest(application, cluster.getKey(), cluster.getValue()) ? 1 : 0;
return successes;
}
private Applications applications() {
return nodeRepository().applications();
}
private boolean suggest(ApplicationId applicationId,
ClusterSpec.Id clusterId,
NodeList clusterNodes) {
Application application = applications().get(applicationId).orElse(Application.empty(applicationId));
Optional<Cluster> cluster = application.cluster(clusterId);
if (cluster.isEmpty()) return true;
var suggestion = autoscaler.suggest(application, cluster.get(), clusterNodes);
if (suggestion.isEmpty()) return true;
try (Mutex lock = nodeRepository().nodes().lock(applicationId, Duration.ofSeconds(1))) {
var suggestedResources = suggestion.target().orElse(clusterNodes.not().retired().toResources());
applications().get(applicationId).ifPresent(a -> updateSuggestion(suggestedResources, clusterId, a, lock));
return true;
}
catch (ApplicationLockException e) {
return false;
}
}
private void updateSuggestion(ClusterResources suggestion,
ClusterSpec.Id clusterId,
Application application,
Mutex lock) {
Optional<Cluster> cluster = application.cluster(clusterId);
if (cluster.isEmpty()) return;
var at = nodeRepository().clock().instant();
var currentSuggestion = cluster.get().suggestedResources();
if (currentSuggestion.isEmpty()
|| currentSuggestion.get().at().isBefore(at.minus(Duration.ofDays(7)))
|| isHigher(suggestion, currentSuggestion.get().resources()))
applications().put(application.with(cluster.get().withSuggested(Optional.of(new Cluster.Suggestion(suggestion, at)))), lock);
}
private boolean isHigher(ClusterResources r1, ClusterResources r2) {
return r1.totalResources().cost() > r2.totalResources().cost();
}
private Map<ClusterSpec.Id, NodeList> nodesByCluster(NodeList applicationNodes) {
return applicationNodes.groupingBy(n -> n.allocation().get().membership().cluster().id());
}
} | class ScalingSuggestionsMaintainer extends NodeRepositoryMaintainer {
private final Autoscaler autoscaler;
public ScalingSuggestionsMaintainer(NodeRepository nodeRepository,
Duration interval,
Metric metric) {
super(nodeRepository, interval, metric);
this.autoscaler = new Autoscaler(nodeRepository);
}
@Override
private int suggest(ApplicationId application, NodeList applicationNodes) {
int successes = 0;
for (var cluster : nodesByCluster(applicationNodes).entrySet())
successes += suggest(application, cluster.getKey(), cluster.getValue()) ? 1 : 0;
return successes;
}
private Applications applications() {
return nodeRepository().applications();
}
private boolean suggest(ApplicationId applicationId,
ClusterSpec.Id clusterId,
NodeList clusterNodes) {
Application application = applications().get(applicationId).orElse(Application.empty(applicationId));
Optional<Cluster> cluster = application.cluster(clusterId);
if (cluster.isEmpty()) return true;
var suggestion = autoscaler.suggest(application, cluster.get(), clusterNodes);
if (suggestion.isEmpty()) return true;
try (Mutex lock = nodeRepository().nodes().lock(applicationId, Duration.ofSeconds(1))) {
var suggestedResources = suggestion.target().orElse(clusterNodes.not().retired().toResources());
applications().get(applicationId).ifPresent(a -> updateSuggestion(suggestedResources, clusterId, a, lock));
return true;
}
catch (ApplicationLockException e) {
return false;
}
}
private void updateSuggestion(ClusterResources suggestion,
ClusterSpec.Id clusterId,
Application application,
Mutex lock) {
Optional<Cluster> cluster = application.cluster(clusterId);
if (cluster.isEmpty()) return;
var at = nodeRepository().clock().instant();
var currentSuggestion = cluster.get().suggestedResources();
if (currentSuggestion.isEmpty()
|| currentSuggestion.get().at().isBefore(at.minus(Duration.ofDays(7)))
|| isHigher(suggestion, currentSuggestion.get().resources()))
applications().put(application.with(cluster.get().withSuggested(Optional.of(new Cluster.Suggestion(suggestion, at)))), lock);
}
private boolean isHigher(ClusterResources r1, ClusterResources r2) {
return r1.totalResources().cost() > r2.totalResources().cost();
}
private Map<ClusterSpec.Id, NodeList> nodesByCluster(NodeList applicationNodes) {
return applicationNodes.groupingBy(n -> n.allocation().get().membership().cluster().id());
}
} |
Same answer as above :) | private static int getStatusCode(Throwable t) {
if (t instanceof BindingNotFoundException) {
return HttpServletResponse.SC_NOT_FOUND;
} else if (t instanceof BindingSetNotFoundException) {
return HttpServletResponse.SC_NOT_FOUND;
} else if (t instanceof RequestException) {
return ((RequestException)t).getResponseStatus();
} else if (t instanceof TimeoutException) {
return HttpServletResponse.SC_SERVICE_UNAVAILABLE;
} else {
return HttpServletResponse.SC_INTERNAL_SERVER_ERROR;
}
} | return HttpServletResponse.SC_SERVICE_UNAVAILABLE; | private static int getStatusCode(Throwable t) {
if (t instanceof BindingNotFoundException) {
return HttpServletResponse.SC_NOT_FOUND;
} else if (t instanceof BindingSetNotFoundException) {
return HttpServletResponse.SC_NOT_FOUND;
} else if (t instanceof RequestException) {
return ((RequestException)t).getResponseStatus();
} else if (t instanceof TimeoutException) {
return HttpServletResponse.SC_SERVICE_UNAVAILABLE;
} else {
return HttpServletResponse.SC_INTERNAL_SERVER_ERROR;
}
} | class ServletResponseController {
private static Logger log = Logger.getLogger(ServletResponseController.class.getName());
/**
* The servlet spec does not require (Http)ServletResponse nor ServletOutputStream to be thread-safe. Therefore,
* we must provide our own synchronization, since we may attempt to access these objects simultaneously from
* different threads. (The typical cause of this is when one thread is writing a response while another thread
* throws an exception, causing the request to fail with an error response).
*/
private final Object monitor = new Object();
private final HttpServletRequest servletRequest;
private final HttpServletResponse servletResponse;
private final boolean developerMode;
private final ErrorResponseContentCreator errorResponseContentCreator = new ErrorResponseContentCreator();
private final ServletOutputStreamWriter servletOutputStreamWriter;
private boolean responseCommitted = false;
public ServletResponseController(
HttpServletRequest servletRequest,
HttpServletResponse servletResponse,
Janitor janitor,
RequestMetricReporter metricReporter,
boolean developerMode) throws IOException {
this.servletRequest = servletRequest;
this.servletResponse = servletResponse;
this.developerMode = developerMode;
this.servletOutputStreamWriter =
new ServletOutputStreamWriter(servletResponse.getOutputStream(), janitor, metricReporter);
}
private static String getReasonPhrase(Throwable t, boolean developerMode) {
if (developerMode) {
final StringWriter out = new StringWriter();
t.printStackTrace(new PrintWriter(out));
return out.toString();
} else if (t.getMessage() != null) {
return t.getMessage();
} else {
return t.toString();
}
}
public void trySendError(Throwable t) {
final boolean responseWasCommitted;
try {
synchronized (monitor) {
String reasonPhrase = getReasonPhrase(t, developerMode);
int statusCode = getStatusCode(t);
responseWasCommitted = responseCommitted;
if (!responseCommitted) {
responseCommitted = true;
sendErrorAsync(statusCode, reasonPhrase);
}
}
} catch (Throwable e) {
servletOutputStreamWriter.fail(t);
return;
}
if (responseWasCommitted) {
RuntimeException exceptionWithStackTrace = new RuntimeException(t);
log.log(Level.FINE, "Response already committed, can't change response code", exceptionWithStackTrace);
servletOutputStreamWriter.close();
}
}
/**
* Async version of {@link org.eclipse.jetty.server.Response
*/
private void sendErrorAsync(int statusCode, String reasonPhrase) {
servletResponse.setHeader(HttpHeaders.Names.EXPIRES, null);
servletResponse.setHeader(HttpHeaders.Names.LAST_MODIFIED, null);
servletResponse.setHeader(HttpHeaders.Names.CACHE_CONTROL, null);
servletResponse.setHeader(HttpHeaders.Names.CONTENT_TYPE, null);
servletResponse.setHeader(HttpHeaders.Names.CONTENT_LENGTH, null);
setStatus(servletResponse, statusCode, Optional.of(reasonPhrase));
if (statusCode != HttpServletResponse.SC_NO_CONTENT &&
statusCode != HttpServletResponse.SC_NOT_MODIFIED &&
statusCode != HttpServletResponse.SC_PARTIAL_CONTENT &&
statusCode >= HttpServletResponse.SC_OK) {
servletResponse.setHeader(HttpHeaders.Names.CACHE_CONTROL, "must-revalidate,no-cache,no-store");
servletResponse.setContentType(MimeTypes.Type.TEXT_HTML_8859_1.toString());
byte[] errorContent = errorResponseContentCreator
.createErrorContent(servletRequest.getRequestURI(), statusCode, Optional.ofNullable(reasonPhrase));
servletResponse.setContentLength(errorContent.length);
servletOutputStreamWriter.sendErrorContentAndCloseAsync(ByteBuffer.wrap(errorContent));
} else {
servletResponse.setContentLength(0);
servletOutputStreamWriter.close();
}
}
/**
* When this future completes there will be no more calls against the servlet output stream or servlet response.
* The framework is still allowed to invoke us though.
*
* The future might complete in the servlet framework thread, user thread or executor thread.
*/
public CompletableFuture<Void> finishedFuture() {
return servletOutputStreamWriter.finishedFuture;
}
private void setResponse(Response jdiscResponse) {
synchronized (monitor) {
servletRequest.setAttribute(HttpResponseStatisticsCollector.requestTypeAttribute, jdiscResponse.getRequestType());
if (responseCommitted) {
log.log(Level.FINE,
jdiscResponse.getError(),
() -> "Response already committed, can't change response code. " +
"From: " + servletResponse.getStatus() + ", To: " + jdiscResponse.getStatus());
servletOutputStreamWriter.close();
return;
}
setStatus_holdingLock(jdiscResponse, servletResponse);
setHeaders_holdingLock(jdiscResponse, servletResponse);
}
}
private static void setHeaders_holdingLock(Response jdiscResponse, HttpServletResponse servletResponse) {
for (final Map.Entry<String, String> entry : jdiscResponse.headers().entries()) {
servletResponse.addHeader(entry.getKey(), entry.getValue());
}
if (servletResponse.getContentType() == null) {
servletResponse.setContentType("text/plain;charset=utf-8");
}
}
private static void setStatus_holdingLock(Response jdiscResponse, HttpServletResponse servletResponse) {
if (jdiscResponse instanceof HttpResponse) {
setStatus(servletResponse, jdiscResponse.getStatus(), Optional.ofNullable(((HttpResponse) jdiscResponse).getMessage()));
} else {
setStatus(servletResponse, jdiscResponse.getStatus(), getErrorMessage(jdiscResponse));
}
}
@SuppressWarnings("deprecation")
private static void setStatus(HttpServletResponse response, int statusCode, Optional<String> reasonPhrase) {
if (reasonPhrase.isPresent()) {
response.setStatus(statusCode, reasonPhrase.get());
} else {
response.setStatus(statusCode);
}
}
private static Optional<String> getErrorMessage(Response jdiscResponse) {
return Optional.ofNullable(jdiscResponse.getError()).flatMap(
error -> Optional.ofNullable(error.getMessage()));
}
private void commitResponse() {
synchronized (monitor) {
responseCommitted = true;
}
}
public final ResponseHandler responseHandler = new ResponseHandler() {
@Override
public ContentChannel handleResponse(Response response) {
setResponse(response);
return responseContentChannel;
}
};
public final ContentChannel responseContentChannel = new ContentChannel() {
@Override
public void write(ByteBuffer buf, CompletionHandler handler) {
commitResponse();
servletOutputStreamWriter.writeBuffer(buf, handlerOrNoopHandler(handler));
}
@Override
public void close(CompletionHandler handler) {
commitResponse();
servletOutputStreamWriter.close(handlerOrNoopHandler(handler));
}
private CompletionHandler handlerOrNoopHandler(CompletionHandler handler) {
return handler != null ? handler : NOOP_COMPLETION_HANDLER;
}
};
} | class ServletResponseController {
private static Logger log = Logger.getLogger(ServletResponseController.class.getName());
/**
* The servlet spec does not require (Http)ServletResponse nor ServletOutputStream to be thread-safe. Therefore,
* we must provide our own synchronization, since we may attempt to access these objects simultaneously from
* different threads. (The typical cause of this is when one thread is writing a response while another thread
* throws an exception, causing the request to fail with an error response).
*/
private final Object monitor = new Object();
private final HttpServletRequest servletRequest;
private final HttpServletResponse servletResponse;
private final boolean developerMode;
private final ErrorResponseContentCreator errorResponseContentCreator = new ErrorResponseContentCreator();
private final ServletOutputStreamWriter servletOutputStreamWriter;
private boolean responseCommitted = false;
public ServletResponseController(
HttpServletRequest servletRequest,
HttpServletResponse servletResponse,
Janitor janitor,
RequestMetricReporter metricReporter,
boolean developerMode) throws IOException {
this.servletRequest = servletRequest;
this.servletResponse = servletResponse;
this.developerMode = developerMode;
this.servletOutputStreamWriter =
new ServletOutputStreamWriter(servletResponse.getOutputStream(), janitor, metricReporter);
}
private static String getReasonPhrase(Throwable t, boolean developerMode) {
if (developerMode) {
final StringWriter out = new StringWriter();
t.printStackTrace(new PrintWriter(out));
return out.toString();
} else if (t.getMessage() != null) {
return t.getMessage();
} else {
return t.toString();
}
}
public void trySendError(Throwable t) {
final boolean responseWasCommitted;
try {
synchronized (monitor) {
String reasonPhrase = getReasonPhrase(t, developerMode);
int statusCode = getStatusCode(t);
responseWasCommitted = responseCommitted;
if (!responseCommitted) {
responseCommitted = true;
sendErrorAsync(statusCode, reasonPhrase);
}
}
} catch (Throwable e) {
servletOutputStreamWriter.fail(t);
return;
}
if (responseWasCommitted) {
RuntimeException exceptionWithStackTrace = new RuntimeException(t);
log.log(Level.FINE, "Response already committed, can't change response code", exceptionWithStackTrace);
servletOutputStreamWriter.close();
}
}
/**
 * Async version of {@link org.eclipse.jetty.server.Response#sendError}:
 * clears caching and content headers, sets the status, and — for statuses that may
 * carry a body — writes an HTML error page before closing the stream.
 */
private void sendErrorAsync(int statusCode, String reasonPhrase) {
servletResponse.setHeader(HttpHeaders.Names.EXPIRES, null);
servletResponse.setHeader(HttpHeaders.Names.LAST_MODIFIED, null);
servletResponse.setHeader(HttpHeaders.Names.CACHE_CONTROL, null);
servletResponse.setHeader(HttpHeaders.Names.CONTENT_TYPE, null);
servletResponse.setHeader(HttpHeaders.Names.CONTENT_LENGTH, null);
setStatus(servletResponse, statusCode, Optional.of(reasonPhrase));
// 204 No Content, 304 Not Modified, 206 Partial Content and informational (< 200)
// responses must not carry an error body; everything else gets generated HTML content.
if (statusCode != HttpServletResponse.SC_NO_CONTENT &&
statusCode != HttpServletResponse.SC_NOT_MODIFIED &&
statusCode != HttpServletResponse.SC_PARTIAL_CONTENT &&
statusCode >= HttpServletResponse.SC_OK) {
servletResponse.setHeader(HttpHeaders.Names.CACHE_CONTROL, "must-revalidate,no-cache,no-store");
servletResponse.setContentType(MimeTypes.Type.TEXT_HTML_8859_1.toString());
byte[] errorContent = errorResponseContentCreator
.createErrorContent(servletRequest.getRequestURI(), statusCode, Optional.ofNullable(reasonPhrase));
servletResponse.setContentLength(errorContent.length);
servletOutputStreamWriter.sendErrorContentAndCloseAsync(ByteBuffer.wrap(errorContent));
} else {
servletResponse.setContentLength(0);
servletOutputStreamWriter.close();
}
}
/**
 * When this future completes there will be no more calls against the servlet output stream or servlet response.
 * The framework is still allowed to invoke us though.
 *
 * The future might complete in the servlet framework thread, user thread or executor thread.
 *
 * @return future completing once all interaction with the servlet response has finished
 */
public CompletableFuture<Void> finishedFuture() {
return servletOutputStreamWriter.finishedFuture;
}
/**
 * Copies status and headers from the jDisc response onto the servlet response,
 * unless a response is already committed — then only the output stream is closed
 * and the attempted status change is logged at FINE.
 */
private void setResponse(Response jdiscResponse) {
synchronized (monitor) {
servletRequest.setAttribute(HttpResponseStatisticsCollector.requestTypeAttribute, jdiscResponse.getRequestType());
if (responseCommitted) {
log.log(Level.FINE,
jdiscResponse.getError(),
() -> "Response already committed, can't change response code. " +
"From: " + servletResponse.getStatus() + ", To: " + jdiscResponse.getStatus());
servletOutputStreamWriter.close();
return;
}
setStatus_holdingLock(jdiscResponse, servletResponse);
setHeaders_holdingLock(jdiscResponse, servletResponse);
}
}
/**
 * Copies every header of the jDisc response onto the servlet response, defaulting
 * the content type to text/plain UTF-8 when none was set. Caller holds the monitor.
 */
private static void setHeaders_holdingLock(Response jdiscResponse, HttpServletResponse servletResponse) {
    for (var header : jdiscResponse.headers().entries()) {
        servletResponse.addHeader(header.getKey(), header.getValue());
    }
    if (servletResponse.getContentType() != null) return;
    servletResponse.setContentType("text/plain;charset=utf-8");
}
/**
 * Applies the jDisc response status to the servlet response; HTTP responses contribute
 * their message as reason phrase, other responses contribute their error's message.
 * Caller holds the monitor.
 */
private static void setStatus_holdingLock(Response jdiscResponse, HttpServletResponse servletResponse) {
if (jdiscResponse instanceof HttpResponse) {
setStatus(servletResponse, jdiscResponse.getStatus(), Optional.ofNullable(((HttpResponse) jdiscResponse).getMessage()));
} else {
setStatus(servletResponse, jdiscResponse.getStatus(), getErrorMessage(jdiscResponse));
}
}
// The two-argument setStatus(int, String) is deprecated in the Servlet API — presumably
// used here because it is the way to attach a custom reason phrase; hence the suppression.
@SuppressWarnings("deprecation")
private static void setStatus(HttpServletResponse response, int statusCode, Optional<String> reasonPhrase) {
if (reasonPhrase.isPresent()) {
response.setStatus(statusCode, reasonPhrase.get());
} else {
response.setStatus(statusCode);
}
}
/**
 * Returns the message of the error attached to this response, or empty when the
 * response has no error or the error has no message.
 */
private static Optional<String> getErrorMessage(Response jdiscResponse) {
    // Optional.map already maps a null mapper result to empty(), so the original
    // flatMap(Optional.ofNullable(...)) indirection was redundant.
    return Optional.ofNullable(jdiscResponse.getError()).map(error -> error.getMessage());
}
/** Marks the response as committed; after this, status and headers can no longer be changed. */
private void commitResponse() {
synchronized (monitor) {
responseCommitted = true;
}
}
/** Entry point for jDisc responses: records status/headers, then hands content to responseContentChannel. */
public final ResponseHandler responseHandler = new ResponseHandler() {
@Override
public ContentChannel handleResponse(Response response) {
setResponse(response);
return responseContentChannel;
}
};
/** Streams response content to the servlet output stream; the first write or close commits the response. */
public final ContentChannel responseContentChannel = new ContentChannel() {
@Override
public void write(ByteBuffer buf, CompletionHandler handler) {
commitResponse();
servletOutputStreamWriter.writeBuffer(buf, handlerOrNoopHandler(handler));
}
@Override
public void close(CompletionHandler handler) {
commitResponse();
servletOutputStreamWriter.close(handlerOrNoopHandler(handler));
}
// Callers may pass null; substitute a shared no-op completion handler.
private CompletionHandler handlerOrNoopHandler(CompletionHandler handler) {
return handler != null ? handler : NOOP_COMPLETION_HANDLER;
}
};
} |
Use `asSuccessFactor` here too? | protected double maintain() {
if ( ! nodeRepository().zone().environment().isProduction()) return 1.0;
int attempts = 0;
int successes = 0;
for (var application : activeNodesByApplication().entrySet()) {
attempts++;
successes += suggest(application.getKey(), application.getValue());
}
return attempts == 0 ? 1.0 : ((double)successes / attempts);
} | return attempts == 0 ? 1.0 : ((double)successes / attempts); | protected double maintain() {
if ( ! nodeRepository().zone().environment().isProduction()) return 1.0;
int attempts = 0;
int successes = 0;
for (var application : activeNodesByApplication().entrySet()) {
attempts++;
successes += suggest(application.getKey(), application.getValue());
}
return attempts == 0 ? 1.0 : ((double)successes / attempts);
} | class ScalingSuggestionsMaintainer extends NodeRepositoryMaintainer {
private final Autoscaler autoscaler;
public ScalingSuggestionsMaintainer(NodeRepository nodeRepository,
Duration interval,
Metric metric) {
super(nodeRepository, interval, metric);
this.autoscaler = new Autoscaler(nodeRepository);
}
@Override
private int suggest(ApplicationId application, NodeList applicationNodes) {
int successes = 0;
for (var cluster : nodesByCluster(applicationNodes).entrySet())
successes += suggest(application, cluster.getKey(), cluster.getValue()) ? 1 : 0;
return successes;
}
private Applications applications() {
return nodeRepository().applications();
}
private boolean suggest(ApplicationId applicationId,
ClusterSpec.Id clusterId,
NodeList clusterNodes) {
Application application = applications().get(applicationId).orElse(Application.empty(applicationId));
Optional<Cluster> cluster = application.cluster(clusterId);
if (cluster.isEmpty()) return true;
var suggestion = autoscaler.suggest(application, cluster.get(), clusterNodes);
if (suggestion.isEmpty()) return true;
try (Mutex lock = nodeRepository().nodes().lock(applicationId, Duration.ofSeconds(1))) {
var suggestedResources = suggestion.target().orElse(clusterNodes.not().retired().toResources());
applications().get(applicationId).ifPresent(a -> updateSuggestion(suggestedResources, clusterId, a, lock));
return true;
}
catch (ApplicationLockException e) {
return false;
}
}
private void updateSuggestion(ClusterResources suggestion,
ClusterSpec.Id clusterId,
Application application,
Mutex lock) {
Optional<Cluster> cluster = application.cluster(clusterId);
if (cluster.isEmpty()) return;
var at = nodeRepository().clock().instant();
var currentSuggestion = cluster.get().suggestedResources();
if (currentSuggestion.isEmpty()
|| currentSuggestion.get().at().isBefore(at.minus(Duration.ofDays(7)))
|| isHigher(suggestion, currentSuggestion.get().resources()))
applications().put(application.with(cluster.get().withSuggested(Optional.of(new Cluster.Suggestion(suggestion, at)))), lock);
}
private boolean isHigher(ClusterResources r1, ClusterResources r2) {
return r1.totalResources().cost() > r2.totalResources().cost();
}
private Map<ClusterSpec.Id, NodeList> nodesByCluster(NodeList applicationNodes) {
return applicationNodes.groupingBy(n -> n.allocation().get().membership().cluster().id());
}
} | class ScalingSuggestionsMaintainer extends NodeRepositoryMaintainer {
private final Autoscaler autoscaler;
public ScalingSuggestionsMaintainer(NodeRepository nodeRepository,
Duration interval,
Metric metric) {
super(nodeRepository, interval, metric);
this.autoscaler = new Autoscaler(nodeRepository);
}
@Override
private int suggest(ApplicationId application, NodeList applicationNodes) {
int successes = 0;
for (var cluster : nodesByCluster(applicationNodes).entrySet())
successes += suggest(application, cluster.getKey(), cluster.getValue()) ? 1 : 0;
return successes;
}
private Applications applications() {
return nodeRepository().applications();
}
private boolean suggest(ApplicationId applicationId,
ClusterSpec.Id clusterId,
NodeList clusterNodes) {
Application application = applications().get(applicationId).orElse(Application.empty(applicationId));
Optional<Cluster> cluster = application.cluster(clusterId);
if (cluster.isEmpty()) return true;
var suggestion = autoscaler.suggest(application, cluster.get(), clusterNodes);
if (suggestion.isEmpty()) return true;
try (Mutex lock = nodeRepository().nodes().lock(applicationId, Duration.ofSeconds(1))) {
var suggestedResources = suggestion.target().orElse(clusterNodes.not().retired().toResources());
applications().get(applicationId).ifPresent(a -> updateSuggestion(suggestedResources, clusterId, a, lock));
return true;
}
catch (ApplicationLockException e) {
return false;
}
}
private void updateSuggestion(ClusterResources suggestion,
ClusterSpec.Id clusterId,
Application application,
Mutex lock) {
Optional<Cluster> cluster = application.cluster(clusterId);
if (cluster.isEmpty()) return;
var at = nodeRepository().clock().instant();
var currentSuggestion = cluster.get().suggestedResources();
if (currentSuggestion.isEmpty()
|| currentSuggestion.get().at().isBefore(at.minus(Duration.ofDays(7)))
|| isHigher(suggestion, currentSuggestion.get().resources()))
applications().put(application.with(cluster.get().withSuggested(Optional.of(new Cluster.Suggestion(suggestion, at)))), lock);
}
private boolean isHigher(ClusterResources r1, ClusterResources r2) {
return r1.totalResources().cost() > r2.totalResources().cost();
}
private Map<ClusterSpec.Id, NodeList> nodesByCluster(NodeList applicationNodes) {
return applicationNodes.groupingBy(n -> n.allocation().get().membership().cluster().id());
}
} |
Use `asSuccessFactor` here too? | protected double maintain() {
int attempts = 0;
int successes = 0;
NodeList activeNodes = nodeRepository().nodes().list(Node.State.active);
Map<ApplicationId, NodeList> retiredNodesByApplication = activeNodes.retired().groupingBy(node -> node.allocation().get().owner());
for (Map.Entry<ApplicationId, NodeList> entry : retiredNodesByApplication.entrySet()) {
ApplicationId application = entry.getKey();
NodeList retiredNodes = entry.getValue();
List<Node> nodesToRemove = retiredNodes.stream().filter(n -> canRemove(n, activeNodes)).collect(Collectors.toList());
if (nodesToRemove.isEmpty()) continue;
attempts++;
try (MaintenanceDeployment deployment = new MaintenanceDeployment(application, deployer, metric, nodeRepository())) {
if ( ! deployment.isValid()) continue;
nodeRepository().nodes().setRemovable(application, nodesToRemove);
boolean success = deployment.activate().isPresent();
if ( ! success) continue;
String nodeList = nodesToRemove.stream().map(Node::hostname).collect(Collectors.joining(", "));
log.info("Redeployed " + application + " to deactivate retired nodes: " + nodeList);
successes++;
}
}
return attempts == 0 ? 1.0 : ((double)successes / attempts);
} | return attempts == 0 ? 1.0 : ((double)successes / attempts); | protected double maintain() {
int attempts = 0;
int successes = 0;
NodeList activeNodes = nodeRepository().nodes().list(Node.State.active);
Map<ApplicationId, NodeList> retiredNodesByApplication = activeNodes.retired().groupingBy(node -> node.allocation().get().owner());
for (Map.Entry<ApplicationId, NodeList> entry : retiredNodesByApplication.entrySet()) {
ApplicationId application = entry.getKey();
NodeList retiredNodes = entry.getValue();
List<Node> nodesToRemove = retiredNodes.stream().filter(n -> canRemove(n, activeNodes)).collect(Collectors.toList());
if (nodesToRemove.isEmpty()) continue;
attempts++;
try (MaintenanceDeployment deployment = new MaintenanceDeployment(application, deployer, metric, nodeRepository())) {
if ( ! deployment.isValid()) continue;
nodeRepository().nodes().setRemovable(application, nodesToRemove);
boolean success = deployment.activate().isPresent();
if ( ! success) continue;
String nodeList = nodesToRemove.stream().map(Node::hostname).collect(Collectors.joining(", "));
log.info("Redeployed " + application + " to deactivate retired nodes: " + nodeList);
successes++;
}
}
return attempts == 0 ? 1.0 : ((double)successes / attempts);
} | class RetiredExpirer extends NodeRepositoryMaintainer {
private static final int NUM_CONFIG_SERVERS = 3;
private final Deployer deployer;
private final Metric metric;
private final Orchestrator orchestrator;
private final Duration retiredExpiry;
public RetiredExpirer(NodeRepository nodeRepository,
Orchestrator orchestrator,
Deployer deployer,
Metric metric,
Duration maintenanceInterval,
Duration retiredExpiry) {
super(nodeRepository, maintenanceInterval, metric);
this.deployer = deployer;
this.metric = metric;
this.orchestrator = orchestrator;
this.retiredExpiry = retiredExpiry;
}
@Override
/**
* Checks if the node can be removed:
* if the node is a host, it will only be removed if it has no children,
* or all its children are parked or failed.
* Otherwise, a removal is allowed if either of these are true:
* - The node has been in state {@link History.Event.Type
* - Orchestrator allows it
*/
private boolean canRemove(Node node, NodeList activeNodes) {
if (node.type().isHost()) {
if (nodeRepository().nodes().list().childrenOf(node).asList().stream()
.allMatch(child -> child.state() == Node.State.parked ||
child.state() == Node.State.failed)) {
log.info("Host " + node + " has no non-parked/failed children");
return true;
}
return false;
}
if (node.type().isConfigServerLike()) {
if (activeNodes.nodeType(node.type()).size() < NUM_CONFIG_SERVERS) {
return false;
}
} else if (node.history().hasEventBefore(History.Event.Type.retired, clock().instant().minus(retiredExpiry))) {
log.warning("Node " + node + " has been retired longer than " + retiredExpiry + ": Allowing removal. This may cause data loss");
return true;
}
try {
orchestrator.acquirePermissionToRemove(new HostName(node.hostname()));
log.info("Node " + node + " has been granted permission to be removed");
return true;
} catch (UncheckedTimeoutException e) {
log.warning("Timed out trying to acquire permission to remove " + node.hostname() + ": " + Exceptions.toMessageString(e));
return false;
} catch (OrchestrationException e) {
log.info("Did not get permission to remove retired " + node + ": " + Exceptions.toMessageString(e));
return false;
}
}
} | class RetiredExpirer extends NodeRepositoryMaintainer {
private static final int NUM_CONFIG_SERVERS = 3;
private final Deployer deployer;
private final Metric metric;
private final Orchestrator orchestrator;
private final Duration retiredExpiry;
public RetiredExpirer(NodeRepository nodeRepository,
Orchestrator orchestrator,
Deployer deployer,
Metric metric,
Duration maintenanceInterval,
Duration retiredExpiry) {
super(nodeRepository, maintenanceInterval, metric);
this.deployer = deployer;
this.metric = metric;
this.orchestrator = orchestrator;
this.retiredExpiry = retiredExpiry;
}
@Override
/**
* Checks if the node can be removed:
* if the node is a host, it will only be removed if it has no children,
* or all its children are parked or failed.
* Otherwise, a removal is allowed if either of these are true:
* - The node has been in state {@link History.Event.Type
* - Orchestrator allows it
*/
private boolean canRemove(Node node, NodeList activeNodes) {
if (node.type().isHost()) {
if (nodeRepository().nodes().list().childrenOf(node).asList().stream()
.allMatch(child -> child.state() == Node.State.parked ||
child.state() == Node.State.failed)) {
log.info("Host " + node + " has no non-parked/failed children");
return true;
}
return false;
}
if (node.type().isConfigServerLike()) {
if (activeNodes.nodeType(node.type()).size() < NUM_CONFIG_SERVERS) {
return false;
}
} else if (node.history().hasEventBefore(History.Event.Type.retired, clock().instant().minus(retiredExpiry))) {
log.warning("Node " + node + " has been retired longer than " + retiredExpiry + ": Allowing removal. This may cause data loss");
return true;
}
try {
orchestrator.acquirePermissionToRemove(new HostName(node.hostname()));
log.info("Node " + node + " has been granted permission to be removed");
return true;
} catch (UncheckedTimeoutException e) {
log.warning("Timed out trying to acquire permission to remove " + node.hostname() + ": " + Exceptions.toMessageString(e));
return false;
} catch (OrchestrationException e) {
log.info("Did not get permission to remove retired " + node + ": " + Exceptions.toMessageString(e));
return false;
}
}
} |
Here it was easier to count successes than failures.
int attempts = 0;
int successes = 0;
NodeList activeNodes = nodeRepository().nodes().list(Node.State.active);
Map<ApplicationId, NodeList> retiredNodesByApplication = activeNodes.retired().groupingBy(node -> node.allocation().get().owner());
for (Map.Entry<ApplicationId, NodeList> entry : retiredNodesByApplication.entrySet()) {
ApplicationId application = entry.getKey();
NodeList retiredNodes = entry.getValue();
List<Node> nodesToRemove = retiredNodes.stream().filter(n -> canRemove(n, activeNodes)).collect(Collectors.toList());
if (nodesToRemove.isEmpty()) continue;
attempts++;
try (MaintenanceDeployment deployment = new MaintenanceDeployment(application, deployer, metric, nodeRepository())) {
if ( ! deployment.isValid()) continue;
nodeRepository().nodes().setRemovable(application, nodesToRemove);
boolean success = deployment.activate().isPresent();
if ( ! success) continue;
String nodeList = nodesToRemove.stream().map(Node::hostname).collect(Collectors.joining(", "));
log.info("Redeployed " + application + " to deactivate retired nodes: " + nodeList);
successes++;
}
}
return attempts == 0 ? 1.0 : ((double)successes / attempts);
} | return attempts == 0 ? 1.0 : ((double)successes / attempts); | protected double maintain() {
int attempts = 0;
int successes = 0;
NodeList activeNodes = nodeRepository().nodes().list(Node.State.active);
Map<ApplicationId, NodeList> retiredNodesByApplication = activeNodes.retired().groupingBy(node -> node.allocation().get().owner());
for (Map.Entry<ApplicationId, NodeList> entry : retiredNodesByApplication.entrySet()) {
ApplicationId application = entry.getKey();
NodeList retiredNodes = entry.getValue();
List<Node> nodesToRemove = retiredNodes.stream().filter(n -> canRemove(n, activeNodes)).collect(Collectors.toList());
if (nodesToRemove.isEmpty()) continue;
attempts++;
try (MaintenanceDeployment deployment = new MaintenanceDeployment(application, deployer, metric, nodeRepository())) {
if ( ! deployment.isValid()) continue;
nodeRepository().nodes().setRemovable(application, nodesToRemove);
boolean success = deployment.activate().isPresent();
if ( ! success) continue;
String nodeList = nodesToRemove.stream().map(Node::hostname).collect(Collectors.joining(", "));
log.info("Redeployed " + application + " to deactivate retired nodes: " + nodeList);
successes++;
}
}
return attempts == 0 ? 1.0 : ((double)successes / attempts);
} | class RetiredExpirer extends NodeRepositoryMaintainer {
private static final int NUM_CONFIG_SERVERS = 3;
private final Deployer deployer;
private final Metric metric;
private final Orchestrator orchestrator;
private final Duration retiredExpiry;
public RetiredExpirer(NodeRepository nodeRepository,
Orchestrator orchestrator,
Deployer deployer,
Metric metric,
Duration maintenanceInterval,
Duration retiredExpiry) {
super(nodeRepository, maintenanceInterval, metric);
this.deployer = deployer;
this.metric = metric;
this.orchestrator = orchestrator;
this.retiredExpiry = retiredExpiry;
}
@Override
/**
* Checks if the node can be removed:
* if the node is a host, it will only be removed if it has no children,
* or all its children are parked or failed.
* Otherwise, a removal is allowed if either of these are true:
* - The node has been in state {@link History.Event.Type
* - Orchestrator allows it
*/
private boolean canRemove(Node node, NodeList activeNodes) {
if (node.type().isHost()) {
if (nodeRepository().nodes().list().childrenOf(node).asList().stream()
.allMatch(child -> child.state() == Node.State.parked ||
child.state() == Node.State.failed)) {
log.info("Host " + node + " has no non-parked/failed children");
return true;
}
return false;
}
if (node.type().isConfigServerLike()) {
if (activeNodes.nodeType(node.type()).size() < NUM_CONFIG_SERVERS) {
return false;
}
} else if (node.history().hasEventBefore(History.Event.Type.retired, clock().instant().minus(retiredExpiry))) {
log.warning("Node " + node + " has been retired longer than " + retiredExpiry + ": Allowing removal. This may cause data loss");
return true;
}
try {
orchestrator.acquirePermissionToRemove(new HostName(node.hostname()));
log.info("Node " + node + " has been granted permission to be removed");
return true;
} catch (UncheckedTimeoutException e) {
log.warning("Timed out trying to acquire permission to remove " + node.hostname() + ": " + Exceptions.toMessageString(e));
return false;
} catch (OrchestrationException e) {
log.info("Did not get permission to remove retired " + node + ": " + Exceptions.toMessageString(e));
return false;
}
}
} | class RetiredExpirer extends NodeRepositoryMaintainer {
private static final int NUM_CONFIG_SERVERS = 3;
private final Deployer deployer;
private final Metric metric;
private final Orchestrator orchestrator;
private final Duration retiredExpiry;
public RetiredExpirer(NodeRepository nodeRepository,
Orchestrator orchestrator,
Deployer deployer,
Metric metric,
Duration maintenanceInterval,
Duration retiredExpiry) {
super(nodeRepository, maintenanceInterval, metric);
this.deployer = deployer;
this.metric = metric;
this.orchestrator = orchestrator;
this.retiredExpiry = retiredExpiry;
}
@Override
/**
* Checks if the node can be removed:
* if the node is a host, it will only be removed if it has no children,
* or all its children are parked or failed.
* Otherwise, a removal is allowed if either of these are true:
* - The node has been in state {@link History.Event.Type
* - Orchestrator allows it
*/
private boolean canRemove(Node node, NodeList activeNodes) {
if (node.type().isHost()) {
if (nodeRepository().nodes().list().childrenOf(node).asList().stream()
.allMatch(child -> child.state() == Node.State.parked ||
child.state() == Node.State.failed)) {
log.info("Host " + node + " has no non-parked/failed children");
return true;
}
return false;
}
if (node.type().isConfigServerLike()) {
if (activeNodes.nodeType(node.type()).size() < NUM_CONFIG_SERVERS) {
return false;
}
} else if (node.history().hasEventBefore(History.Event.Type.retired, clock().instant().minus(retiredExpiry))) {
log.warning("Node " + node + " has been retired longer than " + retiredExpiry + ": Allowing removal. This may cause data loss");
return true;
}
try {
orchestrator.acquirePermissionToRemove(new HostName(node.hostname()));
log.info("Node " + node + " has been granted permission to be removed");
return true;
} catch (UncheckedTimeoutException e) {
log.warning("Timed out trying to acquire permission to remove " + node.hostname() + ": " + Exceptions.toMessageString(e));
return false;
} catch (OrchestrationException e) {
log.info("Did not get permission to remove retired " + node + ": " + Exceptions.toMessageString(e));
return false;
}
}
} |
Could use the `rebuilding` local variable here instead of calling `wantToRebuild()` again.
try (Mutex lock = lockUnallocated()) {
List<Node> nodesToAdd = new ArrayList<>();
List<Node> nodesToRemove = new ArrayList<>();
for (int i = 0; i < nodes.size(); i++) {
var node = nodes.get(i);
for (int j = 0; j < i; j++) {
if (node.equals(nodes.get(j)))
illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
}
Optional<Node> existing = node(node.hostname());
if (existing.isPresent()) {
if (existing.get().state() != Node.State.deprovisioned)
illegal("Cannot add " + node + ": A node with this name already exists");
node = node.with(existing.get().history());
node = node.with(existing.get().reports());
node = node.with(node.status().withFailCount(existing.get().status().failCount()));
if (existing.get().status().firmwareVerifiedAt().isPresent())
node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get()));
boolean rebuilding = existing.get().status().wantToRebuild();
if (rebuilding) {
node = node.with(node.status().withWantToRetire(existing.get().status().wantToRetire(),
false,
existing.get().status().wantToRebuild()));
}
nodesToRemove.add(existing.get());
}
nodesToAdd.add(node);
}
NestedTransaction transaction = new NestedTransaction();
List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), Node.State.provisioned, agent, transaction);
db.removeNodes(nodesToRemove, transaction);
transaction.commit();
return resultingNodes;
}
} | existing.get().status().wantToRebuild())); | public List<Node> addNodes(List<Node> nodes, Agent agent) {
try (Mutex lock = lockUnallocated()) {
List<Node> nodesToAdd = new ArrayList<>();
List<Node> nodesToRemove = new ArrayList<>();
for (int i = 0; i < nodes.size(); i++) {
var node = nodes.get(i);
for (int j = 0; j < i; j++) {
if (node.equals(nodes.get(j)))
illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
}
Optional<Node> existing = node(node.hostname());
if (existing.isPresent()) {
if (existing.get().state() != Node.State.deprovisioned)
illegal("Cannot add " + node + ": A node with this name already exists");
node = node.with(existing.get().history());
node = node.with(existing.get().reports());
node = node.with(node.status().withFailCount(existing.get().status().failCount()));
if (existing.get().status().firmwareVerifiedAt().isPresent())
node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get()));
boolean rebuilding = existing.get().status().wantToRebuild();
if (rebuilding) {
node = node.with(node.status().withWantToRetire(existing.get().status().wantToRetire(),
false,
rebuilding));
}
nodesToRemove.add(existing.get());
}
nodesToAdd.add(node);
}
NestedTransaction transaction = new NestedTransaction();
List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), Node.State.provisioned, agent, transaction);
db.removeNodes(nodesToRemove, transaction);
transaction.commit();
return resultingNodes;
}
} | class Nodes {
private static final Logger log = Logger.getLogger(Nodes.class.getName());
private final Zone zone;
private final Clock clock;
private final CuratorDatabaseClient db;
public Nodes(CuratorDatabaseClient db, Zone zone, Clock clock) {
this.zone = zone;
this.clock = clock;
this.db = db;
}
/** Read and write all nodes to make sure they are stored in the latest version of the serialized format */
public void rewrite() {
Instant start = clock.instant();
int nodesWritten = 0;
for (Node.State state : Node.State.values()) {
List<Node> nodes = db.readNodes(state);
db.writeTo(state, nodes, Agent.system, Optional.empty());
nodesWritten += nodes.size();
}
Instant end = clock.instant();
log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end)));
}
/**
* Finds and returns the node with the hostname in any of the given states, or empty if not found
*
* @param hostname the full host name of the node
* @param inState the states the node may be in. If no states are given, it will be returned from any state
* @return the node, or empty if it was not found in any of the given states
*/
public Optional<Node> node(String hostname, Node.State... inState) {
return db.readNode(hostname, inState);
}
/**
* Returns a list of nodes in this repository in any of the given states
*
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
*/
public NodeList list(Node.State... inState) {
return NodeList.copyOf(db.readNodes(inState));
}
/** Returns a locked list of all nodes in this repository */
public LockedNodeList list(Mutex lock) {
return new LockedNodeList(list().asList(), lock);
}
/**
* Returns whether the zone managed by this node repository seems to be working.
* If too many nodes are not responding, there is probably some zone-wide issue
* and we should probably refrain from making changes to it.
*/
public boolean isWorking() {
NodeList activeNodes = list(Node.State.active);
if (activeNodes.size() <= 5) return true;
NodeList downNodes = activeNodes.down();
return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 );
}
/** Adds a list of newly created reserved nodes to the node repository */
public List<Node> addReservedNodes(LockedNodeList nodes) {
for (Node node : nodes) {
if ( node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
illegal("Cannot add " + node + ": This is not a child node");
if (node.allocation().isEmpty())
illegal("Cannot add " + node + ": Child nodes need to be allocated");
Optional<Node> existing = node(node.hostname());
if (existing.isPresent())
illegal("Cannot add " + node + ": A node with this name already exists (" +
existing.get() + ", " + existing.get().history() + "). Node to be added: " +
node + ", " + node.history());
}
return db.addNodesInState(nodes.asList(), Node.State.reserved, Agent.system);
}
/**
* Adds a list of (newly created) nodes to the node repository as provisioned nodes.
* If any of the nodes already exists in the deprovisioned state, the new node will be merged
* with the history of that node.
*/
/** Sets a list of nodes ready and returns the nodes in the ready state */
public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
try (Mutex lock = lockUnallocated()) {
List<Node> nodesWithResetFields = nodes.stream()
.map(node -> {
if (node.state() != Node.State.provisioned && node.state() != Node.State.dirty)
illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
return node.withWantToRetire(false,
false,
false,
Agent.system,
clock.instant());
})
.collect(Collectors.toList());
return db.writeTo(Node.State.ready, nodesWithResetFields, agent, Optional.of(reason));
}
}
/** Sets the node with this hostname ready, unless it already is; requireNode throws for unknown hostnames. */
public Node setReady(String hostname, Agent agent, String reason) {
Node nodeToReady = requireNode(hostname);
if (nodeToReady.state() == Node.State.ready) return nodeToReady;
return setReady(List.of(nodeToReady), agent, reason).get(0);
}
/** Reserve nodes. This method does <b>not</b> lock the node repository */
public List<Node> reserve(List<Node> nodes) {
return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
}
/** Activate nodes. This method does <b>not</b> lock the node repository */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
}
/**
 * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
 *
 * @param application the application the nodes belong to
 * @param nodes the nodes to make removable. These nodes MUST be in the active state.
 */
public void setRemovable(ApplicationId application, List<Node> nodes) {
    try (Mutex lock = lock(application)) {
        List<Node> removableNodes = new ArrayList<>();
        for (Node node : nodes)
            removableNodes.add(node.with(node.allocation().get().removable(true)));
        write(removableNodes, lock);
    }
}
/**
* Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the
* transaction commits.
*/
public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) {
var stateless = NodeList.copyOf(nodes).stateless();
var stateful = NodeList.copyOf(nodes).stateful();
List<Node> written = new ArrayList<>();
written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction.nested()));
written.addAll(db.writeTo(Node.State.inactive, stateful.asList(), Agent.application, Optional.empty(), transaction.nested()));
return written;
}
/**
 * Fails these nodes in a transaction and returns the nodes in the new state which will hold if the
 * transaction commits.
 */
public List<Node> fail(List<Node> nodes, ApplicationTransaction transaction) {
// Fixed agent/reason for application-initiated failing
return fail(nodes, Agent.application, "Failed by application", transaction.nested());
}
/** Fails the given nodes in a self-contained transaction and returns them in their new state. */
public List<Node> fail(List<Node> nodes, Agent agent, String reason) {
    NestedTransaction transaction = new NestedTransaction();
    List<Node> failed = fail(nodes, agent, reason, transaction);
    transaction.commit();
    return failed;
}
/** Moves the nodes to failed as part of the given transaction. */
private List<Node> fail(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
// Reset the wantToFail flag (set by failOrMark for active nodes) — the failure is now carried out
nodes = nodes.stream()
.map(n -> n.withWantToFail(false, agent, clock.instant()))
.collect(Collectors.toList());
return db.writeTo(Node.State.failed, nodes, agent, Optional.of(reason), transaction);
}
/** Move nodes to the dirty state */
public List<Node> deallocate(List<Node> nodes, Agent agent, String reason) {
// performOn acquires the proper lock (application or unallocated) for each node before deallocating
return performOn(NodeList.copyOf(nodes), (node, lock) -> deallocate(node, agent, reason));
}
/** Moves the node with the given hostname — and, for hosts, all of its children — to dirty. */
public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) {
Node nodeToDirty = node(hostname).orElseThrow(() ->
new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));
// For hosts, include all children; nodes already dirty are skipped
List<Node> nodesToDirty =
(nodeToDirty.type().isHost() ?
Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
Stream.of(nodeToDirty))
.filter(node -> node.state() != Node.State.dirty)
.collect(Collectors.toList());
// All-or-nothing: refuse the whole operation if any node is in a state that cannot be recycled
List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
.filter(node -> node.state() != Node.State.provisioned)
.filter(node -> node.state() != Node.State.failed)
.filter(node -> node.state() != Node.State.parked)
.filter(node -> node.state() != Node.State.breakfixed)
.map(Node::hostname)
.collect(Collectors.toList());
if ( ! hostnamesNotAllowedToDirty.isEmpty())
illegal("Could not deallocate " + nodeToDirty + ": " +
hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]");
return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).collect(Collectors.toList());
}
/**
 * Set a node dirty or parked, allowed if it is in the provisioned, inactive, failed or parked state.
 * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
 */
public Node deallocate(Node node, Agent agent, String reason) {
    NestedTransaction transaction = new NestedTransaction();
    Node written = deallocate(node, agent, reason, transaction);
    transaction.commit();
    return written;
}
/** Deallocates each of the given nodes as part of the given transaction. */
public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
    List<Node> written = new ArrayList<>();
    for (Node node : nodes)
        written.add(deallocate(node, agent, reason, transaction));
    return written;
}
/** Moves the node to dirty, or parks it instead when parkOnDeallocationOf says so. */
public Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) {
if (parkOnDeallocationOf(node, agent)) {
// Nodes flagged for encryption keep their allocation while parked — presumably so the
// operation can resume with the allocation intact; confirm against encryption flow
boolean keepAllocation = node.reports().getReport(Report.WANT_TO_ENCRYPT_ID).isPresent();
return park(node.hostname(), keepAllocation, agent, reason, transaction);
} else {
return db.writeTo(Node.State.dirty, List.of(node), agent, Optional.of(reason), transaction).get(0);
}
}
/**
 * Fails this node and returns it in its new state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node fail(String hostname, Agent agent, String reason) {
// Keeps the node's allocation by default
return fail(hostname, true, agent, reason);
}
/** Fails this node, optionally discarding its allocation, and returns it in its new state. */
public Node fail(String hostname, boolean keepAllocation, Agent agent, String reason) {
return move(hostname, Node.State.failed, agent, keepAllocation, Optional.of(reason));
}
/**
 * Fails all the nodes that are children of hostname before finally failing the hostname itself.
 * Non-active nodes are failed immediately, while active nodes are marked as wantToFail.
 * The host is failed if it has no active nodes and marked wantToFail if it has.
 *
 * @return all the nodes that were changed by this request
 */
public List<Node> failOrMarkRecursively(String hostname, Agent agent, String reason) {
NodeList children = list().childrenOf(hostname);
List<Node> changed = performOn(children, (node, lock) -> failOrMark(node, agent, reason, lock));
// Only fail the host outright when none of its children are active
if (children.state(Node.State.active).isEmpty())
changed.add(move(hostname, Node.State.failed, agent, true, Optional.of(reason)));
else
changed.addAll(performOn(NodeList.of(node(hostname).orElseThrow()), (node, lock) -> failOrMark(node, agent, reason, lock)));
return changed;
}
/** Marks an active node wantToFail; moves a non-active node directly to failed. */
private Node failOrMark(Node node, Agent agent, String reason, Mutex lock) {
if (node.state() == Node.State.active) {
// Active nodes are only marked; the actual failing happens later (see fail(...))
node = node.withWantToFail(true, agent, clock.instant());
write(node, lock);
return node;
} else {
return move(node.hostname(), Node.State.failed, agent, true, Optional.of(reason));
}
}
/**
 * Parks this node and returns it in its new state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
    NestedTransaction transaction = new NestedTransaction();
    Node result = park(hostname, keepAllocation, agent, reason, transaction);
    transaction.commit();
    return result;
}
/** Parks the node as part of the given transaction. */
private Node park(String hostname, boolean keepAllocation, Agent agent, String reason, NestedTransaction transaction) {
return move(hostname, Node.State.parked, agent, keepAllocation, Optional.of(reason), transaction);
}
/**
 * Parks all the nodes that are children of hostname before finally parking the hostname itself.
 *
 * @return List of all the parked nodes in their new state
 */
public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
// Children and host are parked in one transaction; see moveRecursively
return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason));
}
/**
 * Moves a previously failed or parked node back to the active state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node reactivate(String hostname, Agent agent, String reason) {
// Keeps the existing allocation; move(...) validates that the node can become active
return move(hostname, Node.State.active, agent, true, Optional.of(reason));
}
/**
 * Moves a host to breakfixed state, removing any children.
 */
public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) {
Node node = requireNode(hostname);
try (Mutex lock = lockUnallocated()) {
requireBreakfixable(node);
// Remove children and move the host atomically in one transaction
NestedTransaction transaction = new NestedTransaction();
List<Node> removed = removeChildren(node, false, transaction);
removed.add(move(node.hostname(), Node.State.breakfixed, agent, true, Optional.of(reason), transaction));
transaction.commit();
return removed;
}
}
/** Moves all children of the host, and then the host itself, to the given state in one transaction. */
private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
    NestedTransaction transaction = new NestedTransaction();
    List<Node> moved = new ArrayList<>();
    for (Node child : list().childrenOf(hostname))
        moved.add(move(child.hostname(), toState, agent, true, reason, transaction));
    moved.add(move(hostname, toState, agent, true, reason, transaction));
    transaction.commit();
    return moved;
}
/** Moves a node to the given state in its own, immediately-committed transaction. */
private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason) {
    NestedTransaction transaction = new NestedTransaction();
    Node result = move(hostname, toState, agent, keepAllocation, reason, transaction);
    transaction.commit();
    return result;
}
/** Move a node to given state as part of a transaction */
private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason, NestedTransaction transaction) {
try (NodeMutex lock = lockAndGetRequired(hostname)) {
Node node = lock.node();
if (toState == Node.State.active) {
// Activation requires an allocation, and no other active node may hold the same cluster slot
if (node.allocation().isEmpty()) illegal("Could not set " + node + " active: It has no allocation");
if (!keepAllocation) illegal("Could not set " + node + " active: Requested to discard allocation");
for (Node currentActive : list(Node.State.active).owner(node.allocation().get().owner())) {
if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
&& node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
}
}
if (!keepAllocation && node.allocation().isPresent()) {
node = node.withoutAllocation();
}
if (toState == Node.State.deprovisioned) {
// Deprovisioned nodes give up their IP config
node = node.with(IP.Config.EMPTY);
}
return db.writeTo(toState, List.of(node), agent, reason, transaction).get(0);
}
}
/*
 * This method is used by the REST API to handle readying nodes for new allocations. For Linux
 * containers this will remove the node from node repository, otherwise the node will be moved to state ready.
 */
public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
Node node = requireNode(hostname);
if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
if (node.state() != Node.State.dirty)
illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
// Container nodes are removed entirely rather than readied
return removeRecursively(node, true).get(0);
}
if (node.state() == Node.State.ready) return node;
// Refuse to ready a node whose parent host (or the node itself, if it has no parent) has hard failures
Node parentHost = node.parentHostname().flatMap(this::node).orElse(node);
List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost);
if ( ! failureReasons.isEmpty())
illegal(node + " cannot be readied because it has hard failures: " + failureReasons);
return setReady(List.of(node), agent, reason).get(0);
}
/**
 * Removes all the nodes that are children of hostname before finally removing the hostname itself.
 *
 * @return a List of all the nodes that have been removed or (for hosts) deprovisioned
 */
public List<Node> removeRecursively(String hostname) {
Node node = requireNode(hostname);
// Non-forced removal: state checks in requireRemovable apply
return removeRecursively(node, false);
}
/** Removes the node and its children; static hosts are kept as deprovisioned instead of removed. */
public List<Node> removeRecursively(Node node, boolean force) {
try (Mutex lock = lockUnallocated()) {
requireRemovable(node, false, force);
NestedTransaction transaction = new NestedTransaction();
final List<Node> removed;
if (!node.type().isHost()) {
removed = List.of(node);
db.removeNodes(removed, transaction);
} else {
removed = removeChildren(node, force, transaction);
if (zone.getCloud().dynamicProvisioning()) {
// Dynamically provisioned hosts disappear for good
db.removeNodes(List.of(node), transaction);
} else {
// Statically provisioned hosts are kept as deprovisioned, preserving their history
move(node.hostname(), Node.State.deprovisioned, Agent.system, false, Optional.empty(), transaction);
}
removed.add(node);
}
transaction.commit();
return removed;
}
}
/** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */
public void forget(Node node) {
    if (node.state() != Node.State.deprovisioned)
        throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten");
    NestedTransaction removal = new NestedTransaction();
    db.removeNodes(List.of(node), removal);
    removal.commit();
}
/** Removes all children of the given node as part of the transaction, after checking each is removable. */
private List<Node> removeChildren(Node node, boolean force, NestedTransaction transaction) {
    List<Node> children = list().childrenOf(node).asList();
    for (Node child : children)
        requireRemovable(child, true, force);
    db.removeNodes(children, transaction);
    return new ArrayList<>(children);
}
/**
 * Throws if the given node cannot be removed. Removal is allowed if:
 *  - Tenant node:
 *    - non-recursively: node is unallocated
 *    - recursively: node is unallocated or node is in failed|parked
 *  - Host node: iff in state provisioned|failed|parked
 *  - Child node:
 *    - non-recursively: node in state ready
 *    - recursively: child is in state provisioned|failed|parked|dirty|ready
 */
private void requireRemovable(Node node, boolean removingRecursively, boolean force) {
if (force) return; // forced removal bypasses all checks
// Allocated tenant nodes may only be removed recursively, and only from failed/parked
if (node.type() == NodeType.tenant && node.allocation().isPresent()) {
EnumSet<Node.State> removableStates = EnumSet.of(Node.State.failed, Node.State.parked);
if (!removingRecursively || !removableStates.contains(node.state()))
illegal(node + " is currently allocated and cannot be removed while in " + node.state());
}
// State check: hosts and children have different sets of removable states
final Set<Node.State> removableStates;
if (node.type().isHost()) {
removableStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked);
} else {
removableStates = removingRecursively
? EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked, Node.State.dirty, Node.State.ready)
: EnumSet.of(Node.State.ready);
}
if (!removableStates.contains(node.state()))
illegal(node + " can not be removed while in " + node.state());
}
/**
 * Throws if given node cannot be breakfixed.
 * Breakfix is allowed if the following is true:
 *  - Node is tenant host
 *  - Node is in zone without dynamic provisioning
 *  - Node is in parked or failed state
 *
 * @throws IllegalArgumentException if any of the conditions above does not hold
 */
private void requireBreakfixable(Node node) {
    if (zone.getCloud().dynamicProvisioning()) {
        illegal("Can not breakfix in zone: " + zone);
    }
    if (node.type() != NodeType.host) {
        illegal(node + " can not be breakfixed as it is not a tenant host");
    }
    Set<Node.State> legalStates = EnumSet.of(Node.State.failed, Node.State.parked);
    if (! legalStates.contains(node.state())) {
        // Message fixed: this check guards breakfixing, not removal (was "can not be removed")
        illegal(node + " can not be breakfixed as it is not in the states " + legalStates);
    }
}
/**
 * Increases the restart generation of the active nodes matching given filter.
 *
 * @return the nodes in their new state
 */
public List<Node> restartActive(Predicate<Node> filter) {
// Restricts the filter to active nodes before delegating
return restart(StateFilter.from(Node.State.active).and(filter));
}
/**
 * Increases the restart generation of the any nodes matching given filter.
 *
 * @return the nodes in their new state
 */
public List<Node> restart(Predicate<Node> filter) {
// NOTE: assumes matching nodes are allocated — allocation().get() throws otherwise
return performOn(filter, (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()),
lock));
}
/**
 * Increases the reboot generation of the nodes matching the filter.
 *
 * @return the nodes in their new state
 */
public List<Node> reboot(Predicate<Node> filter) {
// Bumping the wanted reboot generation signals the host admin to reboot
return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock));
}
/**
 * Set target OS version of all nodes matching given filter.
 *
 * @return the nodes in their new state
 */
public List<Node> upgradeOs(Predicate<Node> filter, Optional<Version> version) {
    return performOn(filter, (node, lock) ->
            write(node.with(node.status().withOsVersion(node.status().osVersion().withWanted(version))), lock));
}
/** Retire nodes matching given filter */
public List<Node> retire(Predicate<Node> filter, Agent agent, Instant instant) {
// Sets the wantToRetire flag; the retirement itself is carried out elsewhere
return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock));
}
/** Retire and deprovision given host and all of its children */
public List<Node> deprovision(String hostname, Agent agent, Instant instant) {
return decommission(hostname, DecommissionOperation.deprovision, agent, instant);
}
/** Retire and rebuild given host and all of its children */
public List<Node> rebuild(String hostname, Agent agent, Instant instant) {
return decommission(hostname, DecommissionOperation.rebuild, agent, instant);
}
/** Retire and encrypt given host and all of its children */
public List<Node> encrypt(String hostname, Agent agent, Instant instant) {
return decommission(hostname, DecommissionOperation.encrypt, agent, instant);
}
/** Retires the host and all of its children, flagging them for the given decommission operation. */
private List<Node> decommission(String hostname, DecommissionOperation op, Agent agent, Instant instant) {
Optional<NodeMutex> nodeMutex = lockAndGet(hostname);
if (nodeMutex.isEmpty()) return List.of(); // host gone: nothing to do
Node host = nodeMutex.get().node();
if (!host.type().isHost()) throw new IllegalArgumentException("Cannot " + op + " non-host " + host);
List<Node> result;
boolean wantToDeprovision = op == DecommissionOperation.deprovision;
boolean wantToRebuild = op == DecommissionOperation.rebuild;
// Encryption is signalled via a report instead of a dedicated node flag
Optional<Report> wantToEncryptReport = op == DecommissionOperation.encrypt
? Optional.of(Report.basicReport(Report.WANT_TO_ENCRYPT_ID, Report.Type.UNSPECIFIED, instant, ""))
: Optional.empty();
// Hold both the host's lock and the unallocated lock while flagging the host and its children
try (NodeMutex lock = nodeMutex.get(); Mutex allocationLock = lockUnallocated()) {
host = lock.node(); // re-read under lock
result = performOn(list(allocationLock).childrenOf(host), (node, nodeLock) -> {
Node newNode = node.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant);
if (wantToEncryptReport.isPresent()) {
newNode = newNode.with(newNode.reports().withReport(wantToEncryptReport.get()));
}
return write(newNode, nodeLock);
});
Node newHost = host.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant);
if (wantToEncryptReport.isPresent()) {
newHost = newHost.with(newHost.reports().withReport(wantToEncryptReport.get()));
}
result.add(write(newHost, lock));
}
return result;
}
/**
 * Writes this node after it has changed some internal state but NOT changed its state field.
 * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
 *
 * @param lock already acquired lock
 * @return the written node for convenience
 */
public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); }
/**
 * Writes these nodes after they have changed some internal state but NOT changed their state field.
 * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
 *
 * @param lock already acquired lock
 * @return the written nodes for convenience
 */
public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
// The lock parameter only documents/enforces the locking contract; it is not used here
return db.writeTo(nodes, Agent.system, Optional.empty());
}
/** Performs the given action on all nodes matching the filter, under the appropriate lock per node. */
private List<Node> performOn(Predicate<Node> filter, BiFunction<Node, Mutex, Node> action) {
return performOn(list().matching(filter), action);
}
/**
 * Performs an operation requiring locking on all nodes matching some filter.
 *
 * @param action the action to perform
 * @return the set of nodes on which the action was performed, as they became as a result of the operation
 */
private List<Node> performOn(NodeList nodes, BiFunction<Node, Mutex, Node> action) {
// Partition the nodes by the lock that guards them: the unallocated lock, or their owning application's lock
List<Node> unallocatedNodes = new ArrayList<>();
ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
for (Node node : nodes) {
if (node.allocation().isPresent())
allocatedNodes.put(node.allocation().get().owner(), node);
else
unallocatedNodes.add(node);
}
List<Node> resultingNodes = new ArrayList<>();
try (Mutex lock = lockUnallocated()) {
for (Node node : unallocatedNodes) {
// Re-read under lock; skip nodes that disappeared in the meantime
Optional<Node> currentNode = db.readNode(node.hostname());
if (currentNode.isEmpty()) continue;
resultingNodes.add(action.apply(currentNode.get(), lock));
}
}
for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
try (Mutex lock = lock(applicationNodes.getKey())) {
for (Node node : applicationNodes.getValue()) {
Optional<Node> currentNode = db.readNode(node.hostname());
if (currentNode.isEmpty()) continue;
resultingNodes.add(action.apply(currentNode.get(), lock));
}
}
}
return resultingNodes;
}
/** Returns whether a tenant node can be allocated to the given host in this zone. */
public boolean canAllocateTenantNodeTo(Node host) {
return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning());
}
/** Returns whether tenant nodes can be allocated to the given host, given the provisioning mode. */
public static boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) {
    if ( ! host.type().canRun(NodeType.tenant)) return false;
    if (host.status().wantToRetire()) return false;
    if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false;
    // Dynamically provisioned hosts may receive nodes before they are fully active
    return dynamicProvisioning
           ? EnumSet.of(Node.State.active, Node.State.ready, Node.State.provisioned).contains(host.state())
           : host.state() == Node.State.active;
}
/** Create a lock which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application) {
return db.lock(application);
}
/** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application, Duration timeout) {
return db.lock(application, timeout);
}
/** Create a lock which provides exclusive rights to modifying unallocated nodes */
public Mutex lockUnallocated() { return db.lockInactive(); }
/** Returns the unallocated/application lock, and the node acquired under that lock. */
public Optional<NodeMutex> lockAndGet(Node node) {
// Which lock guards a node depends on its allocation owner, which may change between reading the
// node and acquiring the lock — so re-read under the lock and retry if ownership changed.
Node staleNode = node;
final int maxRetries = 4;
for (int i = 0; i < maxRetries; ++i) {
Mutex lockToClose = lock(staleNode);
try {
// Prefer a read constrained to the expected state; fall back to any state
Optional<Node> freshNode = node(staleNode.hostname(), staleNode.state());
if (freshNode.isEmpty()) {
freshNode = node(staleNode.hostname());
if (freshNode.isEmpty()) {
return Optional.empty();
}
}
if (Objects.equals(freshNode.get().allocation().map(Allocation::owner),
staleNode.allocation().map(Allocation::owner))) {
// Ownership unchanged: we hold the right lock; transfer it to the NodeMutex
NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose);
lockToClose = null;
return Optional.of(nodeMutex);
}
// Ownership changed: release this lock and retry with the fresh node
staleNode = freshNode.get();
} finally {
if (lockToClose != null) lockToClose.close();
}
}
throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " +
"fetching an up to date node under lock: " + node.hostname());
}
/** Returns the unallocated/application lock, and the node acquired under that lock. */
public Optional<NodeMutex> lockAndGet(String hostname) {
// Empty if no node with this hostname exists
return node(hostname).flatMap(this::lockAndGet);
}
/** Returns the unallocated/application lock, and the node acquired under that lock. */
public NodeMutex lockAndGetRequired(Node node) {
return lockAndGet(node).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + node.hostname() + "'"));
}
/** Returns the unallocated/application lock, and the node acquired under that lock. */
public NodeMutex lockAndGetRequired(String hostname) {
return lockAndGet(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
}
/** Returns the lock guarding this node: its application's lock if allocated, otherwise the unallocated lock. */
private Mutex lock(Node node) {
return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
}
/** Returns the node with the given hostname, throwing NoSuchNodeException if it does not exist. */
private Node requireNode(String hostname) {
return node(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
}
/** Convenience for signalling a validation failure with the given message. */
private void illegal(String message) {
throw new IllegalArgumentException(message);
}
/** Returns whether node should be parked when deallocated by given agent */
private static boolean parkOnDeallocationOf(Node node, Agent agent) {
// Already parked, or operator-initiated deallocation: no parking
if (node.state() == Node.State.parked) return false;
if (agent == Agent.operator) return false;
// Retirement counts only when the wantToRetire event was recorded by an operator
boolean retirementRequestedByOperator = node.status().wantToRetire() &&
node.history().event(History.Event.Type.wantToRetire)
.map(History.Event::agent)
.map(a -> a == Agent.operator)
.orElse(false);
// Park when the node awaits deprovisioning, rebuild, encryption, or operator-requested retirement
return node.status().wantToDeprovision() ||
node.status().wantToRebuild() ||
node.reports().getReport(Report.WANT_TO_ENCRYPT_ID).isPresent() ||
retirementRequestedByOperator;
}
/** The different ways a host can be decommissioned */
// Each operation retires the host and its children first; see decommission()
private enum DecommissionOperation {
deprovision,
rebuild,
encrypt,
}
} | class Nodes {
private static final Logger log = Logger.getLogger(Nodes.class.getName());
private final Zone zone; // the zone this node repository manages
private final Clock clock; // injected clock used for all timestamps, for testability
private final CuratorDatabaseClient db; // persistent store for all node state
/** Creates a Nodes view backed by the given database, for the given zone, using the given clock. */
public Nodes(CuratorDatabaseClient db, Zone zone, Clock clock) {
this.zone = zone;
this.clock = clock;
this.db = db;
}
/** Read and write all nodes to make sure they are stored in the latest version of the serialized format */
public void rewrite() {
    Instant start = clock.instant();
    int written = 0;
    for (Node.State state : Node.State.values()) {
        List<Node> nodes = db.readNodes(state);
        db.writeTo(state, nodes, Agent.system, Optional.empty());
        written += nodes.size();
    }
    log.log(Level.INFO, String.format("Rewrote %d nodes in %s", written, Duration.between(start, clock.instant())));
}
/**
 * Finds and returns the node with the hostname in any of the given states, or empty if not found
 *
 * @param hostname the full host name of the node
 * @param inState the states the node may be in. If no states are given, it will be returned from any state
 * @return the node, or empty if it was not found in any of the given states
 */
public Optional<Node> node(String hostname, Node.State... inState) {
return db.readNode(hostname, inState);
}
/**
 * Returns a list of nodes in this repository in any of the given states
 *
 * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
 */
public NodeList list(Node.State... inState) {
return NodeList.copyOf(db.readNodes(inState));
}
/** Returns a locked list of all nodes in this repository */
public LockedNodeList list(Mutex lock) {
// The lock witnesses that the caller holds exclusive rights while using the list
return new LockedNodeList(list().asList(), lock);
}
/**
 * Returns whether the zone managed by this node repository seems to be working.
 * If too many nodes are not responding, there is probably some zone-wide issue
 * and we should probably refrain from making changes to it.
 */
public boolean isWorking() {
    NodeList active = list(Node.State.active);
    if (active.size() <= 5) return true; // too few nodes to draw a conclusion
    double downRatio = (double) active.down().size() / active.size();
    return downRatio <= 0.2;
}
/** Adds a list of newly created reserved nodes to the node repository */
public List<Node> addReservedNodes(LockedNodeList nodes) {
for (Node node : nodes) {
// Only allocated container (child) nodes may be added this way
if ( node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
illegal("Cannot add " + node + ": This is not a child node");
if (node.allocation().isEmpty())
illegal("Cannot add " + node + ": Child nodes need to be allocated");
Optional<Node> existing = node(node.hostname());
if (existing.isPresent())
illegal("Cannot add " + node + ": A node with this name already exists (" +
existing.get() + ", " + existing.get().history() + "). Node to be added: " +
node + ", " + node.history());
}
return db.addNodesInState(nodes.asList(), Node.State.reserved, Agent.system);
}
/**
* Adds a list of (newly created) nodes to the node repository as provisioned nodes.
* If any of the nodes already exists in the deprovisioned state, the new node will be merged
* with the history of that node.
*/
/** Sets a list of nodes ready and returns the nodes in the ready state */
public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
try (Mutex lock = lockUnallocated()) {
// Only provisioned or dirty nodes may become ready; clear retirement flags on the way
List<Node> nodesWithResetFields = nodes.stream()
.map(node -> {
if (node.state() != Node.State.provisioned && node.state() != Node.State.dirty)
illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
return node.withWantToRetire(false,
false,
false,
Agent.system,
clock.instant());
})
.collect(Collectors.toList());
return db.writeTo(Node.State.ready, nodesWithResetFields, agent, Optional.of(reason));
}
}
/** Readies the node with the given hostname; returns it unchanged if it is already ready. */
public Node setReady(String hostname, Agent agent, String reason) {
Node nodeToReady = requireNode(hostname);
if (nodeToReady.state() == Node.State.ready) return nodeToReady;
return setReady(List.of(nodeToReady), agent, reason).get(0);
}
/** Reserve nodes. This method does <b>not</b> lock the node repository */
public List<Node> reserve(List<Node> nodes) {
// Moves the nodes to reserved on behalf of the application agent; no reason is recorded
return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
}
/** Activate nodes. This method does <b>not</b> lock the node repository */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
// State change takes effect only if the caller commits the transaction
return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
}
/**
 * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
 *
 * @param application the application the nodes belong to
 * @param nodes the nodes to make removable. These nodes MUST be in the active state.
 */
public void setRemovable(ApplicationId application, List<Node> nodes) {
try (Mutex lock = lock(application)) {
// Mark each node's allocation removable and persist under the application lock
List<Node> removableNodes = nodes.stream()
.map(node -> node.with(node.allocation().get().removable(true)))
.collect(Collectors.toList());
write(removableNodes, lock);
}
}
/**
 * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the
 * transaction commits.
 */
public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) {
var stateless = NodeList.copyOf(nodes).stateless();
var stateful = NodeList.copyOf(nodes).stateful();
List<Node> written = new ArrayList<>();
// Stateless nodes are recycled directly; stateful nodes go to inactive so their data is kept
written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction.nested()));
written.addAll(db.writeTo(Node.State.inactive, stateful.asList(), Agent.application, Optional.empty(), transaction.nested()));
return written;
}
/**
 * Fails these nodes in a transaction and returns the nodes in the new state which will hold if the
 * transaction commits.
 */
public List<Node> fail(List<Node> nodes, ApplicationTransaction transaction) {
// Fixed agent/reason for application-initiated failing
return fail(nodes, Agent.application, "Failed by application", transaction.nested());
}
/** Fails the given nodes in a self-contained transaction and returns them in their new state. */
public List<Node> fail(List<Node> nodes, Agent agent, String reason) {
NestedTransaction transaction = new NestedTransaction();
nodes = fail(nodes, agent, reason, transaction);
transaction.commit();
return nodes;
}
/** Moves the nodes to failed as part of the given transaction. */
private List<Node> fail(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
// Reset the wantToFail flag (set by failOrMark for active nodes) — the failure is now carried out
nodes = nodes.stream()
.map(n -> n.withWantToFail(false, agent, clock.instant()))
.collect(Collectors.toList());
return db.writeTo(Node.State.failed, nodes, agent, Optional.of(reason), transaction);
}
/** Move nodes to the dirty state */
public List<Node> deallocate(List<Node> nodes, Agent agent, String reason) {
// performOn acquires the proper lock (application or unallocated) for each node before deallocating
return performOn(NodeList.copyOf(nodes), (node, lock) -> deallocate(node, agent, reason));
}
/** Moves the node with the given hostname — and, for hosts, all of its children — to dirty. */
public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) {
Node nodeToDirty = node(hostname).orElseThrow(() ->
new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));
// For hosts, include all children; nodes already dirty are skipped
List<Node> nodesToDirty =
(nodeToDirty.type().isHost() ?
Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
Stream.of(nodeToDirty))
.filter(node -> node.state() != Node.State.dirty)
.collect(Collectors.toList());
// All-or-nothing: refuse the whole operation if any node is in a state that cannot be recycled
List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
.filter(node -> node.state() != Node.State.provisioned)
.filter(node -> node.state() != Node.State.failed)
.filter(node -> node.state() != Node.State.parked)
.filter(node -> node.state() != Node.State.breakfixed)
.map(Node::hostname)
.collect(Collectors.toList());
if ( ! hostnamesNotAllowedToDirty.isEmpty())
illegal("Could not deallocate " + nodeToDirty + ": " +
hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]");
return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).collect(Collectors.toList());
}
/**
 * Set a node dirty or parked, allowed if it is in the provisioned, inactive, failed or parked state.
 * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
 */
public Node deallocate(Node node, Agent agent, String reason) {
// Self-contained transaction variant
NestedTransaction transaction = new NestedTransaction();
Node deallocated = deallocate(node, agent, reason, transaction);
transaction.commit();
return deallocated;
}
/** Deallocates each of the given nodes as part of the given transaction. */
public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
return nodes.stream().map(node -> deallocate(node, agent, reason, transaction)).collect(Collectors.toList());
}
/** Moves the node to dirty, or parks it instead when parkOnDeallocationOf says so. */
public Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) {
if (parkOnDeallocationOf(node, agent)) {
// Nodes flagged for encryption keep their allocation while parked
boolean keepAllocation = node.reports().getReport(Report.WANT_TO_ENCRYPT_ID).isPresent();
return park(node.hostname(), keepAllocation, agent, reason, transaction);
} else {
return db.writeTo(Node.State.dirty, List.of(node), agent, Optional.of(reason), transaction).get(0);
}
}
/**
 * Fails this node and returns it in its new state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node fail(String hostname, Agent agent, String reason) {
// Keeps the node's allocation by default
return fail(hostname, true, agent, reason);
}
/** Fails this node, optionally discarding its allocation, and returns it in its new state. */
public Node fail(String hostname, boolean keepAllocation, Agent agent, String reason) {
return move(hostname, Node.State.failed, agent, keepAllocation, Optional.of(reason));
}
/**
 * Fails all the nodes that are children of hostname before finally failing the hostname itself.
 * Non-active nodes are failed immediately, while active nodes are marked as wantToFail.
 * The host is failed if it has no active nodes and marked wantToFail if it has.
 *
 * @return all the nodes that were changed by this request
 */
public List<Node> failOrMarkRecursively(String hostname, Agent agent, String reason) {
NodeList children = list().childrenOf(hostname);
List<Node> changed = performOn(children, (node, lock) -> failOrMark(node, agent, reason, lock));
// Only fail the host outright when none of its children are active
if (children.state(Node.State.active).isEmpty())
changed.add(move(hostname, Node.State.failed, agent, true, Optional.of(reason)));
else
changed.addAll(performOn(NodeList.of(node(hostname).orElseThrow()), (node, lock) -> failOrMark(node, agent, reason, lock)));
return changed;
}
private Node failOrMark(Node node, Agent agent, String reason, Mutex lock) {
if (node.state() == Node.State.active) {
node = node.withWantToFail(true, agent, clock.instant());
write(node, lock);
return node;
} else {
return move(node.hostname(), Node.State.failed, agent, true, Optional.of(reason));
}
}
/**
* Parks this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
NestedTransaction transaction = new NestedTransaction();
Node parked = park(hostname, keepAllocation, agent, reason, transaction);
transaction.commit();
return parked;
}
private Node park(String hostname, boolean keepAllocation, Agent agent, String reason, NestedTransaction transaction) {
return move(hostname, Node.State.parked, agent, keepAllocation, Optional.of(reason), transaction);
}
/**
* Parks all the nodes that are children of hostname before finally parking the hostname itself.
*
* @return List of all the parked nodes in their new state
*/
public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason));
}
/**
* Moves a previously failed or parked node back to the active state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node reactivate(String hostname, Agent agent, String reason) {
return move(hostname, Node.State.active, agent, true, Optional.of(reason));
}
/**
* Moves a host to breakfixed state, removing any children.
*/
public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) {
Node node = requireNode(hostname);
try (Mutex lock = lockUnallocated()) {
requireBreakfixable(node);
NestedTransaction transaction = new NestedTransaction();
List<Node> removed = removeChildren(node, false, transaction);
removed.add(move(node.hostname(), Node.State.breakfixed, agent, true, Optional.of(reason), transaction));
transaction.commit();
return removed;
}
}
private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
NestedTransaction transaction = new NestedTransaction();
List<Node> moved = list().childrenOf(hostname).asList().stream()
.map(child -> move(child.hostname(), toState, agent, true, reason, transaction))
.collect(Collectors.toList());
moved.add(move(hostname, toState, agent, true, reason, transaction));
transaction.commit();
return moved;
}
/** Move a node to given state */
private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason) {
NestedTransaction transaction = new NestedTransaction();
Node moved = move(hostname, toState, agent, keepAllocation, reason, transaction);
transaction.commit();
return moved;
}
/** Move a node to given state as part of a transaction */
private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason, NestedTransaction transaction) {
try (NodeMutex lock = lockAndGetRequired(hostname)) {
Node node = lock.node();
if (toState == Node.State.active) {
if (node.allocation().isEmpty()) illegal("Could not set " + node + " active: It has no allocation");
if (!keepAllocation) illegal("Could not set " + node + " active: Requested to discard allocation");
for (Node currentActive : list(Node.State.active).owner(node.allocation().get().owner())) {
if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
&& node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
}
}
if (!keepAllocation && node.allocation().isPresent()) {
node = node.withoutAllocation();
}
if (toState == Node.State.deprovisioned) {
node = node.with(IP.Config.EMPTY);
}
return db.writeTo(toState, List.of(node), agent, reason, transaction).get(0);
}
}
/*
* This method is used by the REST API to handle readying nodes for new allocations. For Linux
* containers this will remove the node from node repository, otherwise the node will be moved to state ready.
*/
public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
Node node = requireNode(hostname);
if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
if (node.state() != Node.State.dirty)
illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
return removeRecursively(node, true).get(0);
}
if (node.state() == Node.State.ready) return node;
Node parentHost = node.parentHostname().flatMap(this::node).orElse(node);
List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost);
if ( ! failureReasons.isEmpty())
illegal(node + " cannot be readied because it has hard failures: " + failureReasons);
return setReady(List.of(node), agent, reason).get(0);
}
/**
* Removes all the nodes that are children of hostname before finally removing the hostname itself.
*
* @return a List of all the nodes that have been removed or (for hosts) deprovisioned
*/
public List<Node> removeRecursively(String hostname) {
Node node = requireNode(hostname);
return removeRecursively(node, false);
}
public List<Node> removeRecursively(Node node, boolean force) {
try (Mutex lock = lockUnallocated()) {
requireRemovable(node, false, force);
NestedTransaction transaction = new NestedTransaction();
final List<Node> removed;
if (!node.type().isHost()) {
removed = List.of(node);
db.removeNodes(removed, transaction);
} else {
removed = removeChildren(node, force, transaction);
if (zone.getCloud().dynamicProvisioning()) {
db.removeNodes(List.of(node), transaction);
} else {
move(node.hostname(), Node.State.deprovisioned, Agent.system, false, Optional.empty(), transaction);
}
removed.add(node);
}
transaction.commit();
return removed;
}
}
/** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */
public void forget(Node node) {
if (node.state() != Node.State.deprovisioned)
throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten");
NestedTransaction transaction = new NestedTransaction();
db.removeNodes(List.of(node), transaction);
transaction.commit();
}
private List<Node> removeChildren(Node node, boolean force, NestedTransaction transaction) {
List<Node> children = list().childrenOf(node).asList();
children.forEach(child -> requireRemovable(child, true, force));
db.removeNodes(children, transaction);
return new ArrayList<>(children);
}
/**
* Throws if the given node cannot be removed. Removal is allowed if:
* - Tenant node:
* - non-recursively: node is unallocated
* - recursively: node is unallocated or node is in failed|parked
* - Host node: iff in state provisioned|failed|parked
* - Child node:
* - non-recursively: node in state ready
* - recursively: child is in state provisioned|failed|parked|dirty|ready
*/
private void requireRemovable(Node node, boolean removingRecursively, boolean force) {
if (force) return;
if (node.type() == NodeType.tenant && node.allocation().isPresent()) {
EnumSet<Node.State> removableStates = EnumSet.of(Node.State.failed, Node.State.parked);
if (!removingRecursively || !removableStates.contains(node.state()))
illegal(node + " is currently allocated and cannot be removed while in " + node.state());
}
final Set<Node.State> removableStates;
if (node.type().isHost()) {
removableStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked);
} else {
removableStates = removingRecursively
? EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked, Node.State.dirty, Node.State.ready)
: EnumSet.of(Node.State.ready);
}
if (!removableStates.contains(node.state()))
illegal(node + " can not be removed while in " + node.state());
}
/**
* Throws if given node cannot be breakfixed.
* Breakfix is allowed if the following is true:
* - Node is tenant host
* - Node is in zone without dynamic provisioning
* - Node is in parked or failed state
*/
private void requireBreakfixable(Node node) {
if (zone.getCloud().dynamicProvisioning()) {
illegal("Can not breakfix in zone: " + zone);
}
if (node.type() != NodeType.host) {
illegal(node + " can not be breakfixed as it is not a tenant host");
}
Set<Node.State> legalStates = EnumSet.of(Node.State.failed, Node.State.parked);
if (! legalStates.contains(node.state())) {
illegal(node + " can not be removed as it is not in the states " + legalStates);
}
}
/**
* Increases the restart generation of the active nodes matching given filter.
*
* @return the nodes in their new state
*/
public List<Node> restartActive(Predicate<Node> filter) {
return restart(StateFilter.from(Node.State.active).and(filter));
}
/**
* Increases the restart generation of the any nodes matching given filter.
*
* @return the nodes in their new state
*/
public List<Node> restart(Predicate<Node> filter) {
return performOn(filter, (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()),
lock));
}
/**
* Increases the reboot generation of the nodes matching the filter.
*
* @return the nodes in their new state
*/
public List<Node> reboot(Predicate<Node> filter) {
return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock));
}
/**
* Set target OS version of all nodes matching given filter.
*
* @return the nodes in their new state
*/
public List<Node> upgradeOs(Predicate<Node> filter, Optional<Version> version) {
return performOn(filter, (node, lock) -> {
var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version));
return write(node.with(newStatus), lock);
});
}
/** Retire nodes matching given filter */
public List<Node> retire(Predicate<Node> filter, Agent agent, Instant instant) {
return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock));
}
/** Retire and deprovision given host and all of its children */
public List<Node> deprovision(String hostname, Agent agent, Instant instant) {
return decommission(hostname, DecommissionOperation.deprovision, agent, instant);
}
/** Retire and rebuild given host and all of its children */
public List<Node> rebuild(String hostname, Agent agent, Instant instant) {
return decommission(hostname, DecommissionOperation.rebuild, agent, instant);
}
/** Retire and encrypt given host and all of its children */
public List<Node> encrypt(String hostname, Agent agent, Instant instant) {
return decommission(hostname, DecommissionOperation.encrypt, agent, instant);
}
private List<Node> decommission(String hostname, DecommissionOperation op, Agent agent, Instant instant) {
Optional<NodeMutex> nodeMutex = lockAndGet(hostname);
if (nodeMutex.isEmpty()) return List.of();
Node host = nodeMutex.get().node();
if (!host.type().isHost()) throw new IllegalArgumentException("Cannot " + op + " non-host " + host);
List<Node> result;
boolean wantToDeprovision = op == DecommissionOperation.deprovision;
boolean wantToRebuild = op == DecommissionOperation.rebuild;
Optional<Report> wantToEncryptReport = op == DecommissionOperation.encrypt
? Optional.of(Report.basicReport(Report.WANT_TO_ENCRYPT_ID, Report.Type.UNSPECIFIED, instant, ""))
: Optional.empty();
try (NodeMutex lock = nodeMutex.get(); Mutex allocationLock = lockUnallocated()) {
host = lock.node();
result = performOn(list(allocationLock).childrenOf(host), (node, nodeLock) -> {
Node newNode = node.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant);
if (wantToEncryptReport.isPresent()) {
newNode = newNode.with(newNode.reports().withReport(wantToEncryptReport.get()));
}
return write(newNode, nodeLock);
});
Node newHost = host.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant);
if (wantToEncryptReport.isPresent()) {
newHost = newHost.with(newHost.reports().withReport(wantToEncryptReport.get()));
}
result.add(write(newHost, lock));
}
return result;
}
/**
* Writes this node after it has changed some internal state but NOT changed its state field.
* This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
*
* @param lock already acquired lock
* @return the written node for convenience
*/
public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); }
/**
* Writes these nodes after they have changed some internal state but NOT changed their state field.
* This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
*
* @param lock already acquired lock
* @return the written nodes for convenience
*/
public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
return db.writeTo(nodes, Agent.system, Optional.empty());
}
private List<Node> performOn(Predicate<Node> filter, BiFunction<Node, Mutex, Node> action) {
return performOn(list().matching(filter), action);
}
/**
* Performs an operation requiring locking on all nodes matching some filter.
*
* @param action the action to perform
* @return the set of nodes on which the action was performed, as they became as a result of the operation
*/
private List<Node> performOn(NodeList nodes, BiFunction<Node, Mutex, Node> action) {
List<Node> unallocatedNodes = new ArrayList<>();
ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
for (Node node : nodes) {
if (node.allocation().isPresent())
allocatedNodes.put(node.allocation().get().owner(), node);
else
unallocatedNodes.add(node);
}
List<Node> resultingNodes = new ArrayList<>();
try (Mutex lock = lockUnallocated()) {
for (Node node : unallocatedNodes) {
Optional<Node> currentNode = db.readNode(node.hostname());
if (currentNode.isEmpty()) continue;
resultingNodes.add(action.apply(currentNode.get(), lock));
}
}
for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
try (Mutex lock = lock(applicationNodes.getKey())) {
for (Node node : applicationNodes.getValue()) {
Optional<Node> currentNode = db.readNode(node.hostname());
if (currentNode.isEmpty()) continue;
resultingNodes.add(action.apply(currentNode.get(), lock));
}
}
}
return resultingNodes;
}
public boolean canAllocateTenantNodeTo(Node host) {
return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning());
}
public static boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) {
if ( ! host.type().canRun(NodeType.tenant)) return false;
if (host.status().wantToRetire()) return false;
if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false;
if (dynamicProvisioning)
return EnumSet.of(Node.State.active, Node.State.ready, Node.State.provisioned).contains(host.state());
else
return host.state() == Node.State.active;
}
/** Create a lock which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application) {
return db.lock(application);
}
/** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application, Duration timeout) {
return db.lock(application, timeout);
}
/** Create a lock which provides exclusive rights to modifying unallocated nodes */
public Mutex lockUnallocated() { return db.lockInactive(); }
/** Returns the unallocated/application lock, and the node acquired under that lock. */
public Optional<NodeMutex> lockAndGet(Node node) {
Node staleNode = node;
final int maxRetries = 4;
for (int i = 0; i < maxRetries; ++i) {
Mutex lockToClose = lock(staleNode);
try {
Optional<Node> freshNode = node(staleNode.hostname(), staleNode.state());
if (freshNode.isEmpty()) {
freshNode = node(staleNode.hostname());
if (freshNode.isEmpty()) {
return Optional.empty();
}
}
if (Objects.equals(freshNode.get().allocation().map(Allocation::owner),
staleNode.allocation().map(Allocation::owner))) {
NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose);
lockToClose = null;
return Optional.of(nodeMutex);
}
staleNode = freshNode.get();
} finally {
if (lockToClose != null) lockToClose.close();
}
}
throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " +
"fetching an up to date node under lock: " + node.hostname());
}
/** Returns the unallocated/application lock, and the node acquired under that lock. */
public Optional<NodeMutex> lockAndGet(String hostname) {
return node(hostname).flatMap(this::lockAndGet);
}
/** Returns the unallocated/application lock, and the node acquired under that lock. */
public NodeMutex lockAndGetRequired(Node node) {
return lockAndGet(node).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + node.hostname() + "'"));
}
/** Returns the unallocated/application lock, and the node acquired under that lock. */
public NodeMutex lockAndGetRequired(String hostname) {
return lockAndGet(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
}
private Mutex lock(Node node) {
return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
}
private Node requireNode(String hostname) {
return node(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
}
private void illegal(String message) {
throw new IllegalArgumentException(message);
}
/** Returns whether node should be parked when deallocated by given agent */
private static boolean parkOnDeallocationOf(Node node, Agent agent) {
if (node.state() == Node.State.parked) return false;
if (agent == Agent.operator) return false;
boolean retirementRequestedByOperator = node.status().wantToRetire() &&
node.history().event(History.Event.Type.wantToRetire)
.map(History.Event::agent)
.map(a -> a == Agent.operator)
.orElse(false);
return node.status().wantToDeprovision() ||
node.status().wantToRebuild() ||
node.reports().getReport(Report.WANT_TO_ENCRYPT_ID).isPresent() ||
retirementRequestedByOperator;
}
/** The different ways a host can be decommissioned */
private enum DecommissionOperation {
deprovision,
rebuild,
encrypt,
}
} |
which is `true` (but more descriptive) | public List<Node> addNodes(List<Node> nodes, Agent agent) {
try (Mutex lock = lockUnallocated()) {
List<Node> nodesToAdd = new ArrayList<>();
List<Node> nodesToRemove = new ArrayList<>();
for (int i = 0; i < nodes.size(); i++) {
var node = nodes.get(i);
for (int j = 0; j < i; j++) {
if (node.equals(nodes.get(j)))
illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
}
Optional<Node> existing = node(node.hostname());
if (existing.isPresent()) {
if (existing.get().state() != Node.State.deprovisioned)
illegal("Cannot add " + node + ": A node with this name already exists");
node = node.with(existing.get().history());
node = node.with(existing.get().reports());
node = node.with(node.status().withFailCount(existing.get().status().failCount()));
if (existing.get().status().firmwareVerifiedAt().isPresent())
node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get()));
boolean rebuilding = existing.get().status().wantToRebuild();
if (rebuilding) {
node = node.with(node.status().withWantToRetire(existing.get().status().wantToRetire(),
false,
existing.get().status().wantToRebuild()));
}
nodesToRemove.add(existing.get());
}
nodesToAdd.add(node);
}
NestedTransaction transaction = new NestedTransaction();
List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), Node.State.provisioned, agent, transaction);
db.removeNodes(nodesToRemove, transaction);
transaction.commit();
return resultingNodes;
}
} | existing.get().status().wantToRebuild())); | public List<Node> addNodes(List<Node> nodes, Agent agent) {
try (Mutex lock = lockUnallocated()) {
List<Node> nodesToAdd = new ArrayList<>();
List<Node> nodesToRemove = new ArrayList<>();
for (int i = 0; i < nodes.size(); i++) {
var node = nodes.get(i);
for (int j = 0; j < i; j++) {
if (node.equals(nodes.get(j)))
illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
}
Optional<Node> existing = node(node.hostname());
if (existing.isPresent()) {
if (existing.get().state() != Node.State.deprovisioned)
illegal("Cannot add " + node + ": A node with this name already exists");
node = node.with(existing.get().history());
node = node.with(existing.get().reports());
node = node.with(node.status().withFailCount(existing.get().status().failCount()));
if (existing.get().status().firmwareVerifiedAt().isPresent())
node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get()));
boolean rebuilding = existing.get().status().wantToRebuild();
if (rebuilding) {
node = node.with(node.status().withWantToRetire(existing.get().status().wantToRetire(),
false,
rebuilding));
}
nodesToRemove.add(existing.get());
}
nodesToAdd.add(node);
}
NestedTransaction transaction = new NestedTransaction();
List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), Node.State.provisioned, agent, transaction);
db.removeNodes(nodesToRemove, transaction);
transaction.commit();
return resultingNodes;
}
} | class Nodes {
private static final Logger log = Logger.getLogger(Nodes.class.getName());
private final Zone zone;
private final Clock clock;
private final CuratorDatabaseClient db;
public Nodes(CuratorDatabaseClient db, Zone zone, Clock clock) {
this.zone = zone;
this.clock = clock;
this.db = db;
}
/** Read and write all nodes to make sure they are stored in the latest version of the serialized format */
public void rewrite() {
Instant start = clock.instant();
int nodesWritten = 0;
for (Node.State state : Node.State.values()) {
List<Node> nodes = db.readNodes(state);
db.writeTo(state, nodes, Agent.system, Optional.empty());
nodesWritten += nodes.size();
}
Instant end = clock.instant();
log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end)));
}
/**
* Finds and returns the node with the hostname in any of the given states, or empty if not found
*
* @param hostname the full host name of the node
* @param inState the states the node may be in. If no states are given, it will be returned from any state
* @return the node, or empty if it was not found in any of the given states
*/
public Optional<Node> node(String hostname, Node.State... inState) {
return db.readNode(hostname, inState);
}
/**
* Returns a list of nodes in this repository in any of the given states
*
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
*/
public NodeList list(Node.State... inState) {
return NodeList.copyOf(db.readNodes(inState));
}
/** Returns a locked list of all nodes in this repository */
public LockedNodeList list(Mutex lock) {
return new LockedNodeList(list().asList(), lock);
}
/**
* Returns whether the zone managed by this node repository seems to be working.
* If too many nodes are not responding, there is probably some zone-wide issue
* and we should probably refrain from making changes to it.
*/
public boolean isWorking() {
NodeList activeNodes = list(Node.State.active);
if (activeNodes.size() <= 5) return true;
NodeList downNodes = activeNodes.down();
return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 );
}
/** Adds a list of newly created reserved nodes to the node repository */
public List<Node> addReservedNodes(LockedNodeList nodes) {
for (Node node : nodes) {
if ( node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
illegal("Cannot add " + node + ": This is not a child node");
if (node.allocation().isEmpty())
illegal("Cannot add " + node + ": Child nodes need to be allocated");
Optional<Node> existing = node(node.hostname());
if (existing.isPresent())
illegal("Cannot add " + node + ": A node with this name already exists (" +
existing.get() + ", " + existing.get().history() + "). Node to be added: " +
node + ", " + node.history());
}
return db.addNodesInState(nodes.asList(), Node.State.reserved, Agent.system);
}
/**
* Adds a list of (newly created) nodes to the node repository as provisioned nodes.
* If any of the nodes already exists in the deprovisioned state, the new node will be merged
* with the history of that node.
*/
/** Sets a list of nodes ready and returns the nodes in the ready state */
public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
try (Mutex lock = lockUnallocated()) {
List<Node> nodesWithResetFields = nodes.stream()
.map(node -> {
if (node.state() != Node.State.provisioned && node.state() != Node.State.dirty)
illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
return node.withWantToRetire(false,
false,
false,
Agent.system,
clock.instant());
})
.collect(Collectors.toList());
return db.writeTo(Node.State.ready, nodesWithResetFields, agent, Optional.of(reason));
}
}
public Node setReady(String hostname, Agent agent, String reason) {
Node nodeToReady = requireNode(hostname);
if (nodeToReady.state() == Node.State.ready) return nodeToReady;
return setReady(List.of(nodeToReady), agent, reason).get(0);
}
/** Reserve nodes. This method does <b>not</b> lock the node repository */
public List<Node> reserve(List<Node> nodes) {
return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
}
/** Activate nodes. This method does <b>not</b> lock the node repository */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
}
/**
* Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
*
* @param application the application the nodes belong to
* @param nodes the nodes to make removable. These nodes MUST be in the active state.
*/
public void setRemovable(ApplicationId application, List<Node> nodes) {
try (Mutex lock = lock(application)) {
List<Node> removableNodes = nodes.stream()
.map(node -> node.with(node.allocation().get().removable(true)))
.collect(Collectors.toList());
write(removableNodes, lock);
}
}
/**
* Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the
* transaction commits.
*/
public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) {
var stateless = NodeList.copyOf(nodes).stateless();
var stateful = NodeList.copyOf(nodes).stateful();
List<Node> written = new ArrayList<>();
written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction.nested()));
written.addAll(db.writeTo(Node.State.inactive, stateful.asList(), Agent.application, Optional.empty(), transaction.nested()));
return written;
}
/**
* Fails these nodes in a transaction and returns the nodes in the new state which will hold if the
* transaction commits.
*/
public List<Node> fail(List<Node> nodes, ApplicationTransaction transaction) {
return fail(nodes, Agent.application, "Failed by application", transaction.nested());
}
public List<Node> fail(List<Node> nodes, Agent agent, String reason) {
NestedTransaction transaction = new NestedTransaction();
nodes = fail(nodes, agent, reason, transaction);
transaction.commit();
return nodes;
}
private List<Node> fail(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
nodes = nodes.stream()
.map(n -> n.withWantToFail(false, agent, clock.instant()))
.collect(Collectors.toList());
return db.writeTo(Node.State.failed, nodes, agent, Optional.of(reason), transaction);
}
/** Move nodes to the dirty state */
public List<Node> deallocate(List<Node> nodes, Agent agent, String reason) {
return performOn(NodeList.copyOf(nodes), (node, lock) -> deallocate(node, agent, reason));
}
public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) {
Node nodeToDirty = node(hostname).orElseThrow(() ->
new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));
List<Node> nodesToDirty =
(nodeToDirty.type().isHost() ?
Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
Stream.of(nodeToDirty))
.filter(node -> node.state() != Node.State.dirty)
.collect(Collectors.toList());
List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
.filter(node -> node.state() != Node.State.provisioned)
.filter(node -> node.state() != Node.State.failed)
.filter(node -> node.state() != Node.State.parked)
.filter(node -> node.state() != Node.State.breakfixed)
.map(Node::hostname)
.collect(Collectors.toList());
if ( ! hostnamesNotAllowedToDirty.isEmpty())
illegal("Could not deallocate " + nodeToDirty + ": " +
hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]");
return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).collect(Collectors.toList());
}
/**
* Set a node dirty or parked, allowed if it is in the provisioned, inactive, failed or parked state.
* Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
*/
public Node deallocate(Node node, Agent agent, String reason) {
NestedTransaction transaction = new NestedTransaction();
Node deallocated = deallocate(node, agent, reason, transaction);
transaction.commit();
return deallocated;
}
public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
return nodes.stream().map(node -> deallocate(node, agent, reason, transaction)).collect(Collectors.toList());
}
public Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) {
if (parkOnDeallocationOf(node, agent)) {
boolean keepAllocation = node.reports().getReport(Report.WANT_TO_ENCRYPT_ID).isPresent();
return park(node.hostname(), keepAllocation, agent, reason, transaction);
} else {
return db.writeTo(Node.State.dirty, List.of(node), agent, Optional.of(reason), transaction).get(0);
}
}
/**
* Fails this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node fail(String hostname, Agent agent, String reason) {
return fail(hostname, true, agent, reason);
}
public Node fail(String hostname, boolean keepAllocation, Agent agent, String reason) {
return move(hostname, Node.State.failed, agent, keepAllocation, Optional.of(reason));
}
/**
 * Fails all the nodes that are children of hostname before finally failing the hostname itself.
 * Non-active nodes are failed immediately, while active nodes are marked as wantToFail.
 * The host is failed if it has no active nodes and marked wantToFail if it has.
 *
 * @return all the nodes that were changed by this request
 */
public List<Node> failOrMarkRecursively(String hostname, Agent agent, String reason) {
    NodeList children = list().childrenOf(hostname);
    List<Node> changed = performOn(children, (node, lock) -> failOrMark(node, agent, reason, lock));
    // The host itself: fail directly only when none of its children are active
    if (children.state(Node.State.active).isEmpty())
        changed.add(move(hostname, Node.State.failed, agent, true, Optional.of(reason)));
    else
        changed.addAll(performOn(NodeList.of(node(hostname).orElseThrow()), (node, lock) -> failOrMark(node, agent, reason, lock)));
    return changed;
}

/** Marks an active node as wantToFail; fails any other node immediately. Caller must hold the given lock. */
private Node failOrMark(Node node, Agent agent, String reason, Mutex lock) {
    if (node.state() == Node.State.active) {
        node = node.withWantToFail(true, agent, clock.instant());
        write(node, lock);
        return node;
    } else {
        return move(node.hostname(), Node.State.failed, agent, true, Optional.of(reason));
    }
}
/**
 * Parks this node and returns it in its new state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
    NestedTransaction transaction = new NestedTransaction();
    Node parked = park(hostname, keepAllocation, agent, reason, transaction);
    transaction.commit();
    return parked;
}

/** Parks this node as part of the given transaction. */
private Node park(String hostname, boolean keepAllocation, Agent agent, String reason, NestedTransaction transaction) {
    return move(hostname, Node.State.parked, agent, keepAllocation, Optional.of(reason), transaction);
}

/**
 * Parks all the nodes that are children of hostname before finally parking the hostname itself.
 *
 * @return List of all the parked nodes in their new state
 */
public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
    return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason));
}
/**
 * Moves a previously failed or parked node back to the active state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node reactivate(String hostname, Agent agent, String reason) {
    return move(hostname, Node.State.active, agent, true, Optional.of(reason));
}
/**
 * Moves a host to breakfixed state, removing any children.
 *
 * @return the removed children plus the breakfixed host, in their new states
 */
public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) {
    Node node = requireNode(hostname);
    try (Mutex lock = lockUnallocated()) {
        requireBreakfixable(node); // validated under lock to avoid racing state changes
        NestedTransaction transaction = new NestedTransaction();
        List<Node> removed = removeChildren(node, false, transaction);
        removed.add(move(node.hostname(), Node.State.breakfixed, agent, true, Optional.of(reason), transaction));
        transaction.commit();
        return removed;
    }
}
/** Moves all children of hostname, then hostname itself, to the given state in a single transaction. */
private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
    NestedTransaction transaction = new NestedTransaction();
    List<Node> moved = list().childrenOf(hostname).asList().stream()
                             .map(child -> move(child.hostname(), toState, agent, true, reason, transaction))
                             .collect(Collectors.toList());
    moved.add(move(hostname, toState, agent, true, reason, transaction));
    transaction.commit();
    return moved;
}

/** Move a node to given state */
private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason) {
    NestedTransaction transaction = new NestedTransaction();
    Node moved = move(hostname, toState, agent, keepAllocation, reason, transaction);
    transaction.commit();
    return moved;
}
/**
 * Move a node to given state as part of a transaction.
 * Moving to active validates that the node has an allocation not clashing with any
 * currently active node of the same application (same cluster and index).
 */
private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason, NestedTransaction transaction) {
    try (NodeMutex lock = lockAndGetRequired(hostname)) {
        Node node = lock.node();
        if (toState == Node.State.active) {
            if (node.allocation().isEmpty()) illegal("Could not set " + node + " active: It has no allocation");
            if (!keepAllocation) illegal("Could not set " + node + " active: Requested to discard allocation");
            for (Node currentActive : list(Node.State.active).owner(node.allocation().get().owner())) {
                // Reject activation if another active node occupies the same cluster slot
                if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
                    && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
                    illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
            }
        }
        if (!keepAllocation && node.allocation().isPresent()) {
            node = node.withoutAllocation();
        }
        if (toState == Node.State.deprovisioned) {
            node = node.with(IP.Config.EMPTY); // deprovisioned nodes keep no IP config
        }
        return db.writeTo(toState, List.of(node), agent, reason, transaction).get(0);
    }
}
/*
 * This method is used by the REST API to handle readying nodes for new allocations. For Linux
 * containers this will remove the node from node repository, otherwise the node will be moved to state ready.
 */
public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
    Node node = requireNode(hostname);
    if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
        // Container tenant nodes are removed entirely rather than readied
        if (node.state() != Node.State.dirty)
            illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
        return removeRecursively(node, true).get(0);
    }

    if (node.state() == Node.State.ready) return node; // idempotent

    // A node whose parent host has hard failures must not be readied
    Node parentHost = node.parentHostname().flatMap(this::node).orElse(node);
    List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost);
    if ( ! failureReasons.isEmpty())
        illegal(node + " cannot be readied because it has hard failures: " + failureReasons);

    return setReady(List.of(node), agent, reason).get(0);
}
/**
 * Removes all the nodes that are children of hostname before finally removing the hostname itself.
 *
 * @return a List of all the nodes that have been removed or (for hosts) deprovisioned
 */
public List<Node> removeRecursively(String hostname) {
    Node node = requireNode(hostname);
    return removeRecursively(node, false);
}

/**
 * Removes this node and any children. With force, state checks are bypassed.
 * Hosts in dynamically provisioned zones are deleted outright; otherwise they are
 * moved to deprovisioned so their history is retained.
 */
public List<Node> removeRecursively(Node node, boolean force) {
    try (Mutex lock = lockUnallocated()) {
        requireRemovable(node, false, force);
        NestedTransaction transaction = new NestedTransaction();
        final List<Node> removed;
        if (!node.type().isHost()) {
            removed = List.of(node);
            db.removeNodes(removed, transaction);
        } else {
            removed = removeChildren(node, force, transaction);
            if (zone.getCloud().dynamicProvisioning()) {
                db.removeNodes(List.of(node), transaction);
            } else {
                move(node.hostname(), Node.State.deprovisioned, Agent.system, false, Optional.empty(), transaction);
            }
            removed.add(node);
        }
        transaction.commit();
        return removed;
    }
}
/** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */
public void forget(Node node) {
    if (node.state() != Node.State.deprovisioned)
        throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten");
    NestedTransaction transaction = new NestedTransaction();
    db.removeNodes(List.of(node), transaction);
    transaction.commit();
}

/** Removes all children of the given node as part of the transaction, after validating each is removable. */
private List<Node> removeChildren(Node node, boolean force, NestedTransaction transaction) {
    List<Node> children = list().childrenOf(node).asList();
    children.forEach(child -> requireRemovable(child, true, force));
    db.removeNodes(children, transaction);
    return new ArrayList<>(children); // mutable copy so callers may append the host itself
}
/**
 * Throws if the given node cannot be removed. Removal is allowed if:
 *  - Tenant node:
 *    - non-recursively: node is unallocated
 *    - recursively: node is unallocated or node is in failed|parked
 *  - Host node: iff in state provisioned|failed|parked
 *  - Child node:
 *    - non-recursively: node in state ready
 *    - recursively: child is in state provisioned|failed|parked|dirty|ready
 */
private void requireRemovable(Node node, boolean removingRecursively, boolean force) {
    if (force) return; // forced removal bypasses every state check

    // An allocated tenant node may only be removed recursively, and only from failed|parked
    if (node.type() == NodeType.tenant && node.allocation().isPresent()) {
        boolean allowedWhileAllocated = removingRecursively
                                        && EnumSet.of(Node.State.failed, Node.State.parked).contains(node.state());
        if (!allowedWhileAllocated)
            illegal(node + " is currently allocated and cannot be removed while in " + node.state());
    }

    Set<Node.State> removableStates =
            node.type().isHost()
            ? EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked)
            : removingRecursively
              ? EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked, Node.State.dirty, Node.State.ready)
              : EnumSet.of(Node.State.ready);
    if (!removableStates.contains(node.state()))
        illegal(node + " can not be removed while in " + node.state());
}
/**
 * Throws if given node cannot be breakfixed.
 * Breakfix is allowed if the following is true:
 *  - Node is tenant host
 *  - Node is in zone without dynamic provisioning
 *  - Node is in parked or failed state
 */
private void requireBreakfixable(Node node) {
    if (zone.getCloud().dynamicProvisioning())
        illegal("Can not breakfix in zone: " + zone);

    if (node.type() != NodeType.host)
        illegal(node + " can not be breakfixed as it is not a tenant host");

    EnumSet<Node.State> legalStates = EnumSet.of(Node.State.failed, Node.State.parked);
    if ( ! legalStates.contains(node.state()))
        illegal(node + " can not be removed as it is not in the states " + legalStates);
}
/**
 * Increases the restart generation of the active nodes matching given filter.
 *
 * @return the nodes in their new state
 */
public List<Node> restartActive(Predicate<Node> filter) {
    return restart(StateFilter.from(Node.State.active).and(filter));
}

/**
 * Increases the restart generation of the any nodes matching given filter.
 *
 * @return the nodes in their new state
 */
public List<Node> restart(Predicate<Node> filter) {
    return performOn(filter, (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()),
                                                   lock));
}

/**
 * Increases the reboot generation of the nodes matching the filter.
 *
 * @return the nodes in their new state
 */
public List<Node> reboot(Predicate<Node> filter) {
    return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock));
}
/**
 * Set target OS version of all nodes matching given filter.
 *
 * @return the nodes in their new state
 */
public List<Node> upgradeOs(Predicate<Node> filter, Optional<Version> version) {
    return performOn(filter, (node, lock) -> {
        var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version));
        return write(node.with(newStatus), lock);
    });
}

/** Retire nodes matching given filter */
public List<Node> retire(Predicate<Node> filter, Agent agent, Instant instant) {
    return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock));
}
/** Retire and deprovision given host and all of its children */
public List<Node> deprovision(String hostname, Agent agent, Instant instant) {
    return decommission(hostname, DecommissionOperation.deprovision, agent, instant);
}

/** Retire and rebuild given host and all of its children */
public List<Node> rebuild(String hostname, Agent agent, Instant instant) {
    return decommission(hostname, DecommissionOperation.rebuild, agent, instant);
}

/** Retire and encrypt given host and all of its children */
public List<Node> encrypt(String hostname, Agent agent, Instant instant) {
    return decommission(hostname, DecommissionOperation.encrypt, agent, instant);
}
/**
 * Marks the given host and all of its children wantToRetire, together with the flag or
 * report matching the requested operation (deprovision, rebuild or encrypt).
 *
 * @return the nodes in their new state, or empty if the host does not exist
 */
private List<Node> decommission(String hostname, DecommissionOperation op, Agent agent, Instant instant) {
    Optional<NodeMutex> nodeMutex = lockAndGet(hostname);
    if (nodeMutex.isEmpty()) return List.of();
    Node host = nodeMutex.get().node();
    if (!host.type().isHost()) throw new IllegalArgumentException("Cannot " + op + " non-host " + host);

    List<Node> result;
    boolean wantToDeprovision = op == DecommissionOperation.deprovision;
    boolean wantToRebuild = op == DecommissionOperation.rebuild;
    // Encryption is signalled through a report rather than a status flag
    Optional<Report> wantToEncryptReport = op == DecommissionOperation.encrypt
            ? Optional.of(Report.basicReport(Report.WANT_TO_ENCRYPT_ID, Report.Type.UNSPECIFIED, instant, ""))
            : Optional.empty();
    // Hold both the host lock and the unallocated-nodes lock while updating host and children
    try (NodeMutex lock = nodeMutex.get(); Mutex allocationLock = lockUnallocated()) {
        // Modify parent with wantToRetire while holding the allocationLock to prevent
        // any further allocation of nodes on this host
        host = lock.node();
        result = performOn(list(allocationLock).childrenOf(host), (node, nodeLock) -> {
            Node newNode = node.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant);
            if (wantToEncryptReport.isPresent()) {
                newNode = newNode.with(newNode.reports().withReport(wantToEncryptReport.get()));
            }
            return write(newNode, nodeLock);
        });
        Node newHost = host.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant);
        if (wantToEncryptReport.isPresent()) {
            newHost = newHost.with(newHost.reports().withReport(wantToEncryptReport.get()));
        }
        result.add(write(newHost, lock));
    }
    return result;
}
/**
 * Writes this node after it has changed some internal state but NOT changed its state field.
 * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
 *
 * @param lock already acquired lock
 * @return the written node for convenience
 */
public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); }

/**
 * Writes these nodes after they have changed some internal state but NOT changed their state field.
 * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
 *
 * @param lock already acquired lock (taken as a parameter only to prove the caller holds it)
 * @return the written nodes for convenience
 */
public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
    return db.writeTo(nodes, Agent.system, Optional.empty());
}
/** Performs an operation requiring locking on all nodes matching the given predicate. */
private List<Node> performOn(Predicate<Node> filter, BiFunction<Node, Mutex, Node> action) {
    return performOn(list().matching(filter), action);
}

/**
 * Performs an operation requiring locking on all nodes matching some filter.
 * Nodes are grouped by owning application so each application lock is taken only once;
 * unallocated nodes are processed under the unallocated lock. Each node is re-read
 * under its lock, and nodes deleted in the meantime are silently skipped.
 *
 * @param action the action to perform
 * @return the set of nodes on which the action was performed, as they became as a result of the operation
 */
private List<Node> performOn(NodeList nodes, BiFunction<Node, Mutex, Node> action) {
    List<Node> unallocatedNodes = new ArrayList<>();
    ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();

    // Group matching nodes by the lock needed
    for (Node node : nodes) {
        if (node.allocation().isPresent())
            allocatedNodes.put(node.allocation().get().owner(), node);
        else
            unallocatedNodes.add(node);
    }

    // Perform operation while holding appropriate lock
    List<Node> resultingNodes = new ArrayList<>();
    try (Mutex lock = lockUnallocated()) {
        for (Node node : unallocatedNodes) {
            Optional<Node> currentNode = db.readNode(node.hostname()); // Re-read while holding lock
            if (currentNode.isEmpty()) continue;
            resultingNodes.add(action.apply(currentNode.get(), lock));
        }
    }
    for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
        try (Mutex lock = lock(applicationNodes.getKey())) {
            for (Node node : applicationNodes.getValue()) {
                Optional<Node> currentNode = db.readNode(node.hostname()); // Re-read while holding lock
                if (currentNode.isEmpty()) continue;
                resultingNodes.add(action.apply(currentNode.get(), lock));
            }
        }
    }
    return resultingNodes;
}
/** Returns whether a tenant node can currently be allocated on the given host in this zone. */
public boolean canAllocateTenantNodeTo(Node host) {
    return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning());
}

/**
 * Returns whether a tenant node can be allocated on the given host: the host must be able to
 * run tenant nodes, must not be retiring, and must be in an allocatable state — with dynamic
 * provisioning, provisioned/ready hosts also count; otherwise only active hosts do.
 */
public static boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) {
    if ( ! host.type().canRun(NodeType.tenant)) return false;
    if (host.status().wantToRetire()) return false;
    if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false;
    if (dynamicProvisioning)
        return EnumSet.of(Node.State.active, Node.State.ready, Node.State.provisioned).contains(host.state());
    else
        return host.state() == Node.State.active;
}
/** Create a lock which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application) {
    return db.lock(application);
}

/** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application, Duration timeout) {
    return db.lock(application, timeout);
}

/** Create a lock which provides exclusive rights to modifying unallocated nodes */
public Mutex lockUnallocated() { return db.lockInactive(); }
/**
 * Returns the unallocated/application lock, and the node acquired under that lock.
 * Because the right lock depends on the node's current allocation, the node is re-read
 * after locking and the attempt is retried (up to 4 times) if ownership changed in between.
 */
public Optional<NodeMutex> lockAndGet(Node node) {
    Node staleNode = node;

    final int maxRetries = 4;
    for (int i = 0; i < maxRetries; ++i) {
        Mutex lockToClose = lock(staleNode);
        try {
            // Re-fetch under lock; first in the expected state, then in any state
            Optional<Node> freshNode = node(staleNode.hostname(), staleNode.state());
            if (freshNode.isEmpty()) {
                freshNode = node(staleNode.hostname());
                if (freshNode.isEmpty()) {
                    return Optional.empty(); // node was removed
                }
            }

            // The lock we hold is only correct if the owner has not changed meanwhile
            if (Objects.equals(freshNode.get().allocation().map(Allocation::owner),
                               staleNode.allocation().map(Allocation::owner))) {
                NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose);
                lockToClose = null; // ownership transferred to the returned NodeMutex
                return Optional.of(nodeMutex);
            }

            // Owner changed: release this lock and retry with the fresh node
            staleNode = freshNode.get();
        } finally {
            if (lockToClose != null) lockToClose.close();
        }
    }

    throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " +
                                    "fetching an up to date node under lock: " + node.hostname());
}
/** Returns the unallocated/application lock, and the node acquired under that lock. */
public Optional<NodeMutex> lockAndGet(String hostname) {
    return node(hostname).flatMap(this::lockAndGet);
}

/** Returns the unallocated/application lock, and the node acquired under that lock. */
public NodeMutex lockAndGetRequired(Node node) {
    return lockAndGet(node).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + node.hostname() + "'"));
}

/** Returns the unallocated/application lock, and the node acquired under that lock. */
public NodeMutex lockAndGetRequired(String hostname) {
    return lockAndGet(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
}

/** Returns the application lock if the node is allocated, and the unallocated lock otherwise. */
private Mutex lock(Node node) {
    return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
}

/** Returns the node with the given hostname, throwing if it does not exist. */
private Node requireNode(String hostname) {
    return node(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
}

/** Throws IllegalArgumentException with the given message. */
private void illegal(String message) {
    throw new IllegalArgumentException(message);
}
/** Returns whether node should be parked when deallocated by given agent */
private static boolean parkOnDeallocationOf(Node node, Agent agent) {
    if (node.state() == Node.State.parked) return false; // already parked
    if (agent == Agent.operator) return false;           // operators deallocate directly
    boolean retirementRequestedByOperator = node.status().wantToRetire() &&
                                            node.history().event(History.Event.Type.wantToRetire)
                                                .map(History.Event::agent)
                                                .map(a -> a == Agent.operator)
                                                .orElse(false);
    // Park when any decommission operation or operator-requested retirement is pending
    return node.status().wantToDeprovision() ||
           node.status().wantToRebuild() ||
           node.reports().getReport(Report.WANT_TO_ENCRYPT_ID).isPresent() ||
           retirementRequestedByOperator;
}

/** The different ways a host can be decommissioned */
private enum DecommissionOperation {
    deprovision,
    rebuild,
    encrypt,
}
} | class Nodes {
private static final Logger log = Logger.getLogger(Nodes.class.getName());

private final Zone zone;     // the zone this repository serves
private final Clock clock;   // injected for testable time
private final CuratorDatabaseClient db; // persistent node store

public Nodes(CuratorDatabaseClient db, Zone zone, Clock clock) {
    this.zone = zone;
    this.clock = clock;
    this.db = db;
}

/** Read and write all nodes to make sure they are stored in the latest version of the serialized format */
public void rewrite() {
    Instant start = clock.instant();
    int nodesWritten = 0;
    for (Node.State state : Node.State.values()) {
        List<Node> nodes = db.readNodes(state);
        // Writing back unchanged nodes re-serializes them in the current format
        db.writeTo(state, nodes, Agent.system, Optional.empty());
        nodesWritten += nodes.size();
    }
    Instant end = clock.instant();
    log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end)));
}
/**
 * Finds and returns the node with the hostname in any of the given states, or empty if not found
 *
 * @param hostname the full host name of the node
 * @param inState the states the node may be in. If no states are given, it will be returned from any state
 * @return the node, or empty if it was not found in any of the given states
 */
public Optional<Node> node(String hostname, Node.State... inState) {
    return db.readNode(hostname, inState);
}

/**
 * Returns a list of nodes in this repository in any of the given states
 *
 * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
 */
public NodeList list(Node.State... inState) {
    return NodeList.copyOf(db.readNodes(inState));
}

/** Returns a locked list of all nodes in this repository */
public LockedNodeList list(Mutex lock) {
    return new LockedNodeList(list().asList(), lock);
}
/**
 * Returns whether the zone managed by this node repository seems to be working.
 * If too many nodes are not responding, there is probably some zone-wide issue
 * and we should probably refrain from making changes to it.
 */
public boolean isWorking() {
    NodeList active = list(Node.State.active);
    if (active.size() <= 5) return true; // too few nodes to draw a conclusion

    double downRatio = (double) active.down().size() / (double) active.size();
    return downRatio <= 0.2;
}
/** Adds a list of newly created reserved nodes to the node repository */
public List<Node> addReservedNodes(LockedNodeList nodes) {
    for (Node node : nodes) {
        // Only allocated container (child) nodes may be added directly as reserved
        if ( node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
            illegal("Cannot add " + node + ": This is not a child node");
        if (node.allocation().isEmpty())
            illegal("Cannot add " + node + ": Child nodes need to be allocated");
        Optional<Node> existing = node(node.hostname());
        if (existing.isPresent())
            illegal("Cannot add " + node + ": A node with this name already exists (" +
                    existing.get() + ", " + existing.get().history() + "). Node to be added: " +
                    node + ", " + node.history());
    }
    return db.addNodesInState(nodes.asList(), Node.State.reserved, Agent.system);
}
/**
 * Adds a list of (newly created) nodes to the node repository as provisioned nodes.
 * If any of the nodes already exists in the deprovisioned state, the new node will be merged
 * with the history of that node.
 */
// NOTE(review): the javadoc above appears orphaned — the method it describes is not present here; verify against the full file.

/** Sets a list of nodes ready and returns the nodes in the ready state */
public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
    try (Mutex lock = lockUnallocated()) {
        List<Node> nodesWithResetFields = nodes.stream()
                .map(node -> {
                    if (node.state() != Node.State.provisioned && node.state() != Node.State.dirty)
                        illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
                    // Clear any pending retire/deprovision/rebuild flags before readying
                    return node.withWantToRetire(false,
                                                 false,
                                                 false,
                                                 Agent.system,
                                                 clock.instant());
                })
                .collect(Collectors.toList());

        return db.writeTo(Node.State.ready, nodesWithResetFields, agent, Optional.of(reason));
    }
}

/** Sets a single node ready by hostname; a no-op if it is already ready. */
public Node setReady(String hostname, Agent agent, String reason) {
    Node nodeToReady = requireNode(hostname);
    if (nodeToReady.state() == Node.State.ready) return nodeToReady;
    return setReady(List.of(nodeToReady), agent, reason).get(0);
}
/** Reserve nodes. This method does <b>not</b> lock the node repository */
public List<Node> reserve(List<Node> nodes) {
    return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
}

/** Activate nodes. This method does <b>not</b> lock the node repository */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
    return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
}
/**
 * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
 *
 * @param application the application the nodes belong to
 * @param nodes the nodes to make removable. These nodes MUST be in the active state.
 */
public void setRemovable(ApplicationId application, List<Node> nodes) {
    try (Mutex lock = lock(application)) {
        List<Node> removableNodes = nodes.stream()
                                         .map(node -> node.with(node.allocation().get().removable(true)))
                                         .collect(Collectors.toList());
        write(removableNodes, lock);
    }
}
/**
 * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the
 * transaction commits. Stateless nodes are deallocated (dirtied) while stateful nodes go to inactive
 * so their data is retained.
 */
public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) {
    var stateless = NodeList.copyOf(nodes).stateless();
    var stateful  = NodeList.copyOf(nodes).stateful();
    List<Node> written = new ArrayList<>();
    written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction.nested()));
    written.addAll(db.writeTo(Node.State.inactive, stateful.asList(), Agent.application, Optional.empty(), transaction.nested()));
    return written;
}
/**
 * Fails these nodes in a transaction and returns the nodes in the new state which will hold if the
 * transaction commits.
 */
public List<Node> fail(List<Node> nodes, ApplicationTransaction transaction) {
    return fail(nodes, Agent.application, "Failed by application", transaction.nested());
}

/** Fails these nodes in a standalone transaction and returns them in their new state. */
public List<Node> fail(List<Node> nodes, Agent agent, String reason) {
    NestedTransaction transaction = new NestedTransaction();
    nodes = fail(nodes, agent, reason, transaction);
    transaction.commit();
    return nodes;
}

private List<Node> fail(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
    nodes = nodes.stream()
                 // Clear the wantToFail flag as the node is now actually failed
                 .map(n -> n.withWantToFail(false, agent, clock.instant()))
                 .collect(Collectors.toList());
    return db.writeTo(Node.State.failed, nodes, agent, Optional.of(reason), transaction);
}
/** Move nodes to the dirty state */
public List<Node> deallocate(List<Node> nodes, Agent agent, String reason) {
    return performOn(NodeList.copyOf(nodes), (node, lock) -> deallocate(node, agent, reason));
}

/**
 * Deallocates this node and, if it is a host, all of its children.
 * Nodes already dirty are skipped; all remaining nodes must be in
 * provisioned, failed, parked or breakfixed.
 */
public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) {
    Node nodeToDirty = node(hostname).orElseThrow(() ->
            new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));

    List<Node> nodesToDirty =
            (nodeToDirty.type().isHost() ?
             Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
             Stream.of(nodeToDirty))
                    .filter(node -> node.state() != Node.State.dirty)
                    .collect(Collectors.toList());

    // Validate every node before dirtying any of them
    List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
                                                          .filter(node -> node.state() != Node.State.provisioned)
                                                          .filter(node -> node.state() != Node.State.failed)
                                                          .filter(node -> node.state() != Node.State.parked)
                                                          .filter(node -> node.state() != Node.State.breakfixed)
                                                          .map(Node::hostname)
                                                          .collect(Collectors.toList());
    if ( ! hostnamesNotAllowedToDirty.isEmpty())
        illegal("Could not deallocate " + nodeToDirty + ": " +
                hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]");

    return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).collect(Collectors.toList());
}
/**
 * Set a node dirty or parked, allowed if it is in the provisioned, inactive, failed or parked state.
 * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
 */
public Node deallocate(Node node, Agent agent, String reason) {
    NestedTransaction transaction = new NestedTransaction();
    Node deallocated = deallocate(node, agent, reason, transaction);
    transaction.commit();
    return deallocated;
}

/** Moves each of the given nodes to dirty (or parked) as part of the given transaction. */
public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
    return nodes.stream().map(node -> deallocate(node, agent, reason, transaction)).collect(Collectors.toList());
}

/**
 * Moves a node to dirty as part of the given transaction, or parks it instead when
 * parkOnDeallocationOf says the node should be held back.
 */
public Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) {
    if (parkOnDeallocationOf(node, agent)) {
        // Keep the allocation only when the node is being parked for encryption
        boolean keepAllocation = node.reports().getReport(Report.WANT_TO_ENCRYPT_ID).isPresent();
        return park(node.hostname(), keepAllocation, agent, reason, transaction);
    } else {
        return db.writeTo(Node.State.dirty, List.of(node), agent, Optional.of(reason), transaction).get(0);
    }
}
/**
 * Fails this node and returns it in its new state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node fail(String hostname, Agent agent, String reason) {
    return fail(hostname, true, agent, reason);
}

/** Fails this node, optionally discarding its allocation, and returns it in its new state. */
public Node fail(String hostname, boolean keepAllocation, Agent agent, String reason) {
    return move(hostname, Node.State.failed, agent, keepAllocation, Optional.of(reason));
}

/**
 * Fails all the nodes that are children of hostname before finally failing the hostname itself.
 * Non-active nodes are failed immediately, while active nodes are marked as wantToFail.
 * The host is failed if it has no active nodes and marked wantToFail if it has.
 *
 * @return all the nodes that were changed by this request
 */
public List<Node> failOrMarkRecursively(String hostname, Agent agent, String reason) {
    NodeList children = list().childrenOf(hostname);
    List<Node> changed = performOn(children, (node, lock) -> failOrMark(node, agent, reason, lock));

    // The host itself: fail directly only when none of its children are active
    if (children.state(Node.State.active).isEmpty())
        changed.add(move(hostname, Node.State.failed, agent, true, Optional.of(reason)));
    else
        changed.addAll(performOn(NodeList.of(node(hostname).orElseThrow()), (node, lock) -> failOrMark(node, agent, reason, lock)));

    return changed;
}

/** Marks an active node as wantToFail; fails any other node immediately. Caller must hold the given lock. */
private Node failOrMark(Node node, Agent agent, String reason, Mutex lock) {
    if (node.state() == Node.State.active) {
        node = node.withWantToFail(true, agent, clock.instant());
        write(node, lock);
        return node;
    } else {
        return move(node.hostname(), Node.State.failed, agent, true, Optional.of(reason));
    }
}
/**
 * Parks this node and returns it in its new state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
    NestedTransaction transaction = new NestedTransaction();
    Node parked = park(hostname, keepAllocation, agent, reason, transaction);
    transaction.commit();
    return parked;
}

/** Parks this node as part of the given transaction. */
private Node park(String hostname, boolean keepAllocation, Agent agent, String reason, NestedTransaction transaction) {
    return move(hostname, Node.State.parked, agent, keepAllocation, Optional.of(reason), transaction);
}

/**
 * Parks all the nodes that are children of hostname before finally parking the hostname itself.
 *
 * @return List of all the parked nodes in their new state
 */
public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
    return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason));
}
/**
 * Moves a previously failed or parked node back to the active state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node reactivate(String hostname, Agent agent, String reason) {
    return move(hostname, Node.State.active, agent, true, Optional.of(reason));
}

/**
 * Moves a host to breakfixed state, removing any children.
 *
 * @return the removed children plus the breakfixed host, in their new states
 */
public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) {
    Node node = requireNode(hostname);
    try (Mutex lock = lockUnallocated()) {
        requireBreakfixable(node); // validated under lock to avoid racing state changes
        NestedTransaction transaction = new NestedTransaction();
        List<Node> removed = removeChildren(node, false, transaction);
        removed.add(move(node.hostname(), Node.State.breakfixed, agent, true, Optional.of(reason), transaction));
        transaction.commit();
        return removed;
    }
}
/** Moves all children of hostname, then hostname itself, to the given state in a single transaction. */
private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
    NestedTransaction transaction = new NestedTransaction();
    List<Node> moved = list().childrenOf(hostname).asList().stream()
                             .map(child -> move(child.hostname(), toState, agent, true, reason, transaction))
                             .collect(Collectors.toList());
    moved.add(move(hostname, toState, agent, true, reason, transaction));
    transaction.commit();
    return moved;
}

/** Move a node to given state */
private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason) {
    NestedTransaction transaction = new NestedTransaction();
    Node moved = move(hostname, toState, agent, keepAllocation, reason, transaction);
    transaction.commit();
    return moved;
}

/**
 * Move a node to given state as part of a transaction.
 * Moving to active validates that the node has an allocation not clashing with any
 * currently active node of the same application (same cluster and index).
 */
private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason, NestedTransaction transaction) {
    try (NodeMutex lock = lockAndGetRequired(hostname)) {
        Node node = lock.node();
        if (toState == Node.State.active) {
            if (node.allocation().isEmpty()) illegal("Could not set " + node + " active: It has no allocation");
            if (!keepAllocation) illegal("Could not set " + node + " active: Requested to discard allocation");
            for (Node currentActive : list(Node.State.active).owner(node.allocation().get().owner())) {
                // Reject activation if another active node occupies the same cluster slot
                if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
                    && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
                    illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
            }
        }
        if (!keepAllocation && node.allocation().isPresent()) {
            node = node.withoutAllocation();
        }
        if (toState == Node.State.deprovisioned) {
            node = node.with(IP.Config.EMPTY); // deprovisioned nodes keep no IP config
        }
        return db.writeTo(toState, List.of(node), agent, reason, transaction).get(0);
    }
}
/*
 * This method is used by the REST API to handle readying nodes for new allocations. For Linux
 * containers this will remove the node from node repository, otherwise the node will be moved to state ready.
 */
public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
    Node node = requireNode(hostname);
    if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
        // Container nodes are removed entirely rather than recycled; they must come from dirty.
        if (node.state() != Node.State.dirty)
            illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
        return removeRecursively(node, true).get(0);
    }
    if (node.state() == Node.State.ready) return node;  // already ready: no-op
    // A child node cannot be readied while its parent host has hard failures.
    Node parentHost = node.parentHostname().flatMap(this::node).orElse(node);
    List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost);
    if ( ! failureReasons.isEmpty())
        illegal(node + " cannot be readied because it has hard failures: " + failureReasons);
    return setReady(List.of(node), agent, reason).get(0);
}
/**
 * Removes all the nodes that are children of hostname before finally removing the hostname itself.
 *
 * @return a List of all the nodes that have been removed or (for hosts) deprovisioned
 * @throws NoSuchNodeException if the node is not found
 */
public List<Node> removeRecursively(String hostname) {
    Node node = requireNode(hostname);
    return removeRecursively(node, false);
}
/** Removes the given node and its children; with force, state checks are skipped (see requireRemovable). */
public List<Node> removeRecursively(Node node, boolean force) {
    try (Mutex lock = lockUnallocated()) {
        requireRemovable(node, false, force);
        NestedTransaction transaction = new NestedTransaction();
        final List<Node> removed;
        if (!node.type().isHost()) {
            removed = List.of(node);
            db.removeNodes(removed, transaction);
        } else {
            removed = removeChildren(node, force, transaction);
            if (zone.getCloud().dynamicProvisioning()) {
                // Dynamically provisioned hosts disappear entirely; static hosts are kept
                // in state deprovisioned (with allocation and IPs discarded).
                db.removeNodes(List.of(node), transaction);
            } else {
                move(node.hostname(), Node.State.deprovisioned, Agent.system, false, Optional.empty(), transaction);
            }
            removed.add(node);
        }
        transaction.commit();
        return removed;
    }
}
/** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */
public void forget(Node node) {
    if (node.state() != Node.State.deprovisioned)
        throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten");
    NestedTransaction transaction = new NestedTransaction();
    db.removeNodes(List.of(node), transaction);
    transaction.commit();
}
/** Verifies each child of the given node is removable, removes them in the transaction, and returns them as a mutable list. */
private List<Node> removeChildren(Node node, boolean force, NestedTransaction transaction) {
    List<Node> children = new ArrayList<>(list().childrenOf(node).asList());
    for (Node child : children)
        requireRemovable(child, true, force);
    db.removeNodes(children, transaction);
    return children;
}
/**
 * Throws if the given node cannot be removed. Removal is allowed if:
 * - Tenant node:
 * - non-recursively: node is unallocated
 * - recursively: node is unallocated or node is in failed|parked
 * - Host node: iff in state provisioned|failed|parked
 * - Child node:
 * - non-recursively: node in state ready
 * - recursively: child is in state provisioned|failed|parked|dirty|ready
 *
 * @param force skip all checks and allow the removal unconditionally
 * @throws IllegalArgumentException if removal is not allowed
 */
private void requireRemovable(Node node, boolean removingRecursively, boolean force) {
    if (force) return;
    // First gate: an allocated tenant node is only removable recursively, and only from failed|parked.
    if (node.type() == NodeType.tenant && node.allocation().isPresent()) {
        EnumSet<Node.State> removableStates = EnumSet.of(Node.State.failed, Node.State.parked);
        if (!removingRecursively || !removableStates.contains(node.state()))
            illegal(node + " is currently allocated and cannot be removed while in " + node.state());
    }
    // Second gate: state must be removable for the node type and removal mode.
    final Set<Node.State> removableStates;
    if (node.type().isHost()) {
        removableStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked);
    } else {
        removableStates = removingRecursively
                ? EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked, Node.State.dirty, Node.State.ready)
                : EnumSet.of(Node.State.ready);
    }
    if (!removableStates.contains(node.state()))
        illegal(node + " can not be removed while in " + node.state());
}
/**
 * Throws if given node cannot be breakfixed.
 * Breakfix is allowed if the following is true:
 * - Node is tenant host
 * - Node is in zone without dynamic provisioning
 * - Node is in parked or failed state
 *
 * @throws IllegalArgumentException if any of the conditions above is violated
 */
private void requireBreakfixable(Node node) {
    if (zone.getCloud().dynamicProvisioning()) {
        illegal("Can not breakfix in zone: " + zone);
    }
    if (node.type() != NodeType.host) {
        illegal(node + " can not be breakfixed as it is not a tenant host");
    }
    Set<Node.State> legalStates = EnumSet.of(Node.State.failed, Node.State.parked);
    if (! legalStates.contains(node.state())) {
        // Fixed message: this guard is about breakfixing, not removal.
        illegal(node + " can not be breakfixed as it is not in the states " + legalStates);
    }
}
/**
 * Increases the restart generation of the active nodes matching given filter.
 *
 * @return the nodes in their new state
 */
public List<Node> restartActive(Predicate<Node> filter) {
    // Restricts the filter to active nodes, then delegates to restart.
    return restart(StateFilter.from(Node.State.active).and(filter));
}
/**
 * Increases the restart generation of the any nodes matching given filter.
 *
 * NOTE(review): assumes every matched node is allocated — the restart generation lives
 * on the allocation, so allocation().get() would fail for unallocated matches.
 *
 * @return the nodes in their new state
 */
public List<Node> restart(Predicate<Node> filter) {
    return performOn(filter, (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()),
                                                   lock));
}
/**
 * Increases the reboot generation of the nodes matching the filter.
 *
 * @return the nodes in their new state
 */
public List<Node> reboot(Predicate<Node> filter) {
    // Reboot generation lives on status, so this works for unallocated nodes as well.
    return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock));
}
/**
 * Sets the wanted OS version on all nodes matching the given filter.
 *
 * @return the nodes in their new state
 */
public List<Node> upgradeOs(Predicate<Node> filter, Optional<Version> version) {
    return performOn(filter, (node, lock) ->
            write(node.with(node.status().withOsVersion(node.status().osVersion().withWanted(version))), lock));
}
/** Retire nodes matching given filter, recording the requesting agent and time. */
public List<Node> retire(Predicate<Node> filter, Agent agent, Instant instant) {
    return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock));
}
/** Retire and deprovision given host and all of its children. @return the updated nodes, empty if the host was not found */
public List<Node> deprovision(String hostname, Agent agent, Instant instant) {
    return decommission(hostname, DecommissionOperation.deprovision, agent, instant);
}
/** Retire and rebuild given host and all of its children. @return the updated nodes, empty if the host was not found */
public List<Node> rebuild(String hostname, Agent agent, Instant instant) {
    return decommission(hostname, DecommissionOperation.rebuild, agent, instant);
}
/** Retire and encrypt given host and all of its children. @return the updated nodes, empty if the host was not found */
public List<Node> encrypt(String hostname, Agent agent, Instant instant) {
    return decommission(hostname, DecommissionOperation.encrypt, agent, instant);
}
/**
 * Marks the given host and all of its children as wanting to retire, with flags/report
 * matching the requested decommission operation. Children are updated before the host,
 * all while holding both the host's lock and the unallocated-nodes lock.
 *
 * @return the updated children followed by the host, or an empty list if the host is unknown
 * @throws IllegalArgumentException if the given hostname is not a host
 */
private List<Node> decommission(String hostname, DecommissionOperation op, Agent agent, Instant instant) {
    Optional<NodeMutex> nodeMutex = lockAndGet(hostname);
    if (nodeMutex.isEmpty()) return List.of();
    List<Node> result;
    boolean wantToDeprovision = op == DecommissionOperation.deprovision;
    boolean wantToRebuild = op == DecommissionOperation.rebuild;
    Optional<Report> wantToEncryptReport = op == DecommissionOperation.encrypt
            ? Optional.of(Report.basicReport(Report.WANT_TO_ENCRYPT_ID, Report.Type.UNSPECIFIED, instant, ""))
            : Optional.empty();
    try (NodeMutex lock = nodeMutex.get(); Mutex allocationLock = lockUnallocated()) {
        Node host = lock.node();
        // Bug fix: this check used to run before the try-with-resources, leaking the
        // acquired NodeMutex when it threw. It is now done under the try so the lock
        // is always released.
        if (!host.type().isHost()) throw new IllegalArgumentException("Cannot " + op + " non-host " + host);
        result = performOn(list(allocationLock).childrenOf(host), (node, nodeLock) -> {
            Node newNode = node.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant);
            if (wantToEncryptReport.isPresent()) {
                newNode = newNode.with(newNode.reports().withReport(wantToEncryptReport.get()));
            }
            return write(newNode, nodeLock);
        });
        Node newHost = host.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant);
        if (wantToEncryptReport.isPresent()) {
            newHost = newHost.with(newHost.reports().withReport(wantToEncryptReport.get()));
        }
        result.add(write(newHost, lock));
    }
    return result;
}
/**
 * Writes this node after it has changed some internal state but NOT changed its state field.
 * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
 *
 * @param lock already acquired lock
 * @return the written node for convenience
 */
public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); }
/**
 * Writes these nodes after they have changed some internal state but NOT changed their state field.
 * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
 *
 * @param lock already acquired lock; only present to force callers to hold one
 * @return the written nodes for convenience
 */
public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
    return db.writeTo(nodes, Agent.system, Optional.empty());
}
/** Performs an operation, requiring locking, on all nodes matching the given predicate. */
private List<Node> performOn(Predicate<Node> filter, BiFunction<Node, Mutex, Node> action) {
    return performOn(list().matching(filter), action);
}
/**
 * Performs an operation requiring locking on all nodes matching some filter.
 *
 * Nodes are grouped by owning application so each group is handled under its application
 * lock; unallocated nodes are handled under the unallocated-nodes lock. Each node is
 * re-read under its lock, and nodes that have disappeared meanwhile are skipped.
 *
 * @param action the action to perform
 * @return the set of nodes on which the action was performed, as they became as a result of the operation
 */
private List<Node> performOn(NodeList nodes, BiFunction<Node, Mutex, Node> action) {
    List<Node> unallocatedNodes = new ArrayList<>();
    ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
    for (Node node : nodes) {
        if (node.allocation().isPresent())
            allocatedNodes.put(node.allocation().get().owner(), node);
        else
            unallocatedNodes.add(node);
    }
    List<Node> resultingNodes = new ArrayList<>();
    try (Mutex lock = lockUnallocated()) {
        for (Node node : unallocatedNodes) {
            Optional<Node> currentNode = db.readNode(node.hostname());
            if (currentNode.isEmpty()) continue;
            resultingNodes.add(action.apply(currentNode.get(), lock));
        }
    }
    for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
        try (Mutex lock = lock(applicationNodes.getKey())) {
            for (Node node : applicationNodes.getValue()) {
                Optional<Node> currentNode = db.readNode(node.hostname());
                if (currentNode.isEmpty()) continue;
                resultingNodes.add(action.apply(currentNode.get(), lock));
            }
        }
    }
    return resultingNodes;
}
/** Returns whether tenant nodes may be allocated on the given host in this zone. */
public boolean canAllocateTenantNodeTo(Node host) {
    return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning());
}
/** Returns whether tenant nodes may be allocated on the given host, given the provisioning mode. */
public static boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) {
    boolean eligible = host.type().canRun(NodeType.tenant)
                       && ! host.status().wantToRetire()
                       && ! host.allocation().map(alloc -> alloc.membership().retired()).orElse(false);
    if ( ! eligible) return false;
    // With dynamic provisioning, hosts still being prepared may also receive allocations.
    return dynamicProvisioning
           ? EnumSet.of(Node.State.active, Node.State.ready, Node.State.provisioned).contains(host.state())
           : host.state() == Node.State.active;
}
/** Create a lock which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application) {
    return db.lock(application);
}
/** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application, Duration timeout) {
    return db.lock(application, timeout);
}
/** Create a lock which provides exclusive rights to modifying unallocated nodes */
public Mutex lockUnallocated() { return db.lockInactive(); }
/** Returns the unallocated/application lock, and the node acquired under that lock. */
public Optional<NodeMutex> lockAndGet(Node node) {
    Node staleNode = node;
    final int maxRetries = 4;
    // The right lock depends on the node's owner, which may change between reading the
    // node and acquiring the lock: re-read under the lock and retry until owner is stable.
    for (int i = 0; i < maxRetries; ++i) {
        Mutex lockToClose = lock(staleNode);
        try {
            Optional<Node> freshNode = node(staleNode.hostname(), staleNode.state());
            if (freshNode.isEmpty()) {
                // Not found in the expected state; fall back to a lookup across all states.
                freshNode = node(staleNode.hostname());
                if (freshNode.isEmpty()) {
                    return Optional.empty();
                }
            }
            if (Objects.equals(freshNode.get().allocation().map(Allocation::owner),
                               staleNode.allocation().map(Allocation::owner))) {
                // Owner unchanged: we hold the right lock. Transfer ownership to the NodeMutex.
                NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose);
                lockToClose = null;
                return Optional.of(nodeMutex);
            }
            staleNode = freshNode.get();
        } finally {
            if (lockToClose != null) lockToClose.close();
        }
    }
    throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " +
                                    "fetching an up to date node under lock: " + node.hostname());
}
/** Returns the unallocated/application lock, and the node acquired under that lock; empty if the hostname is unknown. */
public Optional<NodeMutex> lockAndGet(String hostname) {
    return node(hostname).flatMap(this::lockAndGet);
}
/** Returns the unallocated/application lock, and the node acquired under that lock. @throws NoSuchNodeException if not found */
public NodeMutex lockAndGetRequired(Node node) {
    return lockAndGet(node).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + node.hostname() + "'"));
}
/** Returns the unallocated/application lock, and the node acquired under that lock. @throws NoSuchNodeException if not found */
public NodeMutex lockAndGetRequired(String hostname) {
    return lockAndGet(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
}
/** Returns the application lock for an allocated node, the unallocated-nodes lock otherwise. */
private Mutex lock(Node node) {
    return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
}
/** Returns the node with the given hostname. @throws NoSuchNodeException if not found */
private Node requireNode(String hostname) {
    return node(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
}
/** Shorthand for throwing an IllegalArgumentException with the given message. */
private void illegal(String message) {
    throw new IllegalArgumentException(message);
}
/** Returns whether node should be parked when deallocated by given agent */
private static boolean parkOnDeallocationOf(Node node, Agent agent) {
    if (node.state() == Node.State.parked) return false;  // already parked
    if (agent == Agent.operator) return false;            // operators decide themselves
    // Retirement counts only if it was requested by an operator, per the history events.
    boolean retirementRequestedByOperator = node.status().wantToRetire() &&
                                            node.history().event(History.Event.Type.wantToRetire)
                                                .map(History.Event::agent)
                                                .map(a -> a == Agent.operator)
                                                .orElse(false);
    return node.status().wantToDeprovision() ||
           node.status().wantToRebuild() ||
           node.reports().getReport(Report.WANT_TO_ENCRYPT_ID).isPresent() ||
           retirementRequestedByOperator;
}
/** The different ways a host can be decommissioned */
private enum DecommissionOperation {
    // See decommission(): deprovision/rebuild map to the wantToDeprovision/wantToRebuild
    // flags; encrypt attaches a WANT_TO_ENCRYPT report to host and children.
    deprovision,
    rebuild,
    encrypt,
}
} |
```suggestion result.whenCompleteAsync((r, t) -> { if (!finalCallbackInvoked.get()) { resultCallback.onNextResult(r, t); } if (pending.decrementAndGet() == 0 && finalCallbackInvoked.compareAndSet(false, true)) { resultCallback.onComplete(); overallResult.complete(null); } }, resultExecutor); ``` Let result executor do more work, for less contention? | CompletableFuture<Void> feedMany(InputStream jsonStream, int size, ResultCallback resultCallback) {
// Body of feedMany(InputStream, int, ResultCallback): dispatches every operation parsed
// off the stream, and completes overallResult once all dispatched operations have settled.
RingBufferStream buffer = new RingBufferStream(jsonStream, size);
CompletableFuture<Void> overallResult = new CompletableFuture<>();
CompletableFuture<Result> result;
// Starts at 1 so completion cannot fire while operations are still being dispatched;
// the extra count is released after the dispatch loop below.
AtomicInteger pending = new AtomicInteger(1);
AtomicBoolean finalCallbackInvoked = new AtomicBoolean();
try {
    while ((result = buffer.next()) != null) {
        pending.incrementAndGet();
        // Review fix applied: run the callback directly on the result executor via
        // whenCompleteAsync instead of whenComplete + a separate execute() per result —
        // one handoff fewer, less contention on the executor's queue.
        result.whenCompleteAsync((r, t) -> {
            if (!finalCallbackInvoked.get()) {
                resultCallback.onNextResult(r, t);
            }
            if (pending.decrementAndGet() == 0 && finalCallbackInvoked.compareAndSet(false, true)) {
                resultCallback.onComplete();
                overallResult.complete(null);
            }
        }, resultExecutor);
    }
    // Release the dispatch count; completes here only if all results already settled.
    if (pending.decrementAndGet() == 0 && finalCallbackInvoked.compareAndSet(false, true)) {
        resultExecutor.execute(() -> {
            resultCallback.onComplete();
            overallResult.complete(null);
        });
    }
} catch (Exception e) {
    // Parse/dispatch failure: report once via onError and suppress further callbacks.
    if (finalCallbackInvoked.compareAndSet(false, true)) {
        resultExecutor.execute(() -> {
            resultCallback.onError(e);
            overallResult.completeExceptionally(e);
        });
    }
}
return overallResult;
} | }); | CompletableFuture<Void> feedMany(InputStream jsonStream, int size, ResultCallback resultCallback) {
// Body of feedMany(InputStream, int, ResultCallback): dispatches every parsed operation
// and completes overallResult once all of them have settled.
RingBufferStream buffer = new RingBufferStream(jsonStream, size);
CompletableFuture<Void> overallResult = new CompletableFuture<>();
CompletableFuture<Result> result;
// Starts at 1 so completion cannot fire while dispatch is still in progress.
AtomicInteger pending = new AtomicInteger(1);
AtomicBoolean finalCallbackInvoked = new AtomicBoolean();
try {
    while ((result = buffer.next()) != null) {
        pending.incrementAndGet();
        // Callbacks run directly on the result executor (whenCompleteAsync) so every
        // callback is serialized on that single thread without an extra execute() hop.
        result.whenCompleteAsync((r, t) -> {
            if (!finalCallbackInvoked.get()) {
                resultCallback.onNextResult(r, t);
            }
            // compareAndSet guarantees onComplete/onError is invoked exactly once.
            if (pending.decrementAndGet() == 0 && finalCallbackInvoked.compareAndSet(false, true)) {
                resultCallback.onComplete();
                overallResult.complete(null);
            }
        }, resultExecutor);
    }
    // Release the initial dispatch count; completes here only if all results settled already.
    if (pending.decrementAndGet() == 0 && finalCallbackInvoked.compareAndSet(false, true)) {
        resultExecutor.execute(() -> {
            resultCallback.onComplete();
            overallResult.complete(null);
        });
    }
} catch (Exception e) {
    // Parse/dispatch failure: report once via onError; later per-result callbacks are suppressed.
    if (finalCallbackInvoked.compareAndSet(false, true)) {
        resultExecutor.execute(() -> {
            resultCallback.onError(e);
            overallResult.completeExceptionally(e);
        });
    }
}
return overallResult;
} | class JsonFeeder implements Closeable {
// All ResultCallback invocations are funneled through this single daemon thread,
// so callbacks are serialized and implementations need not be thread-safe.
private final ExecutorService resultExecutor = Executors.newSingleThreadExecutor(r -> {
    Thread t = new Thread(r, "json-feeder-result-executor");
    t.setDaemon(true);
    return t;
});
private final FeedClient client;                    // underlying feed client; closed by close()
private final OperationParameters protoParameters;  // defaults applied to every parsed operation
/** Private: construct via {@link #builder(FeedClient)}. */
private JsonFeeder(FeedClient client, OperationParameters protoParameters) {
    this.client = client;
    this.protoParameters = protoParameters;
}
/** Callbacks reporting the progress of a feed; all methods are invoked on the single result-executor thread. */
public interface ResultCallback {
    /**
     * Invoked after each operation has either completed successfully or failed
     *
     * @param result Non-null if operation completed successfully
     * @param error Non-null if operation failed
     */
    void onNextResult(Result result, Throwable error);
    /**
     * Invoked if an unrecoverable error occurred during feed processing,
     * after which no other {@link ResultCallback} methods are invoked.
     */
    void onError(Throwable error);
    /**
     * Invoked when all feed operations are either completed successfully or failed.
     */
    void onComplete();
}
/** Creates a builder for a JsonFeeder wrapping the given client. */ public static Builder builder(FeedClient client) { return new Builder(client); }
/** Feeds a stream containing a JSON array of feed operations on the form
 * <pre>
 * [
 * {
 * "id": "id:ns:type::boo",
 * "fields": { ... document fields ... }
 * },
 * {
 * "put": "id:ns:type::foo",
 * "fields": { ... document fields ... }
 * },
 * {
 * "update": "id:ns:type:n=4:bar",
 * "create": true,
 * "fields": { ... partial update fields ... }
 * },
 * {
 * "remove": "id:ns:type:g=foo:bar",
 * "condition": "type.baz = \"bax\""
 * },
 * ...
 * ]
 * </pre>
 * Note that {@code "id"} is an alias for the document put operation.
 */
public CompletableFuture<Void> feedMany(InputStream jsonStream, ResultCallback resultCallback) {
    // 1 << 26 = 64 MiB default ring buffer size for the streaming parser.
    return feedMany(jsonStream, 1 << 26, resultCallback);
}
/** Jackson factory used to create one streaming parser per feed. */ private static final JsonFactory factory = new JsonFactory();
/**
 * Closes the underlying feed client, then drains the result executor so pending
 * callbacks get to run.
 *
 * @throws IOException if pending result callbacks fail to finish within 30 seconds
 */
@Override public void close() throws IOException {
    client.close();
    resultExecutor.shutdown();
    try {
        if (!resultExecutor.awaitTermination(30, TimeUnit.SECONDS)) {
            // Fixed message: the client is already closed at this point — it is the
            // result callbacks that did not drain in time.
            throw new IOException("Failed to complete pending result callbacks in time");
        }
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();  // preserve the interrupt for the caller
    }
}
/**
 * A ring buffer exposed as an InputStream, filled from the wrapped stream by a dedicated
 * "feed-reader" thread and drained by the Jackson parser via {@link #read}.
 *
 * Positions are monotonically increasing longs guarded by {@code lock}, with the invariant
 * tail <= pos <= head and head - tail <= size: {@code head} is how far the reader thread has
 * filled, {@code pos} how far the parser has consumed, and {@code tail} how far document
 * payloads have been copied out (advanced by getDocumentJson), which keeps a parsed
 * document's bytes from being overwritten before they are extracted.
 */
private class RingBufferStream extends InputStream {
    private final byte[] b = new byte[1];  // scratch buffer for single-byte read()
    private final InputStream in;
    private final byte[] data;
    private final int size;
    private final Object lock = new Object();  // guards tail/pos/head/done/thrown
    private Throwable thrown = null;           // failure from the reader thread, rethrown on read()
    private long tail = 0;
    private long pos = 0;
    private long head = 0;
    private boolean done = false;
    private final OperationParserAndExecutor parserAndExecutor;
    RingBufferStream(InputStream in, int size) {
        this.in = in;
        this.data = new byte[size];
        this.size = size;
        // Start filling before the parser is created, since parser creation may read ahead.
        new Thread(this::fill, "feed-reader").start();
        try { this.parserAndExecutor = new RingBufferBackedOperationParserAndExecutor(factory.createParser(this)); }
        catch (IOException e) { throw new UncheckedIOException(e); }
    }
    @Override
    public int read() throws IOException {
        return read(b, 0, 1) == -1 ? -1 : b[0];
    }
    @Override
    public int read(byte[] buffer, int off, int len) throws IOException {
        try {
            int ready;
            // Wait until the reader thread has produced data, or the stream has ended.
            synchronized (lock) {
                while ((ready = (int) (head - pos)) == 0 && ! done)
                    lock.wait();
            }
            if (thrown != null) throw new RuntimeException("Error reading input", thrown);
            if (ready == 0) return -1;  // done and fully drained
            ready = min(ready, len);
            int offset = (int) (pos % size);
            int length = min(ready, size - offset);
            System.arraycopy(data, offset, buffer, off, length);
            if (length < ready)  // wrapped around the end of the ring
                System.arraycopy(data, 0, buffer, off + length, ready - length);
            pos += ready;
            return ready;
        }
        catch (InterruptedException e) {
            throw new InterruptedIOException("Interrupted waiting for data: " + e.getMessage());
        }
    }
    public CompletableFuture<Result> next() throws IOException {
        return parserAndExecutor.next();
    }
    // Extracted documents are re-wrapped as {"fields": ... } payloads.
    private final byte[] prefix = "{\"fields\":".getBytes(UTF_8);
    private byte[] copy(long start, long end) {
        int length = (int) (end - start);
        byte[] buffer = new byte[prefix.length + length + 1];
        System.arraycopy(prefix, 0, buffer, 0, prefix.length);
        int offset = (int) (start % size);
        int toWrite = min(length, size - offset);
        System.arraycopy(data, offset, buffer, prefix.length, toWrite);
        if (toWrite < length)  // wrapped around the end of the ring
            System.arraycopy(data, 0, buffer, prefix.length + toWrite, length - toWrite);
        buffer[buffer.length - 1] = '}';
        return buffer;
    }
    @Override
    public void close() throws IOException {
        // Wake both the reader thread and any blocked read() before closing the source.
        synchronized (lock) {
            done = true;
            lock.notifyAll();
        }
        in.close();
    }
    private void fill() {
        // Runs on the "feed-reader" thread: copies from the wrapped stream into the ring.
        try {
            while (true) {
                int free;
                synchronized (lock) {
                    while ((free = (int) (tail + size - head)) <= 0 && ! done)
                        lock.wait();
                }
                if (done) break;
                int off = (int) (head % size);
                // Read at most to the end of the ring, and at most 8 KiB per call.
                int len = min(min(free, size - off), 1 << 13);
                int read = in.read(data, off, len);
                synchronized (lock) {
                    if (read < 0) done = true;
                    else head += read;
                    lock.notify();
                }
            }
        }
        catch (Throwable t) {
            // Record the failure for read() to rethrow; mark the stream done.
            synchronized (lock) {
                done = true;
                thrown = t;
            }
        }
    }
    private class RingBufferBackedOperationParserAndExecutor extends OperationParserAndExecutor {
        RingBufferBackedOperationParserAndExecutor(JsonParser parser) throws IOException { super(parser, true); }
        @Override
        String getDocumentJson(long start, long end) {
            String payload = new String(copy(start, end), UTF_8);
            // Payload extracted: release the ring space up to 'end' to the reader thread.
            synchronized (lock) {
                tail = end;
                lock.notify();
            }
            return payload;
        }
    }
}
/**
 * Pulls feed operations (put/update/remove) off a Jackson parser one at a time and
 * dispatches each to the wrapped FeedClient. Subclasses supply the raw document JSON
 * for a byte range seen by the parser.
 */
private abstract class OperationParserAndExecutor {
    private final JsonParser parser;
    private final boolean multipleOperations;  // true: input is a JSON array of operations
    protected OperationParserAndExecutor(JsonParser parser, boolean multipleOperations) throws IOException {
        this.parser = parser;
        this.multipleOperations = multipleOperations;
        if (multipleOperations) expect(START_ARRAY);
    }
    /** Returns the document JSON between the given byte offsets, wrapped as a {"fields": ...} payload. */
    abstract String getDocumentJson(long start, long end);
    /** Parses and dispatches the next operation; returns null when the input is exhausted. */
    CompletableFuture<Result> next() throws IOException {
        JsonToken token = parser.nextToken();
        if (token == END_ARRAY && multipleOperations) return null;
        else if (token == null && !multipleOperations) return null;
        else if (token == START_OBJECT);  // expected: start of the next operation object
        else throw new IllegalArgumentException("Unexpected token '" + parser.currentToken() + "' at offset " + parser.getTokenLocation().getByteOffset());
        long start = 0, end = -1;
        OperationType type = null;
        DocumentId id = null;
        OperationParameters parameters = protoParameters;
        loop: while (true) {
            switch (parser.nextToken()) {
                case FIELD_NAME:
                    switch (parser.getText()) {
                        case "id":  // alias for "put"
                        case "put": type = PUT; id = readId(); break;
                        case "update": type = UPDATE; id = readId(); break;
                        case "remove": type = REMOVE; id = readId(); break;
                        case "condition": parameters = parameters.testAndSetCondition(readString()); break;
                        case "create": parameters = parameters.createIfNonExistent(readBoolean()); break;
                        case "fields": {
                            // Record the byte span of the fields object; its content is not
                            // tokenized here, only skipped by tracking brace depth.
                            expect(START_OBJECT);
                            start = parser.getTokenLocation().getByteOffset();
                            int depth = 1;
                            while (depth > 0) switch (parser.nextToken()) {
                                case START_OBJECT: ++depth; break;
                                case END_OBJECT: --depth; break;
                            }
                            end = parser.getTokenLocation().getByteOffset() + 1;
                            break;
                        }
                        default: throw new IllegalArgumentException("Unexpected field name '" + parser.getText() + "' at offset " +
                                                                    parser.getTokenLocation().getByteOffset());
                    }
                    break;
                case END_OBJECT:
                    break loop;  // operation object fully consumed
                default:
                    throw new IllegalArgumentException("Unexpected token '" + parser.currentToken() + "' at offset " +
                                                       parser.getTokenLocation().getByteOffset());
            }
        }
        if (id == null)
            throw new IllegalArgumentException("No document id for document at offset " + start);
        if (end < start)
            throw new IllegalArgumentException("No 'fields' object for document at offset " + parser.getTokenLocation().getByteOffset());
        String payload = getDocumentJson(start, end);
        switch (type) {
            case PUT: return client.put (id, payload, parameters);
            case UPDATE: return client.update(id, payload, parameters);
            case REMOVE: return client.remove(id, parameters);
            default: throw new IllegalStateException("Unexpected operation type '" + type + "'");
        }
    }
    /** Advances the parser and fails unless the next token is the expected one. */
    void expect(JsonToken token) throws IOException {
        if (parser.nextToken() != token)
            throw new IllegalArgumentException("Expected '" + token + "' at offset " + parser.getTokenLocation().getByteOffset() +
                                               ", but found '" + parser.currentToken() + "' (" + parser.getText() + ")");
    }
    private String readString() throws IOException {
        String value = parser.nextTextValue();
        if (value == null)
            throw new IllegalArgumentException("Expected '" + VALUE_STRING + "' at offset " + parser.getTokenLocation().getByteOffset() +
                                               ", but found '" + parser.currentToken() + "' (" + parser.getText() + ")");
        return value;
    }
    private boolean readBoolean() throws IOException {
        Boolean value = parser.nextBooleanValue();
        if (value == null)
            throw new IllegalArgumentException("Expected '" + VALUE_FALSE + "' or '" + VALUE_TRUE + "' at offset " + parser.getTokenLocation().getByteOffset() +
                                               ", but found '" + parser.currentToken() + "' (" + parser.getText() + ")");
        return value;
    }
    private DocumentId readId() throws IOException {
        return DocumentId.of(readString());
    }
}
/** Builder for {@link JsonFeeder}; collects default operation parameters before construction. */
public static class Builder {
    final FeedClient client;
    OperationParameters parameters = OperationParameters.empty();

    private Builder(FeedClient client) { this.client = requireNonNull(client); }

    /** Sets the default timeout applied to each feed operation. */
    public Builder withTimeout(Duration timeout) { parameters = parameters.timeout(timeout); return this; }

    /** Sets the default route applied to each feed operation. */
    public Builder withRoute(String route) { parameters = parameters.route(route); return this; }

    /** Sets the default trace level applied to each feed operation. */
    public Builder withTracelevel(int tracelevel) { parameters = parameters.tracelevel(tracelevel); return this; }

    /** Builds the configured feeder. */
    public JsonFeeder build() { return new JsonFeeder(client, parameters); }
}
} | class JsonFeeder implements Closeable {
private final ExecutorService resultExecutor = Executors.newSingleThreadExecutor(r -> {
Thread t = new Thread(r, "json-feeder-result-executor");
t.setDaemon(true);
return t;
});
private final FeedClient client;
private final OperationParameters protoParameters;
private JsonFeeder(FeedClient client, OperationParameters protoParameters) {
this.client = client;
this.protoParameters = protoParameters;
}
public interface ResultCallback {
/**
* Invoked after each operation has either completed successfully or failed
*
* @param result Non-null if operation completed successfully
* @param error Non-null if operation failed
*/
void onNextResult(Result result, Throwable error);
/**
* Invoked if an unrecoverable error occurred during feed processing,
* after which no other {@link ResultCallback} methods are invoked.
*/
void onError(Throwable error);
/**
* Invoked when all feed operations are either completed successfully or failed.
*/
void onComplete();
}
public static Builder builder(FeedClient client) { return new Builder(client); }
/** Feeds a stream containing a JSON array of feed operations on the form
* <pre>
* [
* {
* "id": "id:ns:type::boo",
* "fields": { ... document fields ... }
* },
* {
* "put": "id:ns:type::foo",
* "fields": { ... document fields ... }
* },
* {
* "update": "id:ns:type:n=4:bar",
* "create": true,
* "fields": { ... partial update fields ... }
* },
* {
* "remove": "id:ns:type:g=foo:bar",
* "condition": "type.baz = \"bax\""
* },
* ...
* ]
* </pre>
* Note that {@code "id"} is an alias for the document put operation.
*/
public CompletableFuture<Void> feedMany(InputStream jsonStream, ResultCallback resultCallback) {
return feedMany(jsonStream, 1 << 26, resultCallback);
}
private static final JsonFactory factory = new JsonFactory();
@Override public void close() throws IOException {
client.close();
resultExecutor.shutdown();
try {
if (!resultExecutor.awaitTermination(30, TimeUnit.SECONDS)) {
throw new IOException("Failed to close client in time");
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
private class RingBufferStream extends InputStream {
private final byte[] b = new byte[1];
private final InputStream in;
private final byte[] data;
private final int size;
private final Object lock = new Object();
private Throwable thrown = null;
private long tail = 0;
private long pos = 0;
private long head = 0;
private boolean done = false;
private final OperationParserAndExecutor parserAndExecutor;
RingBufferStream(InputStream in, int size) {
this.in = in;
this.data = new byte[size];
this.size = size;
new Thread(this::fill, "feed-reader").start();
try { this.parserAndExecutor = new RingBufferBackedOperationParserAndExecutor(factory.createParser(this)); }
catch (IOException e) { throw new UncheckedIOException(e); }
}
@Override
public int read() throws IOException {
return read(b, 0, 1) == -1 ? -1 : b[0];
}
@Override
public int read(byte[] buffer, int off, int len) throws IOException {
try {
int ready;
synchronized (lock) {
while ((ready = (int) (head - pos)) == 0 && ! done)
lock.wait();
}
if (thrown != null) throw new RuntimeException("Error reading input", thrown);
if (ready == 0) return -1;
ready = min(ready, len);
int offset = (int) (pos % size);
int length = min(ready, size - offset);
System.arraycopy(data, offset, buffer, off, length);
if (length < ready)
System.arraycopy(data, 0, buffer, off + length, ready - length);
pos += ready;
return ready;
}
catch (InterruptedException e) {
throw new InterruptedIOException("Interrupted waiting for data: " + e.getMessage());
}
}
public CompletableFuture<Result> next() throws IOException {
return parserAndExecutor.next();
}
private final byte[] prefix = "{\"fields\":".getBytes(UTF_8);
private byte[] copy(long start, long end) {
int length = (int) (end - start);
byte[] buffer = new byte[prefix.length + length + 1];
System.arraycopy(prefix, 0, buffer, 0, prefix.length);
int offset = (int) (start % size);
int toWrite = min(length, size - offset);
System.arraycopy(data, offset, buffer, prefix.length, toWrite);
if (toWrite < length)
System.arraycopy(data, 0, buffer, prefix.length + toWrite, length - toWrite);
buffer[buffer.length - 1] = '}';
return buffer;
}
@Override
public void close() throws IOException {
synchronized (lock) {
done = true;
lock.notifyAll();
}
in.close();
}
private void fill() {
try {
while (true) {
int free;
synchronized (lock) {
while ((free = (int) (tail + size - head)) <= 0 && ! done)
lock.wait();
}
if (done) break;
int off = (int) (head % size);
int len = min(min(free, size - off), 1 << 13);
int read = in.read(data, off, len);
synchronized (lock) {
if (read < 0) done = true;
else head += read;
lock.notify();
}
}
}
catch (Throwable t) {
synchronized (lock) {
done = true;
thrown = t;
}
}
}
private class RingBufferBackedOperationParserAndExecutor extends OperationParserAndExecutor {
RingBufferBackedOperationParserAndExecutor(JsonParser parser) throws IOException { super(parser, true); }
@Override
String getDocumentJson(long start, long end) {
String payload = new String(copy(start, end), UTF_8);
synchronized (lock) {
tail = end;
lock.notify();
}
return payload;
}
}
}
private abstract class OperationParserAndExecutor {
private final JsonParser parser;
private final boolean multipleOperations;
protected OperationParserAndExecutor(JsonParser parser, boolean multipleOperations) throws IOException {
this.parser = parser;
this.multipleOperations = multipleOperations;
if (multipleOperations) expect(START_ARRAY);
}
abstract String getDocumentJson(long start, long end);
CompletableFuture<Result> next() throws IOException {
JsonToken token = parser.nextToken();
if (token == END_ARRAY && multipleOperations) return null;
else if (token == null && !multipleOperations) return null;
else if (token == START_OBJECT);
else throw new IllegalArgumentException("Unexpected token '" + parser.currentToken() + "' at offset " + parser.getTokenLocation().getByteOffset());
long start = 0, end = -1;
OperationType type = null;
DocumentId id = null;
OperationParameters parameters = protoParameters;
loop: while (true) {
switch (parser.nextToken()) {
case FIELD_NAME:
switch (parser.getText()) {
case "id":
case "put": type = PUT; id = readId(); break;
case "update": type = UPDATE; id = readId(); break;
case "remove": type = REMOVE; id = readId(); break;
case "condition": parameters = parameters.testAndSetCondition(readString()); break;
case "create": parameters = parameters.createIfNonExistent(readBoolean()); break;
case "fields": {
expect(START_OBJECT);
start = parser.getTokenLocation().getByteOffset();
int depth = 1;
while (depth > 0) switch (parser.nextToken()) {
case START_OBJECT: ++depth; break;
case END_OBJECT: --depth; break;
}
end = parser.getTokenLocation().getByteOffset() + 1;
break;
}
default: throw new IllegalArgumentException("Unexpected field name '" + parser.getText() + "' at offset " +
parser.getTokenLocation().getByteOffset());
}
break;
case END_OBJECT:
break loop;
default:
throw new IllegalArgumentException("Unexpected token '" + parser.currentToken() + "' at offset " +
parser.getTokenLocation().getByteOffset());
}
}
if (id == null)
throw new IllegalArgumentException("No document id for document at offset " + start);
if (end < start)
throw new IllegalArgumentException("No 'fields' object for document at offset " + parser.getTokenLocation().getByteOffset());
String payload = getDocumentJson(start, end);
switch (type) {
case PUT: return client.put (id, payload, parameters);
case UPDATE: return client.update(id, payload, parameters);
case REMOVE: return client.remove(id, parameters);
default: throw new IllegalStateException("Unexpected operation type '" + type + "'");
}
}
void expect(JsonToken token) throws IOException {
if (parser.nextToken() != token)
throw new IllegalArgumentException("Expected '" + token + "' at offset " + parser.getTokenLocation().getByteOffset() +
", but found '" + parser.currentToken() + "' (" + parser.getText() + ")");
}
private String readString() throws IOException {
String value = parser.nextTextValue();
if (value == null)
throw new IllegalArgumentException("Expected '" + VALUE_STRING + "' at offset " + parser.getTokenLocation().getByteOffset() +
", but found '" + parser.currentToken() + "' (" + parser.getText() + ")");
return value;
}
private boolean readBoolean() throws IOException {
Boolean value = parser.nextBooleanValue();
if (value == null)
throw new IllegalArgumentException("Expected '" + VALUE_FALSE + "' or '" + VALUE_TRUE + "' at offset " + parser.getTokenLocation().getByteOffset() +
", but found '" + parser.currentToken() + "' (" + parser.getText() + ")");
return value;
}
private DocumentId readId() throws IOException {
return DocumentId.of(readString());
}
}
public static class Builder {
final FeedClient client;
OperationParameters parameters = OperationParameters.empty();
private Builder(FeedClient client) {
this.client = requireNonNull(client);
}
public Builder withTimeout(Duration timeout) {
parameters = parameters.timeout(timeout);
return this;
}
public Builder withRoute(String route) {
parameters = parameters.route(route);
return this;
}
public Builder withTracelevel(int tracelevel) {
parameters = parameters.tracelevel(tracelevel);
return this;
}
public JsonFeeder build() {
return new JsonFeeder(client, parameters);
}
}
} |
Is it always the same thread incrementing these counter? | public void dispatch(SimpleHttpRequest request, CompletableFuture<SimpleHttpResponse> vessel) {
requests.incrementAndGet();
long startMillis = System.currentTimeMillis();
delegate.dispatch(request, vessel);
vessel.whenCompleteAsync((response, thrown) -> {
results++;
if (thrown == null) {
responses++;
responsesByCode[response.getCode()]++;
long latency = System.currentTimeMillis() - startMillis;
totalLatencyMillis += latency;
minLatencyMillis = Math.min(minLatencyMillis, latency);
maxLatencyMillis = Math.max(maxLatencyMillis, latency);
bytesSent += request.getBodyBytes() == null ? 0 : request.getBodyBytes().length;
bytesReceived += response.getBodyBytes() == null ? 0 : response.getBodyBytes().length;
}
else
exceptions++;
},
executor);
} | results++; | public void dispatch(SimpleHttpRequest request, CompletableFuture<SimpleHttpResponse> vessel) {
requests.incrementAndGet();
long startMillis = System.currentTimeMillis();
delegate.dispatch(request, vessel);
vessel.whenCompleteAsync((response, thrown) -> {
results++;
if (thrown == null) {
responses++;
responsesByCode[response.getCode()]++;
long latency = System.currentTimeMillis() - startMillis;
totalLatencyMillis += latency;
minLatencyMillis = Math.min(minLatencyMillis, latency);
maxLatencyMillis = Math.max(maxLatencyMillis, latency);
bytesSent += request.getBodyBytes() == null ? 0 : request.getBodyBytes().length;
bytesReceived += response.getBodyBytes() == null ? 0 : response.getBodyBytes().length;
}
else
exceptions++;
},
executor);
} | class BenchmarkingCluster implements Cluster {
private final Cluster delegate;
private final ExecutorService executor = Executors.newSingleThreadExecutor(runnable -> {
Thread thread = new Thread(runnable, "cluster-stats-collector");
thread.setDaemon(true);
return thread;
});
private final AtomicLong requests = new AtomicLong();
private long results = 0;
private long responses = 0;
private final long[] responsesByCode = new long[600];
private long exceptions = 0;
private long totalLatencyMillis = 0;
private long minLatencyMillis = 0;
private long maxLatencyMillis = 0;
private long bytesSent = 0;
private long bytesReceived = 0;
public BenchmarkingCluster(Cluster delegate) {
this.delegate = requireNonNull(delegate);
}
@Override
@Override
public Stats stats() {
try {
try {
return executor.submit(this::getStats).get();
}
catch (RejectedExecutionException ignored) {
executor.awaitTermination(10, TimeUnit.SECONDS);
return getStats();
}
}
catch (InterruptedException | ExecutionException ignored) {
throw new RuntimeException(ignored);
}
}
private Stats getStats() {
Map<Integer, Long> responses = new HashMap<>();
for (int code = 0; code < responsesByCode.length; code++)
if (responsesByCode[code] > 0)
responses.put(code, responsesByCode[code]);
return new Stats(requests.get(),
responses,
exceptions,
requests.get() - results,
totalLatencyMillis / this.responses,
minLatencyMillis,
maxLatencyMillis,
bytesSent,
bytesReceived);
}
@Override
public void close() {
delegate.close();
executor.shutdown();
}
} | class BenchmarkingCluster implements Cluster {
private final Cluster delegate;
private final ExecutorService executor = Executors.newSingleThreadExecutor(runnable -> {
Thread thread = new Thread(runnable, "cluster-stats-collector");
thread.setDaemon(true);
return thread;
});
private final AtomicLong requests = new AtomicLong();
private long results = 0;
private long responses = 0;
private final long[] responsesByCode = new long[600];
private long exceptions = 0;
private long totalLatencyMillis = 0;
private long minLatencyMillis = 0;
private long maxLatencyMillis = 0;
private long bytesSent = 0;
private long bytesReceived = 0;
public BenchmarkingCluster(Cluster delegate) {
this.delegate = requireNonNull(delegate);
}
@Override
@Override
public OperationStats stats() {
try {
try {
return executor.submit(this::getStats).get();
}
catch (RejectedExecutionException ignored) {
executor.awaitTermination(10, TimeUnit.SECONDS);
return getStats();
}
}
catch (InterruptedException | ExecutionException ignored) {
throw new RuntimeException(ignored);
}
}
private OperationStats getStats() {
Map<Integer, Long> responses = new HashMap<>();
for (int code = 0; code < responsesByCode.length; code++)
if (responsesByCode[code] > 0)
responses.put(code, responsesByCode[code]);
return new OperationStats(requests.get(),
responses,
exceptions,
requests.get() - results,
totalLatencyMillis / this.responses,
minLatencyMillis,
maxLatencyMillis,
bytesSent,
bytesReceived);
}
@Override
public void close() {
delegate.close();
executor.shutdown();
}
} |
Alternatively, just wait for the nullptr exception and fix the incorrect usage. Nothing should be using the engine after it been closed. | private void ensureClusterTableIsUpdated() {
try {
if (0 == engine.get().getStatus(newContext().getCairoSecurityContext(), new Path(), clusterTable.name)) {
}
} catch (Exception e) {
clusterTable.repair(e);
}
} | if (0 == engine.get().getStatus(newContext().getCairoSecurityContext(), new Path(), clusterTable.name)) { | private void ensureClusterTableIsUpdated() {
try {
if (0 == engine().getStatus(newContext().getCairoSecurityContext(), new Path(), clusterTable.name)) {
}
} catch (Exception e) {
clusterTable.repair(e);
}
} | class QuestMetricsDb extends AbstractComponent implements MetricsDb {
private static final Logger log = Logger.getLogger(QuestMetricsDb.class.getName());
private final Table nodeTable;
private final Table clusterTable;
private final Clock clock;
private final String dataDir;
private final AtomicReference<CairoEngine> engine = new AtomicReference<>();
private final ThreadLocal<SqlCompiler> sqlCompiler;
private final AtomicInteger nullRecords = new AtomicInteger();
@Inject
public QuestMetricsDb() {
this(Defaults.getDefaults().underVespaHome("var/db/vespa/autoscaling"), Clock.systemUTC());
}
public QuestMetricsDb(String dataDir, Clock clock) {
this.clock = clock;
if (dataDir.startsWith(Defaults.getDefaults().vespaHome())
&& ! new File(Defaults.getDefaults().vespaHome()).exists())
dataDir = "data";
String logConfig = dataDir + "/quest-log.conf";
IOUtils.createDirectory(logConfig);
IOUtils.writeFile(new File(logConfig), new byte[0]);
System.setProperty("out", logConfig);
this.dataDir = dataDir;
engine.set(new CairoEngine(new DefaultCairoConfiguration(dataDir)));
sqlCompiler = ThreadLocal.withInitial(() -> new SqlCompiler(engine.get()));
nodeTable = new Table(dataDir, "metrics", clock);
clusterTable = new Table(dataDir, "clusterMetrics", clock);
ensureTablesExist();
}
@Override
public Clock clock() { return clock; }
@Override
public void addNodeMetrics(Collection<Pair<String, NodeMetricSnapshot>> snapshots) {
try {
addNodeMetricsBody(snapshots);
}
catch (CairoException e) {
if (e.getMessage().contains("Cannot read offset")) {
nodeTable.repair(e);
addNodeMetricsBody(snapshots);
}
}
}
private void addNodeMetricsBody(Collection<Pair<String, NodeMetricSnapshot>> snapshots) {
synchronized (nodeTable.writeLock) {
try (TableWriter writer = nodeTable.getWriter()) {
for (var snapshot : snapshots) {
Optional<Long> atMillis = nodeTable.adjustOrDiscard(snapshot.getSecond().at());
if (atMillis.isEmpty()) continue;
TableWriter.Row row = writer.newRow(atMillis.get() * 1000);
row.putStr(0, snapshot.getFirst());
row.putFloat(2, (float) snapshot.getSecond().load().cpu());
row.putFloat(3, (float) snapshot.getSecond().load().memory());
row.putFloat(4, (float) snapshot.getSecond().load().disk());
row.putLong(5, snapshot.getSecond().generation());
row.putBool(6, snapshot.getSecond().inService());
row.putBool(7, snapshot.getSecond().stable());
row.putFloat(8, (float) snapshot.getSecond().queryRate());
row.append();
}
writer.commit();
}
}
}
@Override
public void addClusterMetrics(ApplicationId application, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) {
try {
addClusterMetricsBody(application, snapshots);
}
catch (CairoException e) {
if (e.getMessage().contains("Cannot read offset")) {
clusterTable.repair(e);
addClusterMetricsBody(application, snapshots);
}
}
}
private void addClusterMetricsBody(ApplicationId applicationId, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) {
synchronized (clusterTable.writeLock) {
try (TableWriter writer = clusterTable.getWriter()) {
for (var snapshot : snapshots.entrySet()) {
Optional<Long> atMillis = clusterTable.adjustOrDiscard(snapshot.getValue().at());
if (atMillis.isEmpty()) continue;
TableWriter.Row row = writer.newRow(atMillis.get() * 1000);
row.putStr(0, applicationId.serializedForm());
row.putStr(1, snapshot.getKey().value());
row.putFloat(3, (float) snapshot.getValue().queryRate());
row.putFloat(4, (float) snapshot.getValue().writeRate());
row.append();
}
writer.commit();
}
}
}
@Override
public List<NodeTimeseries> getNodeTimeseries(Duration period, Set<String> hostnames) {
try {
var snapshots = getNodeSnapshots(clock.instant().minus(period), hostnames, newContext());
return snapshots.entrySet().stream()
.map(entry -> new NodeTimeseries(entry.getKey(), entry.getValue()))
.collect(Collectors.toList());
}
catch (SqlException e) {
throw new IllegalStateException("Could not read node timeseries data in Quest stored in " + dataDir, e);
}
}
@Override
public ClusterTimeseries getClusterTimeseries(ApplicationId applicationId, ClusterSpec.Id clusterId) {
try {
return getClusterSnapshots(applicationId, clusterId);
}
catch (SqlException e) {
throw new IllegalStateException("Could not read cluster timeseries data in Quest stored in " + dataDir, e);
}
}
public int getNullRecordsCount() { return nullRecords.get(); }
@Override
public void gc() {
nullRecords.set(0);
nodeTable.gc();
clusterTable.gc();
}
@Override
public void deconstruct() { close(); }
@Override
public void close() {
synchronized (clusterTable.writeLock) {
CairoEngine myEngine = engine.getAndSet(null);
if (myEngine != null) {
myEngine.close();
}
}
}
private void ensureTablesExist() {
if (nodeTable.exists())
ensureNodeTableIsUpdated();
else
createNodeTable();
if (clusterTable.exists())
ensureClusterTableIsUpdated();
else
createClusterTable();
}
private void ensureNodeTableIsUpdated() {
try {
} catch (Exception e) {
nodeTable.repair(e);
}
}
private void createNodeTable() {
try {
issue("create table " + nodeTable.name +
" (hostname string, at timestamp, cpu_util float, mem_total_util float, disk_util float," +
" application_generation long, inService boolean, stable boolean, queries_rate float)" +
" timestamp(at)" +
"PARTITION BY DAY;",
newContext());
}
catch (SqlException e) {
throw new IllegalStateException("Could not create Quest db table '" + nodeTable.name + "'", e);
}
}
private void createClusterTable() {
try {
issue("create table " + clusterTable.name +
" (application string, cluster string, at timestamp, queries_rate float, write_rate float)" +
" timestamp(at)" +
"PARTITION BY DAY;",
newContext());
}
catch (SqlException e) {
throw new IllegalStateException("Could not create Quest db table '" + clusterTable.name + "'", e);
}
}
private ListMap<String, NodeMetricSnapshot> getNodeSnapshots(Instant startTime,
Set<String> hostnames,
SqlExecutionContext context) throws SqlException {
DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC"));
String from = formatter.format(startTime).substring(0, 19) + ".000000Z";
String to = formatter.format(clock.instant()).substring(0, 19) + ".000000Z";
String sql = "select * from " + nodeTable.name + " where at between('" + from + "', '" + to + "');";
try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) {
ListMap<String, NodeMetricSnapshot> snapshots = new ListMap<>();
try (RecordCursor cursor = factory.getCursor(context)) {
Record record = cursor.getRecord();
while (cursor.hasNext()) {
if (record == null || record.getStr(0) == null) {
nullRecords.incrementAndGet();
continue;
}
String hostname = record.getStr(0).toString();
if (hostnames.isEmpty() || hostnames.contains(hostname)) {
snapshots.put(hostname,
new NodeMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(1) / 1000),
new Load(record.getFloat(2),
record.getFloat(3),
record.getFloat(4)),
record.getLong(5),
record.getBool(6),
record.getBool(7),
record.getFloat(8)));
}
}
}
return snapshots;
}
}
private ClusterTimeseries getClusterSnapshots(ApplicationId application, ClusterSpec.Id cluster) throws SqlException {
String sql = "select * from " + clusterTable.name;
var context = newContext();
try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) {
List<ClusterMetricSnapshot> snapshots = new ArrayList<>();
try (RecordCursor cursor = factory.getCursor(context)) {
Record record = cursor.getRecord();
while (cursor.hasNext()) {
String applicationIdString = record.getStr(0).toString();
if ( ! application.serializedForm().equals(applicationIdString)) continue;
String clusterId = record.getStr(1).toString();
if (cluster.value().equals(clusterId)) {
snapshots.add(new ClusterMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(2) / 1000),
record.getFloat(3),
record.getFloat(4)));
}
}
}
return new ClusterTimeseries(cluster, snapshots);
}
}
/** Issues an SQL statement against the QuestDb engine */
private CompiledQuery issue(String sql, SqlExecutionContext context) throws SqlException {
return sqlCompiler.get().compile(sql, context);
}
private SqlExecutionContext newContext() {
return new SqlExecutionContextImpl(engine.get(), 1);
}
/** A questDb table */
private class Table {
private final Object writeLock = new Object();
private final String name;
private final Clock clock;
private final File dir;
private long highestTimestampAdded = 0;
Table(String dataDir, String name, Clock clock) {
this.name = name;
this.clock = clock;
this.dir = new File(dataDir, name);
IOUtils.createDirectory(dir.getPath());
new File(dir + "/_txn_scoreboard").delete();
}
boolean exists() {
return 0 == engine.get().getStatus(newContext().getCairoSecurityContext(), new Path(), name);
}
TableWriter getWriter() {
return engine.get().getWriter(newContext().getCairoSecurityContext(), name);
}
void gc() {
synchronized (writeLock) {
Instant oldestToKeep = clock.instant().minus(Duration.ofDays(4));
SqlExecutionContext context = newContext();
int partitions = 0;
try {
List<String> removeList = new ArrayList<>();
for (String dirEntry : dir.list()) {
File partitionDir = new File(dir, dirEntry);
if (!partitionDir.isDirectory()) continue;
partitions++;
DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC"));
Instant partitionDay = Instant.from(formatter.parse(dirEntry.substring(0, 10) + "T00:00:00"));
if (partitionDay.isBefore(oldestToKeep))
removeList.add(dirEntry);
}
if (removeList.size() < partitions && !removeList.isEmpty()) {
issue("alter table " + name + " drop partition list " +
removeList.stream().map(dir -> "'" + dir + "'").collect(Collectors.joining(",")),
context);
}
} catch (SqlException e) {
log.log(Level.WARNING, "Failed to gc old metrics data in " + dir + " table " + name, e);
}
}
}
/**
* Repairs this db on corruption.
*
* @param e the exception indicating corruption
*/
private void repair(Exception e) {
log.log(Level.WARNING, "QuestDb seems corrupted, wiping data and starting over", e);
IOUtils.recursiveDeleteDir(dir);
IOUtils.createDirectory(dir.getPath());
ensureTablesExist();
}
void ensureColumnExists(String column, String columnType) throws SqlException {
if (columnNames().contains(column)) return;
issue("alter table " + name + " add column " + column + " " + columnType, newContext());
}
private Optional<Long> adjustOrDiscard(Instant at) {
long timestamp = at.toEpochMilli();
if (timestamp >= highestTimestampAdded) {
highestTimestampAdded = timestamp;
return Optional.of(timestamp);
}
if (timestamp >= highestTimestampAdded - 60 * 1000) return Optional.of(highestTimestampAdded);
return Optional.empty();
}
private List<String> columnNames() throws SqlException {
var context = newContext();
List<String> columns = new ArrayList<>();
try (RecordCursorFactory factory = issue("show columns from " + name, context).getRecordCursorFactory()) {
try (RecordCursor cursor = factory.getCursor(context)) {
Record record = cursor.getRecord();
while (cursor.hasNext()) {
columns.add(record.getStr(0).toString());
}
}
}
return columns;
}
}
} | class QuestMetricsDb extends AbstractComponent implements MetricsDb {
private static final Logger log = Logger.getLogger(QuestMetricsDb.class.getName());
private final Table nodeTable;
private final Table clusterTable;
private final Clock clock;
private final String dataDir;
private final CairoEngine engine;
private final ConcurrentResourcePool<SqlCompiler> sqlCompilerPool;
private final AtomicBoolean closed = new AtomicBoolean(false);
@Inject
public QuestMetricsDb() {
this(Defaults.getDefaults().underVespaHome("var/db/vespa/autoscaling"), Clock.systemUTC());
}
public QuestMetricsDb(String dataDir, Clock clock) {
this.clock = clock;
if (dataDir.startsWith(Defaults.getDefaults().vespaHome())
&& ! new File(Defaults.getDefaults().vespaHome()).exists())
dataDir = "data";
String logConfig = dataDir + "/quest-log.conf";
IOUtils.createDirectory(logConfig);
IOUtils.writeFile(new File(logConfig), new byte[0]);
System.setProperty("out", logConfig);
this.dataDir = dataDir;
engine = new CairoEngine(new DefaultCairoConfiguration(dataDir));
sqlCompilerPool = new ConcurrentResourcePool<>(() -> new SqlCompiler(engine()));
nodeTable = new Table(dataDir, "metrics", clock);
clusterTable = new Table(dataDir, "clusterMetrics", clock);
ensureTablesExist();
}
private CairoEngine engine() {
if (closed.get())
throw new IllegalStateException("Attempted to access QuestDb after calling close");
return engine;
}
@Override
public Clock clock() { return clock; }
@Override
public void addNodeMetrics(Collection<Pair<String, NodeMetricSnapshot>> snapshots) {
try {
addNodeMetricsBody(snapshots);
}
catch (CairoException e) {
if (e.getMessage().contains("Cannot read offset")) {
nodeTable.repair(e);
addNodeMetricsBody(snapshots);
}
}
}
private void addNodeMetricsBody(Collection<Pair<String, NodeMetricSnapshot>> snapshots) {
synchronized (nodeTable.writeLock) {
try (TableWriter writer = nodeTable.getWriter()) {
for (var snapshot : snapshots) {
Optional<Long> atMillis = nodeTable.adjustOrDiscard(snapshot.getSecond().at());
if (atMillis.isEmpty()) continue;
TableWriter.Row row = writer.newRow(atMillis.get() * 1000);
row.putStr(0, snapshot.getFirst());
row.putFloat(2, (float) snapshot.getSecond().load().cpu());
row.putFloat(3, (float) snapshot.getSecond().load().memory());
row.putFloat(4, (float) snapshot.getSecond().load().disk());
row.putLong(5, snapshot.getSecond().generation());
row.putBool(6, snapshot.getSecond().inService());
row.putBool(7, snapshot.getSecond().stable());
row.putFloat(8, (float) snapshot.getSecond().queryRate());
row.append();
}
writer.commit();
}
}
}
@Override
public void addClusterMetrics(ApplicationId application, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) {
try {
addClusterMetricsBody(application, snapshots);
}
catch (CairoException e) {
if (e.getMessage().contains("Cannot read offset")) {
clusterTable.repair(e);
addClusterMetricsBody(application, snapshots);
}
}
}
private void addClusterMetricsBody(ApplicationId applicationId, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) {
synchronized (clusterTable.writeLock) {
try (TableWriter writer = clusterTable.getWriter()) {
for (var snapshot : snapshots.entrySet()) {
Optional<Long> atMillis = clusterTable.adjustOrDiscard(snapshot.getValue().at());
if (atMillis.isEmpty()) continue;
TableWriter.Row row = writer.newRow(atMillis.get() * 1000);
row.putStr(0, applicationId.serializedForm());
row.putStr(1, snapshot.getKey().value());
row.putFloat(3, (float) snapshot.getValue().queryRate());
row.putFloat(4, (float) snapshot.getValue().writeRate());
row.append();
}
writer.commit();
}
}
}
@Override
public List<NodeTimeseries> getNodeTimeseries(Duration period, Set<String> hostnames) {
try {
var snapshots = getNodeSnapshots(clock.instant().minus(period), hostnames, newContext());
return snapshots.entrySet().stream()
.map(entry -> new NodeTimeseries(entry.getKey(), entry.getValue()))
.collect(Collectors.toList());
}
catch (SqlException e) {
throw new IllegalStateException("Could not read node timeseries data in Quest stored in " + dataDir, e);
}
}
@Override
public ClusterTimeseries getClusterTimeseries(ApplicationId applicationId, ClusterSpec.Id clusterId) {
try {
return getClusterSnapshots(applicationId, clusterId);
}
catch (SqlException e) {
throw new IllegalStateException("Could not read cluster timeseries data in Quest stored in " + dataDir, e);
}
}
@Override
public void gc() {
nodeTable.gc();
clusterTable.gc();
}
@Override
public void deconstruct() { close(); }
@Override
public void close() {
if (closed.getAndSet(true)) return;
synchronized (nodeTable.writeLock) {
synchronized (clusterTable.writeLock) {
for (SqlCompiler sqlCompiler : sqlCompilerPool)
sqlCompiler.close();
engine.close();
}
}
}
private void ensureTablesExist() {
if (nodeTable.exists())
ensureNodeTableIsUpdated();
else
createNodeTable();
if (clusterTable.exists())
ensureClusterTableIsUpdated();
else
createClusterTable();
}
private void ensureNodeTableIsUpdated() {
try {
} catch (Exception e) {
nodeTable.repair(e);
}
}
private void createNodeTable() {
try {
issue("create table " + nodeTable.name +
" (hostname string, at timestamp, cpu_util float, mem_total_util float, disk_util float," +
" application_generation long, inService boolean, stable boolean, queries_rate float)" +
" timestamp(at)" +
"PARTITION BY DAY;",
newContext());
}
catch (SqlException e) {
throw new IllegalStateException("Could not create Quest db table '" + nodeTable.name + "'", e);
}
}
private void createClusterTable() {
try {
issue("create table " + clusterTable.name +
" (application string, cluster string, at timestamp, queries_rate float, write_rate float)" +
" timestamp(at)" +
"PARTITION BY DAY;",
newContext());
}
catch (SqlException e) {
throw new IllegalStateException("Could not create Quest db table '" + clusterTable.name + "'", e);
}
}
private ListMap<String, NodeMetricSnapshot> getNodeSnapshots(Instant startTime,
Set<String> hostnames,
SqlExecutionContext context) throws SqlException {
DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC"));
String from = formatter.format(startTime).substring(0, 19) + ".000000Z";
String to = formatter.format(clock.instant()).substring(0, 19) + ".000000Z";
String sql = "select * from " + nodeTable.name + " where at between('" + from + "', '" + to + "');";
try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) {
ListMap<String, NodeMetricSnapshot> snapshots = new ListMap<>();
try (RecordCursor cursor = factory.getCursor(context)) {
Record record = cursor.getRecord();
while (cursor.hasNext()) {
String hostname = record.getStr(0).toString();
if (hostnames.isEmpty() || hostnames.contains(hostname)) {
snapshots.put(hostname,
new NodeMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(1) / 1000),
new Load(record.getFloat(2),
record.getFloat(3),
record.getFloat(4)),
record.getLong(5),
record.getBool(6),
record.getBool(7),
record.getFloat(8)));
}
}
}
return snapshots;
}
}
private ClusterTimeseries getClusterSnapshots(ApplicationId application, ClusterSpec.Id cluster) throws SqlException {
String sql = "select * from " + clusterTable.name;
var context = newContext();
try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) {
List<ClusterMetricSnapshot> snapshots = new ArrayList<>();
try (RecordCursor cursor = factory.getCursor(context)) {
Record record = cursor.getRecord();
while (cursor.hasNext()) {
String applicationIdString = record.getStr(0).toString();
if ( ! application.serializedForm().equals(applicationIdString)) continue;
String clusterId = record.getStr(1).toString();
if (cluster.value().equals(clusterId)) {
snapshots.add(new ClusterMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(2) / 1000),
record.getFloat(3),
record.getFloat(4)));
}
}
}
return new ClusterTimeseries(cluster, snapshots);
}
}
/** Issues an SQL statement against the QuestDb engine */
private CompiledQuery issue(String sql, SqlExecutionContext context) throws SqlException {
SqlCompiler sqlCompiler = sqlCompilerPool.alloc();
try {
return sqlCompiler.compile(sql, context);
} finally {
sqlCompilerPool.free(sqlCompiler);
}
}
private SqlExecutionContext newContext() {
return new SqlExecutionContextImpl(engine(), 1);
}
/** A questDb table */
private class Table {
private final Object writeLock = new Object();
private final String name;
private final Clock clock;
private final File dir;
private long highestTimestampAdded = 0;
Table(String dataDir, String name, Clock clock) {
this.name = name;
this.clock = clock;
this.dir = new File(dataDir, name);
IOUtils.createDirectory(dir.getPath());
new File(dir + "/_txn_scoreboard").delete();
}
boolean exists() {
return 0 == engine().getStatus(newContext().getCairoSecurityContext(), new Path(), name);
}
TableWriter getWriter() {
return engine().getWriter(newContext().getCairoSecurityContext(), name);
}
void gc() {
synchronized (writeLock) {
Instant oldestToKeep = clock.instant().minus(Duration.ofDays(4));
SqlExecutionContext context = newContext();
int partitions = 0;
try {
List<String> removeList = new ArrayList<>();
for (String dirEntry : dir.list()) {
File partitionDir = new File(dir, dirEntry);
if (!partitionDir.isDirectory()) continue;
partitions++;
DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC"));
Instant partitionDay = Instant.from(formatter.parse(dirEntry.substring(0, 10) + "T00:00:00"));
if (partitionDay.isBefore(oldestToKeep))
removeList.add(dirEntry);
}
if (removeList.size() < partitions && !removeList.isEmpty()) {
issue("alter table " + name + " drop partition list " +
removeList.stream().map(dir -> "'" + dir + "'").collect(Collectors.joining(",")),
context);
}
} catch (SqlException e) {
log.log(Level.WARNING, "Failed to gc old metrics data in " + dir + " table " + name, e);
}
}
}
/**
* Repairs this db on corruption.
*
* @param e the exception indicating corruption
*/
private void repair(Exception e) {
log.log(Level.WARNING, "QuestDb seems corrupted, wiping data and starting over", e);
IOUtils.recursiveDeleteDir(dir);
IOUtils.createDirectory(dir.getPath());
ensureTablesExist();
}
void ensureColumnExists(String column, String columnType) throws SqlException {
if (columnNames().contains(column)) return;
issue("alter table " + name + " add column " + column + " " + columnType, newContext());
}
private Optional<Long> adjustOrDiscard(Instant at) {
long timestamp = at.toEpochMilli();
if (timestamp >= highestTimestampAdded) {
highestTimestampAdded = timestamp;
return Optional.of(timestamp);
}
if (timestamp >= highestTimestampAdded - 60 * 1000) return Optional.of(highestTimestampAdded);
return Optional.empty();
}
private List<String> columnNames() throws SqlException {
var context = newContext();
List<String> columns = new ArrayList<>();
try (RecordCursorFactory factory = issue("show columns from " + name, context).getRecordCursorFactory()) {
try (RecordCursor cursor = factory.getCursor(context)) {
Record record = cursor.getRecord();
while (cursor.hasNext()) {
columns.add(record.getStr(0).toString());
}
}
}
return columns;
}
}
} |
Consider using temporary file support in junit5 | void test() throws IOException {
int docs = 1 << 14;
String json = "[\n" +
IntStream.range(0, docs).mapToObj(i ->
" {\n" +
" \"id\": \"id:ns:type::abc" + i + "\",\n" +
" \"fields\": {\n" +
" \"lul\":\"lal\"\n" +
" }\n" +
" },\n"
).collect(joining()) +
" {\n" +
" \"id\": \"id:ns:type::abc" + docs + "\",\n" +
" \"fields\": {\n" +
" \"lul\":\"lal\"\n" +
" }\n" +
" }\n" +
"]";
AtomicReference<Throwable> exceptionThrow = new AtomicReference<>();
Path tmpFile = Files.createTempFile(null, null);
Files.write(tmpFile, json.getBytes(UTF_8));
try (InputStream in = Files.newInputStream(tmpFile, StandardOpenOption.READ, StandardOpenOption.DELETE_ON_CLOSE)) {
AtomicInteger resultsReceived = new AtomicInteger();
AtomicBoolean completedSuccessfully = new AtomicBoolean();
Set<String> ids = new HashSet<>();
long startNanos = System.nanoTime();
JsonFeeder.builder(new FeedClient() {
@Override
public CompletableFuture<Result> put(DocumentId documentId, String documentJson, OperationParameters params) {
ids.add(documentId.userSpecific());
return createSuccessResult(documentId);
}
@Override
public CompletableFuture<Result> update(DocumentId documentId, String updateJson, OperationParameters params) {
return createSuccessResult(documentId);
}
@Override
public CompletableFuture<Result> remove(DocumentId documentId, OperationParameters params) {
return createSuccessResult(documentId);
}
@Override
public void close(boolean graceful) { }
private CompletableFuture<Result> createSuccessResult(DocumentId documentId) {
return CompletableFuture.completedFuture(new Result(Result.Type.success, documentId, "success", null));
}
}).build().feedMany(in, 1 << 7, new JsonFeeder.ResultCallback() {
@Override
public void onNextResult(Result result, Throwable error) { resultsReceived.incrementAndGet(); }
@Override
public void onError(Throwable error) { exceptionThrow.set(error); }
@Override
public void onComplete() { completedSuccessfully.set(true); }
}).join();
System.err.println((json.length() / 1048576.0) + " MB in " + (System.nanoTime() - startNanos) * 1e-9 + " seconds");
assertEquals(docs + 1, ids.size());
assertEquals(docs + 1, resultsReceived.get());
assertTrue(completedSuccessfully.get());
assertNull(exceptionThrow.get());
}
} | Path tmpFile = Files.createTempFile(null, null); | void test() throws IOException {
int docs = 1 << 14;
String json = "[\n" +
IntStream.range(0, docs).mapToObj(i ->
" {\n" +
" \"id\": \"id:ns:type::abc" + i + "\",\n" +
" \"fields\": {\n" +
" \"lul\":\"lal\"\n" +
" }\n" +
" },\n"
).collect(joining()) +
" {\n" +
" \"id\": \"id:ns:type::abc" + docs + "\",\n" +
" \"fields\": {\n" +
" \"lul\":\"lal\"\n" +
" }\n" +
" }\n" +
"]";
AtomicReference<Throwable> exceptionThrow = new AtomicReference<>();
Path tmpFile = Files.createTempFile(null, null);
Files.write(tmpFile, json.getBytes(UTF_8));
try (InputStream in = Files.newInputStream(tmpFile, StandardOpenOption.READ, StandardOpenOption.DELETE_ON_CLOSE)) {
AtomicInteger resultsReceived = new AtomicInteger();
AtomicBoolean completedSuccessfully = new AtomicBoolean();
Set<String> ids = new HashSet<>();
long startNanos = System.nanoTime();
JsonFeeder.builder(new FeedClient() {
@Override
public CompletableFuture<Result> put(DocumentId documentId, String documentJson, OperationParameters params) {
ids.add(documentId.userSpecific());
return createSuccessResult(documentId);
}
@Override
public CompletableFuture<Result> update(DocumentId documentId, String updateJson, OperationParameters params) {
return createSuccessResult(documentId);
}
@Override
public CompletableFuture<Result> remove(DocumentId documentId, OperationParameters params) {
return createSuccessResult(documentId);
}
@Override
public OperationStats stats() { return null; }
@Override
public void close(boolean graceful) { }
private CompletableFuture<Result> createSuccessResult(DocumentId documentId) {
return CompletableFuture.completedFuture(new Result(Result.Type.success, documentId, "success", null));
}
}).build().feedMany(in, 1 << 7, new JsonFeeder.ResultCallback() {
@Override
public void onNextResult(Result result, Throwable error) { resultsReceived.incrementAndGet(); }
@Override
public void onError(Throwable error) { exceptionThrow.set(error); }
@Override
public void onComplete() { completedSuccessfully.set(true); }
}).join();
System.err.println((json.length() / 1048576.0) + " MB in " + (System.nanoTime() - startNanos) * 1e-9 + " seconds");
assertEquals(docs + 1, ids.size());
assertEquals(docs + 1, resultsReceived.get());
assertTrue(completedSuccessfully.get());
assertNull(exceptionThrow.get());
}
} | class JsonFeederTest {
@Test
} | class JsonFeederTest {
@Test
} |
Yes, it's run in that one single-threaded executor. | public void dispatch(SimpleHttpRequest request, CompletableFuture<SimpleHttpResponse> vessel) {
requests.incrementAndGet();
long startMillis = System.currentTimeMillis();
delegate.dispatch(request, vessel);
vessel.whenCompleteAsync((response, thrown) -> {
results++;
if (thrown == null) {
responses++;
responsesByCode[response.getCode()]++;
long latency = System.currentTimeMillis() - startMillis;
totalLatencyMillis += latency;
minLatencyMillis = Math.min(minLatencyMillis, latency);
maxLatencyMillis = Math.max(maxLatencyMillis, latency);
bytesSent += request.getBodyBytes() == null ? 0 : request.getBodyBytes().length;
bytesReceived += response.getBodyBytes() == null ? 0 : response.getBodyBytes().length;
}
else
exceptions++;
},
executor);
} | results++; | public void dispatch(SimpleHttpRequest request, CompletableFuture<SimpleHttpResponse> vessel) {
requests.incrementAndGet();
long startMillis = System.currentTimeMillis();
delegate.dispatch(request, vessel);
vessel.whenCompleteAsync((response, thrown) -> {
results++;
if (thrown == null) {
responses++;
responsesByCode[response.getCode()]++;
long latency = System.currentTimeMillis() - startMillis;
totalLatencyMillis += latency;
minLatencyMillis = Math.min(minLatencyMillis, latency);
maxLatencyMillis = Math.max(maxLatencyMillis, latency);
bytesSent += request.getBodyBytes() == null ? 0 : request.getBodyBytes().length;
bytesReceived += response.getBodyBytes() == null ? 0 : response.getBodyBytes().length;
}
else
exceptions++;
},
executor);
} | class BenchmarkingCluster implements Cluster {
private final Cluster delegate;
private final ExecutorService executor = Executors.newSingleThreadExecutor(runnable -> {
Thread thread = new Thread(runnable, "cluster-stats-collector");
thread.setDaemon(true);
return thread;
});
private final AtomicLong requests = new AtomicLong();
private long results = 0;
private long responses = 0;
private final long[] responsesByCode = new long[600];
private long exceptions = 0;
private long totalLatencyMillis = 0;
private long minLatencyMillis = 0;
private long maxLatencyMillis = 0;
private long bytesSent = 0;
private long bytesReceived = 0;
public BenchmarkingCluster(Cluster delegate) {
this.delegate = requireNonNull(delegate);
}
@Override
@Override
public Stats stats() {
try {
try {
return executor.submit(this::getStats).get();
}
catch (RejectedExecutionException ignored) {
executor.awaitTermination(10, TimeUnit.SECONDS);
return getStats();
}
}
catch (InterruptedException | ExecutionException ignored) {
throw new RuntimeException(ignored);
}
}
private Stats getStats() {
Map<Integer, Long> responses = new HashMap<>();
for (int code = 0; code < responsesByCode.length; code++)
if (responsesByCode[code] > 0)
responses.put(code, responsesByCode[code]);
return new Stats(requests.get(),
responses,
exceptions,
requests.get() - results,
totalLatencyMillis / this.responses,
minLatencyMillis,
maxLatencyMillis,
bytesSent,
bytesReceived);
}
@Override
public void close() {
delegate.close();
executor.shutdown();
}
} | class BenchmarkingCluster implements Cluster {
private final Cluster delegate;
private final ExecutorService executor = Executors.newSingleThreadExecutor(runnable -> {
Thread thread = new Thread(runnable, "cluster-stats-collector");
thread.setDaemon(true);
return thread;
});
private final AtomicLong requests = new AtomicLong();
private long results = 0;
private long responses = 0;
private final long[] responsesByCode = new long[600];
private long exceptions = 0;
private long totalLatencyMillis = 0;
private long minLatencyMillis = 0;
private long maxLatencyMillis = 0;
private long bytesSent = 0;
private long bytesReceived = 0;
public BenchmarkingCluster(Cluster delegate) {
this.delegate = requireNonNull(delegate);
}
@Override
@Override
public OperationStats stats() {
try {
try {
return executor.submit(this::getStats).get();
}
catch (RejectedExecutionException ignored) {
executor.awaitTermination(10, TimeUnit.SECONDS);
return getStats();
}
}
catch (InterruptedException | ExecutionException ignored) {
throw new RuntimeException(ignored);
}
}
private OperationStats getStats() {
Map<Integer, Long> responses = new HashMap<>();
for (int code = 0; code < responsesByCode.length; code++)
if (responsesByCode[code] > 0)
responses.put(code, responsesByCode[code]);
return new OperationStats(requests.get(),
responses,
exceptions,
requests.get() - results,
totalLatencyMillis / this.responses,
minLatencyMillis,
maxLatencyMillis,
bytesSent,
bytesReceived);
}
@Override
public void close() {
delegate.close();
executor.shutdown();
}
} |
Yes, good idea. | void test() throws IOException {
int docs = 1 << 14;
String json = "[\n" +
IntStream.range(0, docs).mapToObj(i ->
" {\n" +
" \"id\": \"id:ns:type::abc" + i + "\",\n" +
" \"fields\": {\n" +
" \"lul\":\"lal\"\n" +
" }\n" +
" },\n"
).collect(joining()) +
" {\n" +
" \"id\": \"id:ns:type::abc" + docs + "\",\n" +
" \"fields\": {\n" +
" \"lul\":\"lal\"\n" +
" }\n" +
" }\n" +
"]";
AtomicReference<Throwable> exceptionThrow = new AtomicReference<>();
Path tmpFile = Files.createTempFile(null, null);
Files.write(tmpFile, json.getBytes(UTF_8));
try (InputStream in = Files.newInputStream(tmpFile, StandardOpenOption.READ, StandardOpenOption.DELETE_ON_CLOSE)) {
AtomicInteger resultsReceived = new AtomicInteger();
AtomicBoolean completedSuccessfully = new AtomicBoolean();
Set<String> ids = new HashSet<>();
long startNanos = System.nanoTime();
JsonFeeder.builder(new FeedClient() {
@Override
public CompletableFuture<Result> put(DocumentId documentId, String documentJson, OperationParameters params) {
ids.add(documentId.userSpecific());
return createSuccessResult(documentId);
}
@Override
public CompletableFuture<Result> update(DocumentId documentId, String updateJson, OperationParameters params) {
return createSuccessResult(documentId);
}
@Override
public CompletableFuture<Result> remove(DocumentId documentId, OperationParameters params) {
return createSuccessResult(documentId);
}
@Override
public void close(boolean graceful) { }
private CompletableFuture<Result> createSuccessResult(DocumentId documentId) {
return CompletableFuture.completedFuture(new Result(Result.Type.success, documentId, "success", null));
}
}).build().feedMany(in, 1 << 7, new JsonFeeder.ResultCallback() {
@Override
public void onNextResult(Result result, Throwable error) { resultsReceived.incrementAndGet(); }
@Override
public void onError(Throwable error) { exceptionThrow.set(error); }
@Override
public void onComplete() { completedSuccessfully.set(true); }
}).join();
System.err.println((json.length() / 1048576.0) + " MB in " + (System.nanoTime() - startNanos) * 1e-9 + " seconds");
assertEquals(docs + 1, ids.size());
assertEquals(docs + 1, resultsReceived.get());
assertTrue(completedSuccessfully.get());
assertNull(exceptionThrow.get());
}
} | Path tmpFile = Files.createTempFile(null, null); | void test() throws IOException {
int docs = 1 << 14;
String json = "[\n" +
IntStream.range(0, docs).mapToObj(i ->
" {\n" +
" \"id\": \"id:ns:type::abc" + i + "\",\n" +
" \"fields\": {\n" +
" \"lul\":\"lal\"\n" +
" }\n" +
" },\n"
).collect(joining()) +
" {\n" +
" \"id\": \"id:ns:type::abc" + docs + "\",\n" +
" \"fields\": {\n" +
" \"lul\":\"lal\"\n" +
" }\n" +
" }\n" +
"]";
AtomicReference<Throwable> exceptionThrow = new AtomicReference<>();
Path tmpFile = Files.createTempFile(null, null);
Files.write(tmpFile, json.getBytes(UTF_8));
try (InputStream in = Files.newInputStream(tmpFile, StandardOpenOption.READ, StandardOpenOption.DELETE_ON_CLOSE)) {
AtomicInteger resultsReceived = new AtomicInteger();
AtomicBoolean completedSuccessfully = new AtomicBoolean();
Set<String> ids = new HashSet<>();
long startNanos = System.nanoTime();
JsonFeeder.builder(new FeedClient() {
@Override
public CompletableFuture<Result> put(DocumentId documentId, String documentJson, OperationParameters params) {
ids.add(documentId.userSpecific());
return createSuccessResult(documentId);
}
@Override
public CompletableFuture<Result> update(DocumentId documentId, String updateJson, OperationParameters params) {
return createSuccessResult(documentId);
}
@Override
public CompletableFuture<Result> remove(DocumentId documentId, OperationParameters params) {
return createSuccessResult(documentId);
}
@Override
public OperationStats stats() { return null; }
@Override
public void close(boolean graceful) { }
private CompletableFuture<Result> createSuccessResult(DocumentId documentId) {
return CompletableFuture.completedFuture(new Result(Result.Type.success, documentId, "success", null));
}
}).build().feedMany(in, 1 << 7, new JsonFeeder.ResultCallback() {
@Override
public void onNextResult(Result result, Throwable error) { resultsReceived.incrementAndGet(); }
@Override
public void onError(Throwable error) { exceptionThrow.set(error); }
@Override
public void onComplete() { completedSuccessfully.set(true); }
}).join();
System.err.println((json.length() / 1048576.0) + " MB in " + (System.nanoTime() - startNanos) * 1e-9 + " seconds");
assertEquals(docs + 1, ids.size());
assertEquals(docs + 1, resultsReceived.get());
assertTrue(completedSuccessfully.get());
assertNull(exceptionThrow.get());
}
} | class JsonFeederTest {
@Test
} | class JsonFeederTest {
@Test
} |
This should be `Math.max(0, parkedHosts.size() - MAX_ALLOWED_PARKED_HOSTS)` in case there are less than 20 hosts parked. Also, using `var` for `int` just reduces readability imo. | protected double maintain() {
if (!nodeRepository.zone().getCloud().dynamicProvisioning())
return 1.0;
var parkedHosts = new ArrayList<>(nodeRepository.nodes().list(Node.State.parked)
.nodeType(NodeType.host)
.asList());
parkedHosts.sort(Comparator.comparing(this::getParkedTime));
var hostsToExpire = parkedHosts.size() - MAX_ALLOWED_PARKED_HOSTS;
for(int i = 0; i < hostsToExpire; i++) {
var parkedHost = parkedHosts.get(i);
log.info("Allowed number of parked nodes exceeded. Recycling " + parkedHost.hostname());
nodeRepository.nodes().deallocate(parkedHost, Agent.ParkedExpirer, "Expired by ParkedExpirer");
}
return 1.0;
} | var hostsToExpire = parkedHosts.size() - MAX_ALLOWED_PARKED_HOSTS; | protected double maintain() {
if (!nodeRepository.zone().getCloud().dynamicProvisioning())
return 1.0;
var parkedHosts = new ArrayList<>(nodeRepository.nodes().list(Node.State.parked)
.nodeType(NodeType.host)
.asList());
int hostsToExpire = Math.max(0, parkedHosts.size() - MAX_ALLOWED_PARKED_HOSTS);
nodeRepository.nodes().list(Node.State.parked).nodeType(NodeType.host)
.sortedBy(Comparator.comparing(this::getParkedTime))
.first(hostsToExpire)
.forEach(host -> {
log.info("Allowed number of parked nodes exceeded. Recycling " + host.hostname());
nodeRepository.nodes().deallocate(host, Agent.ParkedExpirer, "Expired by ParkedExpirer");
});
return 1.0;
} | class ParkedExpirer extends NodeRepositoryMaintainer {
private static final int MAX_ALLOWED_PARKED_HOSTS = 20;
private static final Logger log = Logger.getLogger(ParkedExpirer.class.getName());
private final NodeRepository nodeRepository;
ParkedExpirer(NodeRepository nodeRepository, Duration interval, Metric metric) {
super(nodeRepository, interval, metric);
this.nodeRepository = nodeRepository;
}
@Override
private Instant getParkedTime(Node node) {
return node.history().event(History.Event.Type.parked)
.map(History.Event::at)
.orElse(Instant.EPOCH);
}
} | class ParkedExpirer extends NodeRepositoryMaintainer {
private static final int MAX_ALLOWED_PARKED_HOSTS = 20;
private static final Logger log = Logger.getLogger(ParkedExpirer.class.getName());
private final NodeRepository nodeRepository;
ParkedExpirer(NodeRepository nodeRepository, Duration interval, Metric metric) {
super(nodeRepository, interval, metric);
this.nodeRepository = nodeRepository;
}
@Override
private Instant getParkedTime(Node node) {
return node.history().event(History.Event.Type.parked)
.map(History.Event::at)
.orElse(Instant.EPOCH);
}
} |
NodeList has `sortedBy` so you could simplify this to something like: ``` nodeRepository.nodes().list(Node.State.parked).nodeType(NodeType.host) .sortedBy(this::getParkedTime) .first(hostsToExpire) .forEach(host -> { ... }) ``` | protected double maintain() {
if (!nodeRepository.zone().getCloud().dynamicProvisioning())
return 1.0;
var parkedHosts = new ArrayList<>(nodeRepository.nodes().list(Node.State.parked)
.nodeType(NodeType.host)
.asList());
parkedHosts.sort(Comparator.comparing(this::getParkedTime));
var hostsToExpire = parkedHosts.size() - MAX_ALLOWED_PARKED_HOSTS;
for(int i = 0; i < hostsToExpire; i++) {
var parkedHost = parkedHosts.get(i);
log.info("Allowed number of parked nodes exceeded. Recycling " + parkedHost.hostname());
nodeRepository.nodes().deallocate(parkedHost, Agent.ParkedExpirer, "Expired by ParkedExpirer");
}
return 1.0;
} | .asList()); | protected double maintain() {
if (!nodeRepository.zone().getCloud().dynamicProvisioning())
return 1.0;
var parkedHosts = new ArrayList<>(nodeRepository.nodes().list(Node.State.parked)
.nodeType(NodeType.host)
.asList());
int hostsToExpire = Math.max(0, parkedHosts.size() - MAX_ALLOWED_PARKED_HOSTS);
nodeRepository.nodes().list(Node.State.parked).nodeType(NodeType.host)
.sortedBy(Comparator.comparing(this::getParkedTime))
.first(hostsToExpire)
.forEach(host -> {
log.info("Allowed number of parked nodes exceeded. Recycling " + host.hostname());
nodeRepository.nodes().deallocate(host, Agent.ParkedExpirer, "Expired by ParkedExpirer");
});
return 1.0;
} | class ParkedExpirer extends NodeRepositoryMaintainer {
private static final int MAX_ALLOWED_PARKED_HOSTS = 20;
private static final Logger log = Logger.getLogger(ParkedExpirer.class.getName());
private final NodeRepository nodeRepository;
ParkedExpirer(NodeRepository nodeRepository, Duration interval, Metric metric) {
super(nodeRepository, interval, metric);
this.nodeRepository = nodeRepository;
}
@Override
private Instant getParkedTime(Node node) {
return node.history().event(History.Event.Type.parked)
.map(History.Event::at)
.orElse(Instant.EPOCH);
}
} | class ParkedExpirer extends NodeRepositoryMaintainer {
private static final int MAX_ALLOWED_PARKED_HOSTS = 20;
private static final Logger log = Logger.getLogger(ParkedExpirer.class.getName());
private final NodeRepository nodeRepository;
ParkedExpirer(NodeRepository nodeRepository, Duration interval, Metric metric) {
super(nodeRepository, interval, metric);
this.nodeRepository = nodeRepository;
}
@Override
private Instant getParkedTime(Node node) {
return node.history().event(History.Event.Type.parked)
.map(History.Event::at)
.orElse(Instant.EPOCH);
}
} |
This is between 07:00 and 13:59? (Ref. comment above) | private boolean canTriggerAt(Instant instant) {
int hourOfDay = instant.atZone(ZoneOffset.UTC).getHour();
int dayOfWeek = instant.atZone(ZoneOffset.UTC).getDayOfWeek().getValue();
return hourOfDay >= 7 && hourOfDay <= 13 &&
dayOfWeek < 5;
} | return hourOfDay >= 7 && hourOfDay <= 13 && | private boolean canTriggerAt(Instant instant) {
int hourOfDay = instant.atZone(ZoneOffset.UTC).getHour();
int dayOfWeek = instant.atZone(ZoneOffset.UTC).getDayOfWeek().getValue();
return hourOfDay >= 7 && hourOfDay <= 12 &&
dayOfWeek < 5;
} | class OsUpgradeScheduler extends ControllerMaintainer {
/** Trigger a new upgrade when the current target version reaches this age */
private static final Duration MAX_VERSION_AGE = Duration.ofDays(45);
/**
* The interval at which new versions become available. We use this to avoid scheduling upgrades to a version that
* may not be available yet
*/
private static final Duration AVAILABILITY_INTERVAL = Duration.ofDays(7);
private static final DateTimeFormatter VERSION_DATE_PATTERN = DateTimeFormatter.ofPattern("yyyyMMdd");
public OsUpgradeScheduler(Controller controller, Duration interval) {
super(controller, interval);
}
@Override
protected double maintain() {
Instant now = controller().clock().instant();
if (!canTriggerAt(now)) return 1.0;
for (var cloud : supportedClouds()) {
Optional<Version> newTarget = newTargetIn(cloud, now);
if (newTarget.isEmpty()) continue;
controller().upgradeOsIn(cloud, newTarget.get(), upgradeBudget(), false);
}
return 1.0;
}
/** Returns the new target version for given cloud, if any */
private Optional<Version> newTargetIn(CloudName cloud, Instant now) {
Optional<Version> currentTarget = controller().osVersionTarget(cloud)
.map(OsVersionTarget::osVersion)
.map(OsVersion::version);
if (currentTarget.isEmpty()) return Optional.empty();
if (!hasExpired(currentTarget.get())) return Optional.empty();
String qualifier = LocalDate.ofInstant(now.minus(AVAILABILITY_INTERVAL), ZoneOffset.UTC)
.format(VERSION_DATE_PATTERN);
return Optional.of(new Version(currentTarget.get().getMajor(),
currentTarget.get().getMinor(),
currentTarget.get().getMicro(),
qualifier));
}
/** Returns whether we should upgrade from given version */
private boolean hasExpired(Version version) {
String qualifier = version.getQualifier();
if (!qualifier.matches("^\\d{8,}")) return false;
String dateString = qualifier.substring(0, 8);
Instant now = controller().clock().instant();
Instant versionDate = LocalDate.parse(dateString, VERSION_DATE_PATTERN)
.atStartOfDay(ZoneOffset.UTC)
.toInstant();
return versionDate.isBefore(now.minus(MAX_VERSION_AGE));
}
/** Returns the clouds where we can safely schedule OS upgrades */
private Set<CloudName> supportedClouds() {
return controller().zoneRegistry().zones().reprovisionToUpgradeOs().zones().stream()
.map(ZoneApi::getCloudName)
.collect(Collectors.toUnmodifiableSet());
}
private Duration upgradeBudget() {
return controller().system().isCd() ? Duration.ofHours(1) : Duration.ofDays(14);
}
} | class OsUpgradeScheduler extends ControllerMaintainer {
/** Trigger a new upgrade when the current target version reaches this age */
private static final Duration MAX_VERSION_AGE = Duration.ofDays(45);
/**
* The interval at which new versions become available. We use this to avoid scheduling upgrades to a version that
* may not be available yet
*/
private static final Duration AVAILABILITY_INTERVAL = Duration.ofDays(7);
private static final DateTimeFormatter VERSION_DATE_PATTERN = DateTimeFormatter.ofPattern("yyyyMMdd");
public OsUpgradeScheduler(Controller controller, Duration interval) {
super(controller, interval);
}
@Override
protected double maintain() {
Instant now = controller().clock().instant();
if (!canTriggerAt(now)) return 1.0;
for (var cloud : supportedClouds()) {
Optional<Version> newTarget = newTargetIn(cloud, now);
if (newTarget.isEmpty()) continue;
controller().upgradeOsIn(cloud, newTarget.get(), upgradeBudget(), false);
}
return 1.0;
}
/** Returns the new target version for given cloud, if any */
private Optional<Version> newTargetIn(CloudName cloud, Instant now) {
Optional<Version> currentTarget = controller().osVersionTarget(cloud)
.map(OsVersionTarget::osVersion)
.map(OsVersion::version);
if (currentTarget.isEmpty()) return Optional.empty();
if (!hasExpired(currentTarget.get())) return Optional.empty();
String qualifier = LocalDate.ofInstant(now.minus(AVAILABILITY_INTERVAL), ZoneOffset.UTC)
.format(VERSION_DATE_PATTERN);
return Optional.of(new Version(currentTarget.get().getMajor(),
currentTarget.get().getMinor(),
currentTarget.get().getMicro(),
qualifier));
}
/** Returns whether we should upgrade from given version */
private boolean hasExpired(Version version) {
String qualifier = version.getQualifier();
if (!qualifier.matches("^\\d{8,}")) return false;
String dateString = qualifier.substring(0, 8);
Instant now = controller().clock().instant();
Instant versionDate = LocalDate.parse(dateString, VERSION_DATE_PATTERN)
.atStartOfDay(ZoneOffset.UTC)
.toInstant();
return versionDate.isBefore(now.minus(MAX_VERSION_AGE));
}
/** Returns the clouds where we can safely schedule OS upgrades */
private Set<CloudName> supportedClouds() {
return controller().zoneRegistry().zones().reprovisionToUpgradeOs().zones().stream()
.map(ZoneApi::getCloudName)
.collect(Collectors.toUnmodifiableSet());
}
private Duration upgradeBudget() {
return controller().system().isCd() ? Duration.ofHours(1) : Duration.ofDays(14);
}
} |
Thanks, fixed. | private boolean canTriggerAt(Instant instant) {
int hourOfDay = instant.atZone(ZoneOffset.UTC).getHour();
int dayOfWeek = instant.atZone(ZoneOffset.UTC).getDayOfWeek().getValue();
return hourOfDay >= 7 && hourOfDay <= 13 &&
dayOfWeek < 5;
} | return hourOfDay >= 7 && hourOfDay <= 13 && | private boolean canTriggerAt(Instant instant) {
int hourOfDay = instant.atZone(ZoneOffset.UTC).getHour();
int dayOfWeek = instant.atZone(ZoneOffset.UTC).getDayOfWeek().getValue();
return hourOfDay >= 7 && hourOfDay <= 12 &&
dayOfWeek < 5;
} | class OsUpgradeScheduler extends ControllerMaintainer {
/** Trigger a new upgrade when the current target version reaches this age */
private static final Duration MAX_VERSION_AGE = Duration.ofDays(45);
/**
* The interval at which new versions become available. We use this to avoid scheduling upgrades to a version that
* may not be available yet
*/
private static final Duration AVAILABILITY_INTERVAL = Duration.ofDays(7);
private static final DateTimeFormatter VERSION_DATE_PATTERN = DateTimeFormatter.ofPattern("yyyyMMdd");
public OsUpgradeScheduler(Controller controller, Duration interval) {
super(controller, interval);
}
@Override
protected double maintain() {
Instant now = controller().clock().instant();
if (!canTriggerAt(now)) return 1.0;
for (var cloud : supportedClouds()) {
Optional<Version> newTarget = newTargetIn(cloud, now);
if (newTarget.isEmpty()) continue;
controller().upgradeOsIn(cloud, newTarget.get(), upgradeBudget(), false);
}
return 1.0;
}
/** Returns the new target version for given cloud, if any */
private Optional<Version> newTargetIn(CloudName cloud, Instant now) {
Optional<Version> currentTarget = controller().osVersionTarget(cloud)
.map(OsVersionTarget::osVersion)
.map(OsVersion::version);
if (currentTarget.isEmpty()) return Optional.empty();
if (!hasExpired(currentTarget.get())) return Optional.empty();
String qualifier = LocalDate.ofInstant(now.minus(AVAILABILITY_INTERVAL), ZoneOffset.UTC)
.format(VERSION_DATE_PATTERN);
return Optional.of(new Version(currentTarget.get().getMajor(),
currentTarget.get().getMinor(),
currentTarget.get().getMicro(),
qualifier));
}
/** Returns whether we should upgrade from given version */
private boolean hasExpired(Version version) {
String qualifier = version.getQualifier();
if (!qualifier.matches("^\\d{8,}")) return false;
String dateString = qualifier.substring(0, 8);
Instant now = controller().clock().instant();
Instant versionDate = LocalDate.parse(dateString, VERSION_DATE_PATTERN)
.atStartOfDay(ZoneOffset.UTC)
.toInstant();
return versionDate.isBefore(now.minus(MAX_VERSION_AGE));
}
/** Returns the clouds where we can safely schedule OS upgrades */
private Set<CloudName> supportedClouds() {
return controller().zoneRegistry().zones().reprovisionToUpgradeOs().zones().stream()
.map(ZoneApi::getCloudName)
.collect(Collectors.toUnmodifiableSet());
}
private Duration upgradeBudget() {
return controller().system().isCd() ? Duration.ofHours(1) : Duration.ofDays(14);
}
} | class OsUpgradeScheduler extends ControllerMaintainer {
/** Trigger a new upgrade when the current target version reaches this age */
private static final Duration MAX_VERSION_AGE = Duration.ofDays(45);
/**
* The interval at which new versions become available. We use this to avoid scheduling upgrades to a version that
* may not be available yet
*/
private static final Duration AVAILABILITY_INTERVAL = Duration.ofDays(7);
private static final DateTimeFormatter VERSION_DATE_PATTERN = DateTimeFormatter.ofPattern("yyyyMMdd");
public OsUpgradeScheduler(Controller controller, Duration interval) {
super(controller, interval);
}
@Override
protected double maintain() {
Instant now = controller().clock().instant();
if (!canTriggerAt(now)) return 1.0;
for (var cloud : supportedClouds()) {
Optional<Version> newTarget = newTargetIn(cloud, now);
if (newTarget.isEmpty()) continue;
controller().upgradeOsIn(cloud, newTarget.get(), upgradeBudget(), false);
}
return 1.0;
}
/** Returns the new target version for given cloud, if any */
private Optional<Version> newTargetIn(CloudName cloud, Instant now) {
Optional<Version> currentTarget = controller().osVersionTarget(cloud)
.map(OsVersionTarget::osVersion)
.map(OsVersion::version);
if (currentTarget.isEmpty()) return Optional.empty();
if (!hasExpired(currentTarget.get())) return Optional.empty();
String qualifier = LocalDate.ofInstant(now.minus(AVAILABILITY_INTERVAL), ZoneOffset.UTC)
.format(VERSION_DATE_PATTERN);
return Optional.of(new Version(currentTarget.get().getMajor(),
currentTarget.get().getMinor(),
currentTarget.get().getMicro(),
qualifier));
}
/** Returns whether we should upgrade from given version */
private boolean hasExpired(Version version) {
String qualifier = version.getQualifier();
if (!qualifier.matches("^\\d{8,}")) return false;
String dateString = qualifier.substring(0, 8);
Instant now = controller().clock().instant();
Instant versionDate = LocalDate.parse(dateString, VERSION_DATE_PATTERN)
.atStartOfDay(ZoneOffset.UTC)
.toInstant();
return versionDate.isBefore(now.minus(MAX_VERSION_AGE));
}
/** Returns the clouds where we can safely schedule OS upgrades */
private Set<CloudName> supportedClouds() {
return controller().zoneRegistry().zones().reprovisionToUpgradeOs().zones().stream()
.map(ZoneApi::getCloudName)
.collect(Collectors.toUnmodifiableSet());
}
private Duration upgradeBudget() {
return controller().system().isCd() ? Duration.ofHours(1) : Duration.ofDays(14);
}
} |
Follow the same filename pattern as the others so they are grouped together, i.e. `meta-query.expected.json`? | public void rewrites_query() throws IOException {
assertRewrite("filters-complex.json", "filters-complex.expected.json", Role.reader(TenantName.from("tenant2")));
assertRewrite("filter-in-execution-graph.json",
"filter-in-execution-graph.expected.json",
Role.reader(TenantName.from("tenant2")), Role.athenzTenantAdmin(TenantName.from("tenant3")));
assertRewrite("filter-in-execution-graph.json",
"filter-in-execution-graph.expected.operator.json",
Role.reader(TenantName.from("tenant2")), Role.athenzTenantAdmin(TenantName.from("tenant3")), Role.hostedOperator());
assertRewrite("no-filters.json",
"no-filters.expected.json",
Role.reader(TenantName.from("tenant2")), Role.athenzTenantAdmin(TenantName.from("tenant3")));
assertRewrite("meta-query.json",
"filters-meta-query.json",
Role.reader(TenantName.from("tenant2")), Role.athenzTenantAdmin(TenantName.from("tenant3")));
} | "filters-meta-query.json", | public void rewrites_query() throws IOException {
assertRewrite("filters-complex.json", "filters-complex.expected.json", Role.reader(TenantName.from("tenant2")));
assertRewrite("filter-in-execution-graph.json",
"filter-in-execution-graph.expected.json",
Role.reader(TenantName.from("tenant2")), Role.athenzTenantAdmin(TenantName.from("tenant3")));
assertRewrite("filter-in-execution-graph.json",
"filter-in-execution-graph.expected.operator.json",
Role.reader(TenantName.from("tenant2")), Role.athenzTenantAdmin(TenantName.from("tenant3")), Role.hostedOperator());
assertRewrite("no-filters.json",
"no-filters.expected.json",
Role.reader(TenantName.from("tenant2")), Role.athenzTenantAdmin(TenantName.from("tenant3")));
assertRewrite("filters-meta-query.json",
"filters-meta-query.expected.json",
Role.reader(TenantName.from("tenant2")), Role.athenzTenantAdmin(TenantName.from("tenant3")));
} | class TsdbQueryRewriterTest {
// A caller with no roles has no authorized tenants and is not an operator,
// so the rewrite must be rejected with UnauthorizedException.
@Test(expected = TsdbQueryRewriter.UnauthorizedException.class)
public void throws_if_no_roles() throws IOException {
    assertRewrite("filters-complex.json", "filters-complex.expected.json");
}
// Reads the initial query from test resources, rewrites it with the given roles in the
// Public system, and asserts the re-encoded JSON equals the expected resource file.
private static void assertRewrite(String initialFilename, String expectedFilename, Role... roles) throws IOException {
byte[] data = Files.readAllBytes(Paths.get("src/test/resources/horizon", initialFilename));
// The rewrite injects system and tenant filters based on the provided roles.
data = TsdbQueryRewriter.rewrite(data, Set.of(roles), SystemName.Public);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
// Re-encode through Slime to get a canonical JSON form that can be string-compared.
new JsonFormat(false).encode(baos, SlimeUtils.jsonToSlime(data));
String expectedJson = Files.readString(Paths.get("src/test/resources/horizon", expectedFilename));
assertEquals(expectedJson, baos.toString());
}
} | class TsdbQueryRewriterTest {
@Test
@Test(expected = TsdbQueryRewriter.UnauthorizedException.class)
public void throws_if_no_roles() throws IOException {
assertRewrite("filters-complex.json", "filters-complex.expected.json");
}
private static void assertRewrite(String initialFilename, String expectedFilename, Role... roles) throws IOException {
byte[] data = Files.readAllBytes(Paths.get("src/test/resources/horizon", initialFilename));
data = TsdbQueryRewriter.rewrite(data, Set.of(roles), SystemName.Public);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
new JsonFormat(false).encode(baos, SlimeUtils.jsonToSlime(data));
String expectedJson = Files.readString(Paths.get("src/test/resources/horizon", expectedFilename));
assertEquals(expectedJson, baos.toString());
}
} |
I think `AND` is default, the documentation is... inconsistent: http://opentsdb.net/docs/3x/build/html/user_guide/semanticquery/filters.html#chain > Combines one or more filters with a logical AND (the default) or a logical OR. But the table says default is `OR` Logically, we want `AND` when doing suggestions, so I guess `AND` is default then since they are not specifying. In that case, we should change this to `!= null` and let the additional filters be appended to this chain. | private static void rewriteFilter(ObjectNode parent, Set<TenantName> tenantNames, boolean operator, SystemName systemName) {
ObjectNode prev = ((ObjectNode) parent.get("filter"));
ArrayNode filters;
if (prev == null || !"Chain".equals(prev.get("type").asText()) || prev.get("op") == null || !"AND".equals(prev.get("op").asText())) {
filters = parent.putObject("filter")
.put("type", "Chain")
.put("op", "AND")
.putArray("filters");
if (prev != null) filters.add(prev);
} else filters = (ArrayNode) prev.get("filters");
ObjectNode systemFilter = filters.addObject();
systemFilter.put("type", "TagValueLiteralOr");
systemFilter.put("filter", systemName.name().toLowerCase());
systemFilter.put("tagKey", "system");
if (!operator) {
ObjectNode appFilter = filters.addObject();
appFilter.put("type", "TagValueRegex");
appFilter.put("filter",
tenantNames.stream().map(TenantName::value).sorted().collect(Collectors.joining("|", "^(", ")\\..*")));
appFilter.put("tagKey", "applicationId");
}
} | if (prev == null || !"Chain".equals(prev.get("type").asText()) || prev.get("op") == null || !"AND".equals(prev.get("op").asText())) { | private static void rewriteFilter(ObjectNode parent, Set<TenantName> tenantNames, boolean operator, SystemName systemName) {
ObjectNode prev = ((ObjectNode) parent.get("filter"));
ArrayNode filters;
if (prev == null || !"Chain".equals(prev.get("type").asText()) || prev.get("op") != null && !"AND".equals(prev.get("op").asText())) {
filters = parent.putObject("filter")
.put("type", "Chain")
.put("op", "AND")
.putArray("filters");
if (prev != null) filters.add(prev);
} else filters = (ArrayNode) prev.get("filters");
ObjectNode systemFilter = filters.addObject();
systemFilter.put("type", "TagValueLiteralOr");
systemFilter.put("filter", systemName.name().toLowerCase());
systemFilter.put("tagKey", "system");
if (!operator) {
ObjectNode appFilter = filters.addObject();
appFilter.put("type", "TagValueRegex");
appFilter.put("filter",
tenantNames.stream().map(TenantName::value).sorted().collect(Collectors.joining("|", "^(", ")\\..*")));
appFilter.put("tagKey", "applicationId");
}
} | class TsdbQueryRewriter {
private static final ObjectMapper mapper = new ObjectMapper();
private static final EnumSet<RoleDefinition> operatorRoleDefinitions =
EnumSet.of(RoleDefinition.hostedOperator, RoleDefinition.hostedSupporter);
/**
 * Rewrites a TSDB query so the caller only sees data they are authorized for.
 *
 * @param data       the raw JSON query bytes
 * @param roles      the caller's roles; these decide operator status and authorized tenants
 * @param systemName the system the query targets (always injected as a filter)
 * @return the rewritten query as JSON bytes
 * @throws IOException if the input bytes are not parseable JSON
 * @throws UnauthorizedException if the caller is neither an operator nor holds any tenant role
 */
public static byte[] rewrite(byte[] data, Set<Role> roles, SystemName systemName) throws IOException {
// Operators (hostedOperator/hostedSupporter) are not restricted to specific tenants.
boolean operator = roles.stream().map(Role::definition).anyMatch(operatorRoleDefinitions::contains);
Set<TenantName> authorizedTenants = roles.stream()
.filter(TenantRole.class::isInstance)
.map(role -> ((TenantRole) role).tenant())
.collect(Collectors.toUnmodifiableSet());
if (!operator && authorizedTenants.isEmpty())
throw new UnauthorizedException();
JsonNode root = mapper.readTree(data);
// Filters may appear in three places; rewrite whichever of them are present.
getField(root, "executionGraph", ArrayNode.class)
.ifPresent(graph -> rewriteQueryGraph(graph, authorizedTenants, operator, systemName));
getField(root, "filters", ArrayNode.class)
.ifPresent(filters -> rewriteFilters(filters, authorizedTenants, operator, systemName));
getField(root, "queries", ArrayNode.class)
.ifPresent(graph -> rewriteQueryGraph(graph, authorizedTenants, operator, systemName));
return mapper.writeValueAsBytes(root);
}
// Injects authorization filters into every execution node that carries its own inline
// filter; nodes that reference a shared filter by "filterId" are left untouched here
// (the shared filter itself is rewritten via the top-level "filters" array).
private static void rewriteQueryGraph(ArrayNode executionGraph, Set<TenantName> tenantNames, boolean operator, SystemName systemName) {
    for (JsonNode execution : executionGraph) {
        if (execution.has("filterId")) continue;
        rewriteFilter((ObjectNode) execution, tenantNames, operator, systemName);
    }
}
// Injects authorization filters into each entry of a top-level "filters" array.
private static void rewriteFilters(ArrayNode filters, Set<TenantName> tenantNames, boolean operator, SystemName systemName) {
    for (JsonNode filter : filters)
        rewriteFilter((ObjectNode) filter, tenantNames, operator, systemName);
}
// Returns the named child of the given node when it exists and is of the requested
// JSON node type; empty otherwise.
private static <T extends JsonNode> Optional<T> getField(JsonNode object, String fieldName, Class<T> clazz) {
    JsonNode child = object.get(fieldName);
    if (clazz.isInstance(child)) return Optional.of(clazz.cast(child));
    return Optional.empty();
}
static class UnauthorizedException extends RuntimeException { }
} | class TsdbQueryRewriter {
private static final ObjectMapper mapper = new ObjectMapper();
private static final EnumSet<RoleDefinition> operatorRoleDefinitions =
EnumSet.of(RoleDefinition.hostedOperator, RoleDefinition.hostedSupporter);
public static byte[] rewrite(byte[] data, Set<Role> roles, SystemName systemName) throws IOException {
boolean operator = roles.stream().map(Role::definition).anyMatch(operatorRoleDefinitions::contains);
Set<TenantName> authorizedTenants = roles.stream()
.filter(TenantRole.class::isInstance)
.map(role -> ((TenantRole) role).tenant())
.collect(Collectors.toUnmodifiableSet());
if (!operator && authorizedTenants.isEmpty())
throw new UnauthorizedException();
JsonNode root = mapper.readTree(data);
getField(root, "executionGraph", ArrayNode.class)
.ifPresent(graph -> rewriteQueryGraph(graph, authorizedTenants, operator, systemName));
getField(root, "filters", ArrayNode.class)
.ifPresent(filters -> rewriteFilters(filters, authorizedTenants, operator, systemName));
getField(root, "queries", ArrayNode.class)
.ifPresent(graph -> rewriteQueryGraph(graph, authorizedTenants, operator, systemName));
return mapper.writeValueAsBytes(root);
}
private static void rewriteQueryGraph(ArrayNode executionGraph, Set<TenantName> tenantNames, boolean operator, SystemName systemName) {
for (int i = 0; i < executionGraph.size(); i++) {
JsonNode execution = executionGraph.get(i);
if (execution.has("filterId")) continue;
rewriteFilter((ObjectNode) execution, tenantNames, operator, systemName);
}
}
private static void rewriteFilters(ArrayNode filters, Set<TenantName> tenantNames, boolean operator, SystemName systemName) {
for (int i = 0; i < filters.size(); i++)
rewriteFilter((ObjectNode) filters.get(i), tenantNames, operator, systemName);
}
private static <T extends JsonNode> Optional<T> getField(JsonNode object, String fieldName, Class<T> clazz) {
return Optional.ofNullable(object.get(fieldName)).filter(clazz::isInstance).map(clazz::cast);
}
static class UnauthorizedException extends RuntimeException { }
} |
What about `combined`? Use `clusterType.isContent()`? | private double minAdvertisedVcpu(ClusterSpec.Type clusterType) {
if (zone().environment() == Environment.dev && !zone().getCloud().dynamicProvisioning()) return 0.1;
if (clusterType == ClusterSpec.Type.content && zone().environment().isProduction()) return 1.0;
if (clusterType == ClusterSpec.Type.admin) return 0.1;
return 0.5;
} | if (clusterType == ClusterSpec.Type.content && zone().environment().isProduction()) return 1.0; | private double minAdvertisedVcpu(ClusterSpec.Type clusterType) {
if (zone().environment() == Environment.dev && !zone().getCloud().dynamicProvisioning()) return 0.1;
if (clusterType.isContent() && zone().environment().isProduction()) return 1.0;
if (clusterType == ClusterSpec.Type.admin) return 0.1;
return 0.5;
} | class NodeResourceLimits {
private final NodeRepository nodeRepository;
// The limits depend on the node repository for zone information and the resources calculator.
public NodeResourceLimits(NodeRepository nodeRepository) {
this.nodeRepository = nodeRepository;
}
/** Validates the resources applications ask for (which are in "advertised" resource space) */
public void ensureWithinAdvertisedLimits(String type, NodeResources requested, ClusterSpec cluster) {
    if (requested.isUnspecified()) return;

    double minVcpu = minAdvertisedVcpu(cluster.type());
    if (requested.vcpu() < minVcpu)
        illegal(type, "vcpu", "", cluster, requested.vcpu(), minVcpu);

    double minMemoryGb = minAdvertisedMemoryGb(cluster.type());
    if (requested.memoryGb() < minMemoryGb)
        illegal(type, "memoryGb", "Gb", cluster, requested.memoryGb(), minMemoryGb);

    double minDiskGb = minAdvertisedDiskGb(requested, cluster.isExclusive());
    if (requested.diskGb() < minDiskGb)
        illegal(type, "diskGb", "Gb", cluster, requested.diskGb(), minDiskGb);
}
/** Returns whether the real resources we'll end up with on a given tenant node are within limits */
public boolean isWithinRealLimits(NodeCandidate candidateNode, ClusterSpec cluster) {
    // Real-resource limits only apply to tenant nodes.
    if (candidateNode.type() != NodeType.tenant) return true;
    NodeResources realResources = nodeRepository.resourcesCalculator()
                                                .realResourcesOf(candidateNode, nodeRepository, cluster.isExclusive());
    return isWithinRealLimits(realResources, cluster.type());
}
/** Returns whether the real resources we'll end up with on a given tenant node are within limits */
public boolean isWithinRealLimits(NodeResources realResources, ClusterSpec.Type clusterType) {
    // Unspecified resources are trivially within limits.
    if (realResources.isUnspecified()) return true;
    return realResources.vcpu() >= minRealVcpu(clusterType)
           && realResources.memoryGb() >= minRealMemoryGb(clusterType)
           && realResources.diskGb() >= minRealDiskGb();
}
/** Returns the requested resources with each dimension raised to its advertised minimum where below it. */
public NodeResources enlargeToLegal(NodeResources requested, ClusterSpec.Type clusterType, boolean exclusive) {
    if (requested.isUnspecified()) return requested;
    double vcpu = Math.max(minAdvertisedVcpu(clusterType), requested.vcpu());
    double memoryGb = Math.max(minAdvertisedMemoryGb(clusterType), requested.memoryGb());
    double diskGb = Math.max(minAdvertisedDiskGb(requested, exclusive), requested.diskGb());
    return requested.withVcpu(vcpu)
                    .withMemoryGb(memoryGb)
                    .withDiskGb(diskGb);
}
// Dev systems and admin clusters can run on 1 Gb; all other clusters need at least 4 Gb.
private double minAdvertisedMemoryGb(ClusterSpec.Type clusterType) {
    if (zone().system() == SystemName.dev || clusterType == ClusterSpec.Type.admin) return 1;
    return 4;
}
// Minimum advertised disk: the real-disk floor plus the thin-pool overhead for this storage type.
private double minAdvertisedDiskGb(NodeResources requested, boolean exclusive) {
return minRealDiskGb() + getThinPoolSize(requested.storageType(), exclusive);
}
// Thin-pool reservation in Gb: dynamically provisioned hosts with local storage get a
// calculated size from the resources calculator; everything else gets a fixed 4 Gb.
private long getThinPoolSize(NodeResources.StorageType storageType, boolean exclusive) {
    boolean dynamicLocalStorage = storageType == NodeResources.StorageType.local
                                  && zone().getCloud().dynamicProvisioning();
    if ( ! dynamicLocalStorage) return 4;
    return nodeRepository.resourcesCalculator().thinPoolSizeInBase2Gb(NodeType.host, ! exclusive);
}
// Vcpu is not derated: the real minimum equals the advertised minimum.
private double minRealVcpu(ClusterSpec.Type clusterType) { return minAdvertisedVcpu(clusterType); }
// Real memory is the advertised minimum less 1.7 Gb — presumably host/runtime overhead;
// NOTE(review): confirm the 1.7 Gb figure matches what the host actually reserves.
private double minRealMemoryGb(ClusterSpec.Type clusterType) {
return minAdvertisedMemoryGb(clusterType) - 1.7;
}
// Fixed 6 Gb real-disk floor, independent of cluster type.
private double minRealDiskGb() { return 6; }
// Convenience accessor for the zone this node repository serves.
private Zone zone() { return nodeRepository.zone(); }
// Throws an IllegalArgumentException describing which resource dimension of the
// requested allocation is below its minimum. 'unit' may be empty (e.g. for vcpu).
private void illegal(String type, String resource, String unit, ClusterSpec cluster, double requested, double minAllowed) {
    String suffix = unit.isEmpty() ? "" : " " + unit;
    String message = String.format(Locale.ENGLISH,
                                   "%s cluster '%s': " + type + " " + resource +
                                   " size is %.2f%s but must be at least %.2f%s",
                                   cluster.type().name(), cluster.id().value(),
                                   requested, suffix, minAllowed, suffix);
    throw new IllegalArgumentException(message);
}
} | class NodeResourceLimits {
private final NodeRepository nodeRepository;
public NodeResourceLimits(NodeRepository nodeRepository) {
this.nodeRepository = nodeRepository;
}
/** Validates the resources applications ask for (which are in "advertised" resource space) */
public void ensureWithinAdvertisedLimits(String type, NodeResources requested, ClusterSpec cluster) {
if (requested.isUnspecified()) return;
if (requested.vcpu() < minAdvertisedVcpu(cluster.type()))
illegal(type, "vcpu", "", cluster, requested.vcpu(), minAdvertisedVcpu(cluster.type()));
if (requested.memoryGb() < minAdvertisedMemoryGb(cluster.type()))
illegal(type, "memoryGb", "Gb", cluster, requested.memoryGb(), minAdvertisedMemoryGb(cluster.type()));
if (requested.diskGb() < minAdvertisedDiskGb(requested, cluster.isExclusive()))
illegal(type, "diskGb", "Gb", cluster, requested.diskGb(), minAdvertisedDiskGb(requested, cluster.isExclusive()));
}
/** Returns whether the real resources we'll end up with on a given tenant node are within limits */
public boolean isWithinRealLimits(NodeCandidate candidateNode, ClusterSpec cluster) {
if (candidateNode.type() != NodeType.tenant) return true;
return isWithinRealLimits(nodeRepository.resourcesCalculator().realResourcesOf(candidateNode, nodeRepository, cluster.isExclusive()),
cluster.type());
}
/** Returns whether the real resources we'll end up with on a given tenant node are within limits */
public boolean isWithinRealLimits(NodeResources realResources, ClusterSpec.Type clusterType) {
if (realResources.isUnspecified()) return true;
if (realResources.vcpu() < minRealVcpu(clusterType)) return false;
if (realResources.memoryGb() < minRealMemoryGb(clusterType)) return false;
if (realResources.diskGb() < minRealDiskGb()) return false;
return true;
}
public NodeResources enlargeToLegal(NodeResources requested, ClusterSpec.Type clusterType, boolean exclusive) {
if (requested.isUnspecified()) return requested;
return requested.withVcpu(Math.max(minAdvertisedVcpu(clusterType), requested.vcpu()))
.withMemoryGb(Math.max(minAdvertisedMemoryGb(clusterType), requested.memoryGb()))
.withDiskGb(Math.max(minAdvertisedDiskGb(requested, exclusive), requested.diskGb()));
}
private double minAdvertisedMemoryGb(ClusterSpec.Type clusterType) {
if (zone().system() == SystemName.dev) return 1;
if (clusterType == ClusterSpec.Type.admin) return 1;
return 4;
}
private double minAdvertisedDiskGb(NodeResources requested, boolean exclusive) {
return minRealDiskGb() + getThinPoolSize(requested.storageType(), exclusive);
}
private long getThinPoolSize(NodeResources.StorageType storageType, boolean exclusive) {
if (storageType == NodeResources.StorageType.local && zone().getCloud().dynamicProvisioning())
return nodeRepository.resourcesCalculator().thinPoolSizeInBase2Gb(NodeType.host, ! exclusive);
else
return 4;
}
private double minRealVcpu(ClusterSpec.Type clusterType) { return minAdvertisedVcpu(clusterType); }
private double minRealMemoryGb(ClusterSpec.Type clusterType) {
return minAdvertisedMemoryGb(clusterType) - 1.7;
}
private double minRealDiskGb() { return 6; }
private Zone zone() { return nodeRepository.zone(); }
private void illegal(String type, String resource, String unit, ClusterSpec cluster, double requested, double minAllowed) {
if ( ! unit.isEmpty())
unit = " " + unit;
String message = String.format(Locale.ENGLISH,
"%s cluster '%s': " + type + " " + resource +
" size is %.2f%s but must be at least %.2f%s",
cluster.type().name(), cluster.id().value(), requested, unit, minAllowed, unit);
throw new IllegalArgumentException(message);
}
} |
👍 done | private double minAdvertisedVcpu(ClusterSpec.Type clusterType) {
if (zone().environment() == Environment.dev && !zone().getCloud().dynamicProvisioning()) return 0.1;
if (clusterType == ClusterSpec.Type.content && zone().environment().isProduction()) return 1.0;
if (clusterType == ClusterSpec.Type.admin) return 0.1;
return 0.5;
} | if (clusterType == ClusterSpec.Type.content && zone().environment().isProduction()) return 1.0; | private double minAdvertisedVcpu(ClusterSpec.Type clusterType) {
if (zone().environment() == Environment.dev && !zone().getCloud().dynamicProvisioning()) return 0.1;
if (clusterType.isContent() && zone().environment().isProduction()) return 1.0;
if (clusterType == ClusterSpec.Type.admin) return 0.1;
return 0.5;
} | class NodeResourceLimits {
private final NodeRepository nodeRepository;
public NodeResourceLimits(NodeRepository nodeRepository) {
this.nodeRepository = nodeRepository;
}
/** Validates the resources applications ask for (which are in "advertised" resource space) */
public void ensureWithinAdvertisedLimits(String type, NodeResources requested, ClusterSpec cluster) {
if (requested.isUnspecified()) return;
if (requested.vcpu() < minAdvertisedVcpu(cluster.type()))
illegal(type, "vcpu", "", cluster, requested.vcpu(), minAdvertisedVcpu(cluster.type()));
if (requested.memoryGb() < minAdvertisedMemoryGb(cluster.type()))
illegal(type, "memoryGb", "Gb", cluster, requested.memoryGb(), minAdvertisedMemoryGb(cluster.type()));
if (requested.diskGb() < minAdvertisedDiskGb(requested, cluster.isExclusive()))
illegal(type, "diskGb", "Gb", cluster, requested.diskGb(), minAdvertisedDiskGb(requested, cluster.isExclusive()));
}
/** Returns whether the real resources we'll end up with on a given tenant node are within limits */
public boolean isWithinRealLimits(NodeCandidate candidateNode, ClusterSpec cluster) {
if (candidateNode.type() != NodeType.tenant) return true;
return isWithinRealLimits(nodeRepository.resourcesCalculator().realResourcesOf(candidateNode, nodeRepository, cluster.isExclusive()),
cluster.type());
}
/** Returns whether the real resources we'll end up with on a given tenant node are within limits */
public boolean isWithinRealLimits(NodeResources realResources, ClusterSpec.Type clusterType) {
if (realResources.isUnspecified()) return true;
if (realResources.vcpu() < minRealVcpu(clusterType)) return false;
if (realResources.memoryGb() < minRealMemoryGb(clusterType)) return false;
if (realResources.diskGb() < minRealDiskGb()) return false;
return true;
}
public NodeResources enlargeToLegal(NodeResources requested, ClusterSpec.Type clusterType, boolean exclusive) {
if (requested.isUnspecified()) return requested;
return requested.withVcpu(Math.max(minAdvertisedVcpu(clusterType), requested.vcpu()))
.withMemoryGb(Math.max(minAdvertisedMemoryGb(clusterType), requested.memoryGb()))
.withDiskGb(Math.max(minAdvertisedDiskGb(requested, exclusive), requested.diskGb()));
}
private double minAdvertisedMemoryGb(ClusterSpec.Type clusterType) {
if (zone().system() == SystemName.dev) return 1;
if (clusterType == ClusterSpec.Type.admin) return 1;
return 4;
}
private double minAdvertisedDiskGb(NodeResources requested, boolean exclusive) {
return minRealDiskGb() + getThinPoolSize(requested.storageType(), exclusive);
}
private long getThinPoolSize(NodeResources.StorageType storageType, boolean exclusive) {
if (storageType == NodeResources.StorageType.local && zone().getCloud().dynamicProvisioning())
return nodeRepository.resourcesCalculator().thinPoolSizeInBase2Gb(NodeType.host, ! exclusive);
else
return 4;
}
private double minRealVcpu(ClusterSpec.Type clusterType) { return minAdvertisedVcpu(clusterType); }
private double minRealMemoryGb(ClusterSpec.Type clusterType) {
return minAdvertisedMemoryGb(clusterType) - 1.7;
}
private double minRealDiskGb() { return 6; }
private Zone zone() { return nodeRepository.zone(); }
private void illegal(String type, String resource, String unit, ClusterSpec cluster, double requested, double minAllowed) {
if ( ! unit.isEmpty())
unit = " " + unit;
String message = String.format(Locale.ENGLISH,
"%s cluster '%s': " + type + " " + resource +
" size is %.2f%s but must be at least %.2f%s",
cluster.type().name(), cluster.id().value(), requested, unit, minAllowed, unit);
throw new IllegalArgumentException(message);
}
} | class NodeResourceLimits {
private final NodeRepository nodeRepository;
public NodeResourceLimits(NodeRepository nodeRepository) {
this.nodeRepository = nodeRepository;
}
/** Validates the resources applications ask for (which are in "advertised" resource space) */
public void ensureWithinAdvertisedLimits(String type, NodeResources requested, ClusterSpec cluster) {
if (requested.isUnspecified()) return;
if (requested.vcpu() < minAdvertisedVcpu(cluster.type()))
illegal(type, "vcpu", "", cluster, requested.vcpu(), minAdvertisedVcpu(cluster.type()));
if (requested.memoryGb() < minAdvertisedMemoryGb(cluster.type()))
illegal(type, "memoryGb", "Gb", cluster, requested.memoryGb(), minAdvertisedMemoryGb(cluster.type()));
if (requested.diskGb() < minAdvertisedDiskGb(requested, cluster.isExclusive()))
illegal(type, "diskGb", "Gb", cluster, requested.diskGb(), minAdvertisedDiskGb(requested, cluster.isExclusive()));
}
/** Returns whether the real resources we'll end up with on a given tenant node are within limits */
public boolean isWithinRealLimits(NodeCandidate candidateNode, ClusterSpec cluster) {
if (candidateNode.type() != NodeType.tenant) return true;
return isWithinRealLimits(nodeRepository.resourcesCalculator().realResourcesOf(candidateNode, nodeRepository, cluster.isExclusive()),
cluster.type());
}
/** Returns whether the real resources we'll end up with on a given tenant node are within limits */
public boolean isWithinRealLimits(NodeResources realResources, ClusterSpec.Type clusterType) {
if (realResources.isUnspecified()) return true;
if (realResources.vcpu() < minRealVcpu(clusterType)) return false;
if (realResources.memoryGb() < minRealMemoryGb(clusterType)) return false;
if (realResources.diskGb() < minRealDiskGb()) return false;
return true;
}
public NodeResources enlargeToLegal(NodeResources requested, ClusterSpec.Type clusterType, boolean exclusive) {
if (requested.isUnspecified()) return requested;
return requested.withVcpu(Math.max(minAdvertisedVcpu(clusterType), requested.vcpu()))
.withMemoryGb(Math.max(minAdvertisedMemoryGb(clusterType), requested.memoryGb()))
.withDiskGb(Math.max(minAdvertisedDiskGb(requested, exclusive), requested.diskGb()));
}
private double minAdvertisedMemoryGb(ClusterSpec.Type clusterType) {
if (zone().system() == SystemName.dev) return 1;
if (clusterType == ClusterSpec.Type.admin) return 1;
return 4;
}
private double minAdvertisedDiskGb(NodeResources requested, boolean exclusive) {
return minRealDiskGb() + getThinPoolSize(requested.storageType(), exclusive);
}
private long getThinPoolSize(NodeResources.StorageType storageType, boolean exclusive) {
if (storageType == NodeResources.StorageType.local && zone().getCloud().dynamicProvisioning())
return nodeRepository.resourcesCalculator().thinPoolSizeInBase2Gb(NodeType.host, ! exclusive);
else
return 4;
}
private double minRealVcpu(ClusterSpec.Type clusterType) { return minAdvertisedVcpu(clusterType); }
private double minRealMemoryGb(ClusterSpec.Type clusterType) {
return minAdvertisedMemoryGb(clusterType) - 1.7;
}
private double minRealDiskGb() { return 6; }
private Zone zone() { return nodeRepository.zone(); }
private void illegal(String type, String resource, String unit, ClusterSpec cluster, double requested, double minAllowed) {
if ( ! unit.isEmpty())
unit = " " + unit;
String message = String.format(Locale.ENGLISH,
"%s cluster '%s': " + type + " " + resource +
" size is %.2f%s but must be at least %.2f%s",
cluster.type().name(), cluster.id().value(), requested, unit, minAllowed, unit);
throw new IllegalArgumentException(message);
}
} |
Let's remember to add doc id here. | public void onNextResult(Result result, Throwable error) {
resultsReceived.incrementAndGet();
if (error != null) {
log.warning("Problems with feeding document");
errorsReceived.incrementAndGet();
} else if (result.type() == Result.Type.failure) {
log.warning("Problems with docID " + result.documentId() + ":" + error);
errorsReceived.incrementAndGet();
}
} | log.warning("Problems with feeding document"); | public void onNextResult(Result result, Throwable error) {
resultsReceived.incrementAndGet();
if (error != null) {
log.warning("Problems with feeding document");
errorsReceived.incrementAndGet();
} else if (result.type() == Result.Type.failure) {
log.warning("Problems with docID " + result.documentId() + ":" + error);
errorsReceived.incrementAndGet();
}
} | class ResultCallBack implements JsonFeeder.ResultCallback {
final AtomicInteger resultsReceived = new AtomicInteger(0);
final AtomicInteger errorsReceived = new AtomicInteger(0);
final long startTimeMillis = System.currentTimeMillis();;
// Fatal feeder error: logged here; removed the duplicated @Override annotation,
// which is not repeatable and does not compile.
@Override
public void onError(Throwable error) {
    log.severe("Feeding failed: " + error.getMessage());
}
@Override
public void onComplete() {
// All feed operations have been dispatched and answered.
log.info("Feeding completed");
}
// Logs result/error totals and the wall-clock time elapsed since this callback was created.
void dumpStatsToLog() {
    long elapsedMillis = System.currentTimeMillis() - startTimeMillis;
    log.info("Received in total " + resultsReceived.get() + ", " + errorsReceived.get() + " errors.");
    log.info("Time spent receiving is " + elapsedMillis + " ms.");
}
} | class ResultCallBack implements JsonFeeder.ResultCallback {
final AtomicInteger resultsReceived = new AtomicInteger(0);
final AtomicInteger errorsReceived = new AtomicInteger(0);
final long startTimeMillis = System.currentTimeMillis();;
@Override
@Override
public void onError(Throwable error) {
log.severe("Feeding failed: " + error.getMessage());
}
@Override
public void onComplete() {
log.info("Feeding completed");
}
void dumpStatsToLog() {
log.info("Received in total " + resultsReceived.get() + ", " + errorsReceived.get() + " errors.");
log.info("Time spent receiving is " + (System.currentTimeMillis() - startTimeMillis) + " ms.");
}
} |
You should add a similar warning if 'min-group-coverage' has been configured. | public static DispatchTuning build(ModelElement contentXml, DeployLogger logger) {
DispatchTuning.Builder builder = new DispatchTuning.Builder();
ModelElement tuningElement = contentXml.child("tuning");
if (tuningElement == null) {
return builder.build();
}
ModelElement dispatchElement = tuningElement.child("dispatch");
if (dispatchElement == null) {
return builder.build();
}
builder.setMaxHitsPerPartition(dispatchElement.childAsInteger("max-hits-per-partition"));
builder.setTopKProbability(dispatchElement.childAsDouble("top-k-probability"));
builder.setDispatchPolicy(dispatchElement.childAsString("dispatch-policy"));
builder.setMinActiveDocsCoverage(dispatchElement.childAsDouble("min-active-docs-coverage"));
if (dispatchElement.child("use-local-node") != null)
logger.logApplicationPackage(Level.WARNING, "Attribute 'use-local-node' is deprecated and ignored: " +
"The local node will automatically be preferred when appropriate.");
return builder.build();
} | if (dispatchElement.child("use-local-node") != null) | public static DispatchTuning build(ModelElement contentXml, DeployLogger logger) {
DispatchTuning.Builder builder = new DispatchTuning.Builder();
ModelElement tuningElement = contentXml.child("tuning");
if (tuningElement == null) {
return builder.build();
}
ModelElement dispatchElement = tuningElement.child("dispatch");
if (dispatchElement == null) {
return builder.build();
}
builder.setMaxHitsPerPartition(dispatchElement.childAsInteger("max-hits-per-partition"));
builder.setTopKProbability(dispatchElement.childAsDouble("top-k-probability"));
builder.setDispatchPolicy(dispatchElement.childAsString("dispatch-policy"));
builder.setMinActiveDocsCoverage(dispatchElement.childAsDouble("min-active-docs-coverage"));
if (dispatchElement.child("min-group-coverage") != null)
logger.logApplicationPackage(Level.WARNING, "Attribute 'min-group-coverage' is deprecated and ignored: " +
"Use min-active-docs-coverage instead.");
if (dispatchElement.child("use-local-node") != null)
logger.logApplicationPackage(Level.WARNING, "Attribute 'use-local-node' is deprecated and ignored: " +
"The local node will automatically be preferred when appropriate.");
return builder.build();
} | class DomTuningDispatchBuilder {
} | class DomTuningDispatchBuilder {
} |
Good point; done | public static DispatchTuning build(ModelElement contentXml, DeployLogger logger) {
DispatchTuning.Builder builder = new DispatchTuning.Builder();
ModelElement tuningElement = contentXml.child("tuning");
if (tuningElement == null) {
return builder.build();
}
ModelElement dispatchElement = tuningElement.child("dispatch");
if (dispatchElement == null) {
return builder.build();
}
builder.setMaxHitsPerPartition(dispatchElement.childAsInteger("max-hits-per-partition"));
builder.setTopKProbability(dispatchElement.childAsDouble("top-k-probability"));
builder.setDispatchPolicy(dispatchElement.childAsString("dispatch-policy"));
builder.setMinActiveDocsCoverage(dispatchElement.childAsDouble("min-active-docs-coverage"));
if (dispatchElement.child("use-local-node") != null)
logger.logApplicationPackage(Level.WARNING, "Attribute 'use-local-node' is deprecated and ignored: " +
"The local node will automatically be preferred when appropriate.");
return builder.build();
} | if (dispatchElement.child("use-local-node") != null) | public static DispatchTuning build(ModelElement contentXml, DeployLogger logger) {
DispatchTuning.Builder builder = new DispatchTuning.Builder();
ModelElement tuningElement = contentXml.child("tuning");
if (tuningElement == null) {
return builder.build();
}
ModelElement dispatchElement = tuningElement.child("dispatch");
if (dispatchElement == null) {
return builder.build();
}
builder.setMaxHitsPerPartition(dispatchElement.childAsInteger("max-hits-per-partition"));
builder.setTopKProbability(dispatchElement.childAsDouble("top-k-probability"));
builder.setDispatchPolicy(dispatchElement.childAsString("dispatch-policy"));
builder.setMinActiveDocsCoverage(dispatchElement.childAsDouble("min-active-docs-coverage"));
if (dispatchElement.child("min-group-coverage") != null)
logger.logApplicationPackage(Level.WARNING, "Attribute 'min-group-coverage' is deprecated and ignored: " +
"Use min-active-docs-coverage instead.");
if (dispatchElement.child("use-local-node") != null)
logger.logApplicationPackage(Level.WARNING, "Attribute 'use-local-node' is deprecated and ignored: " +
"The local node will automatically be preferred when appropriate.");
return builder.build();
} | class DomTuningDispatchBuilder {
} | class DomTuningDispatchBuilder {
} |
This just changes the node state, need to actually make a call to AWS to remove the instance, see https://github.com/vespa-engine/vespa/blob/8ef499e16e9fb5daede071d36cb523f4d30538c0/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainer.java#L134:L135 | protected double maintain() {
if (!nodeRepository.zone().getCloud().dynamicProvisioning())
return 1.0;
NodeList parkedHosts = nodeRepository.nodes()
.list(Node.State.parked)
.nodeType(NodeType.host)
.not().matching(this::parkedByOperator)
.not().deprovisioning();
int hostsToExpire = Math.max(0, parkedHosts.size() - MAX_ALLOWED_PARKED_HOSTS);
parkedHosts.sortedBy(Comparator.comparing(this::parkedAt))
.first(hostsToExpire)
.forEach(host -> {
log.info("Allowed number of parked nodes exceeded. Recycling " + host.hostname());
nodeRepository.nodes().deprovision(host.hostname(), Agent.ParkedExpirer, Instant.now());
});
return 1.0;
} | nodeRepository.nodes().deprovision(host.hostname(), Agent.ParkedExpirer, Instant.now()); | protected double maintain() {
if (!nodeRepository.zone().getCloud().dynamicProvisioning())
return 1.0;
NodeList parkedHosts = nodeRepository.nodes()
.list(Node.State.parked)
.nodeType(NodeType.host)
.not().deprovisioning();
int hostsToExpire = Math.max(0, parkedHosts.size() - MAX_ALLOWED_PARKED_HOSTS);
parkedHosts.sortedBy(Comparator.comparing(this::parkedAt))
.first(hostsToExpire)
.forEach(host -> {
log.info("Allowed number of parked nodes exceeded. Recycling " + host.hostname());
nodeRepository.nodes().deallocate(host, Agent.ParkedExpirer, "Expired by ParkedExpirer");
});
return 1.0;
} | class ParkedExpirer extends NodeRepositoryMaintainer {
private static final int MAX_ALLOWED_PARKED_HOSTS = 20;
private static final Logger log = Logger.getLogger(ParkedExpirer.class.getName());
private final NodeRepository nodeRepository;
ParkedExpirer(NodeRepository nodeRepository, Duration interval, Metric metric) {
super(nodeRepository, interval, metric);
this.nodeRepository = nodeRepository;
}
@Override
private Instant parkedAt(Node node) {
return node.history().event(History.Event.Type.parked)
.map(History.Event::at)
.orElse(Instant.EPOCH);
}
private boolean parkedByOperator(Node node) {
return node.history().event(History.Event.Type.parked)
.map(History.Event::agent)
.map(Agent.operator::equals)
.orElse(false);
}
} | class ParkedExpirer extends NodeRepositoryMaintainer {
private static final int MAX_ALLOWED_PARKED_HOSTS = 20;
private static final Logger log = Logger.getLogger(ParkedExpirer.class.getName());
private final NodeRepository nodeRepository;
ParkedExpirer(NodeRepository nodeRepository, Duration interval, Metric metric) {
super(nodeRepository, interval, metric);
this.nodeRepository = nodeRepository;
}
@Override
private Instant parkedAt(Node node) {
return node.history().event(History.Event.Type.parked)
.map(History.Event::at)
.orElse(Instant.EPOCH);
}
} |
Won't `DynamicProvisioningMaintainer` make the call to AWS, as the hosts are set to `wantToDeprovision` here? | protected double maintain() {
if (!nodeRepository.zone().getCloud().dynamicProvisioning())
return 1.0;
NodeList parkedHosts = nodeRepository.nodes()
.list(Node.State.parked)
.nodeType(NodeType.host)
.not().matching(this::parkedByOperator)
.not().deprovisioning();
int hostsToExpire = Math.max(0, parkedHosts.size() - MAX_ALLOWED_PARKED_HOSTS);
parkedHosts.sortedBy(Comparator.comparing(this::parkedAt))
.first(hostsToExpire)
.forEach(host -> {
log.info("Allowed number of parked nodes exceeded. Recycling " + host.hostname());
nodeRepository.nodes().deprovision(host.hostname(), Agent.ParkedExpirer, Instant.now());
});
return 1.0;
} | nodeRepository.nodes().deprovision(host.hostname(), Agent.ParkedExpirer, Instant.now()); | protected double maintain() {
if (!nodeRepository.zone().getCloud().dynamicProvisioning())
return 1.0;
NodeList parkedHosts = nodeRepository.nodes()
.list(Node.State.parked)
.nodeType(NodeType.host)
.not().deprovisioning();
int hostsToExpire = Math.max(0, parkedHosts.size() - MAX_ALLOWED_PARKED_HOSTS);
parkedHosts.sortedBy(Comparator.comparing(this::parkedAt))
.first(hostsToExpire)
.forEach(host -> {
log.info("Allowed number of parked nodes exceeded. Recycling " + host.hostname());
nodeRepository.nodes().deallocate(host, Agent.ParkedExpirer, "Expired by ParkedExpirer");
});
return 1.0;
} | class ParkedExpirer extends NodeRepositoryMaintainer {
private static final int MAX_ALLOWED_PARKED_HOSTS = 20;
private static final Logger log = Logger.getLogger(ParkedExpirer.class.getName());
private final NodeRepository nodeRepository;
ParkedExpirer(NodeRepository nodeRepository, Duration interval, Metric metric) {
super(nodeRepository, interval, metric);
this.nodeRepository = nodeRepository;
}
@Override
private Instant parkedAt(Node node) {
return node.history().event(History.Event.Type.parked)
.map(History.Event::at)
.orElse(Instant.EPOCH);
}
private boolean parkedByOperator(Node node) {
return node.history().event(History.Event.Type.parked)
.map(History.Event::agent)
.map(Agent.operator::equals)
.orElse(false);
}
} | class ParkedExpirer extends NodeRepositoryMaintainer {
private static final int MAX_ALLOWED_PARKED_HOSTS = 20;
private static final Logger log = Logger.getLogger(ParkedExpirer.class.getName());
private final NodeRepository nodeRepository;
ParkedExpirer(NodeRepository nodeRepository, Duration interval, Metric metric) {
super(nodeRepository, interval, metric);
this.nodeRepository = nodeRepository;
}
@Override
private Instant parkedAt(Node node) {
return node.history().event(History.Event.Type.parked)
.map(History.Event::at)
.orElse(Instant.EPOCH);
}
} |
This creates an anonymous subclass of `HashMap`, which is not ideal. Consider doing `new HashMap<>(Map.of(...))` instead. Consider also extracting a variable for `System.currentTimeMillis` so that both versions have the same initial timestamp. | public AclMaintainer(ContainerOperations containerOperations, IPAddresses ipAddresses, Metrics metrics) {
this.containerOperations = containerOperations;
this.ipAddresses = ipAddresses;
this.metrics = metrics;
this.lastSuccess = new HashMap<>(){{
put(IPVersion.IPv4.id(), System.currentTimeMillis());
put(IPVersion.IPv6.id(), System.currentTimeMillis());
}};
} | this.lastSuccess = new HashMap<>(){{ | public AclMaintainer(ContainerOperations containerOperations, IPAddresses ipAddresses, Metrics metrics) {
this.containerOperations = containerOperations;
this.ipAddresses = ipAddresses;
this.metrics = metrics;
long timestamp = System.currentTimeMillis() / 1_000;
this.lastSuccess = new HashMap<>(Map.of(IPVersion.IPv4.id(), timestamp,
IPVersion.IPv6.id(), timestamp));
} | class AclMaintainer {
private static final Logger logger = Logger.getLogger(AclMaintainer.class.getName());
private final ContainerOperations containerOperations;
private final IPAddresses ipAddresses;
private final Metrics metrics;
private final Map<String, Long> lastSuccess;
private static final String METRIC_NAME_POSTFIX = ".acl.age";
public synchronized void converge(NodeAgentContext context) {
if (context.isDisabled(NodeAgentTask.AclMaintainer)) return;
boolean updatedIPv4 = editFlushOnError(context, IPVersion.IPv4, "filter", FilterTableLineEditor.from(context.acl(), IPVersion.IPv4));
boolean updatedIPv6 = editFlushOnError(context, IPVersion.IPv6, "filter", FilterTableLineEditor.from(context.acl(), IPVersion.IPv6));
updateMetric(context, updatedIPv4, IPVersion.IPv4.id());
updateMetric(context, updatedIPv6, IPVersion.IPv6.id());
ipAddresses.getAddress(context.hostname().value(), IPVersion.IPv4).ifPresent(addr -> applyRedirect(context, addr));
ipAddresses.getAddress(context.hostname().value(), IPVersion.IPv6).ifPresent(addr -> applyRedirect(context, addr));
}
private void applyRedirect(NodeAgentContext context, InetAddress address) {
IPVersion ipVersion = IPVersion.get(address);
String redirectRule = "-A OUTPUT -d " + InetAddresses.toAddrString(address) + ipVersion.singleHostCidr() + " -j REDIRECT";
editLogOnError(context, ipVersion, "nat", NatTableLineEditor.from(redirectRule));
}
private boolean editFlushOnError(NodeAgentContext context, IPVersion ipVersion, String table, LineEditor lineEditor) {
return edit(context, table, ipVersion, lineEditor, true);
}
private boolean editLogOnError(NodeAgentContext context, IPVersion ipVersion, String table, LineEditor lineEditor) {
return edit(context, table, ipVersion, lineEditor, false);
}
private boolean edit(NodeAgentContext context, String table, IPVersion ipVersion, LineEditor lineEditor, boolean flush) {
Editor editor = new Editor(
ipVersion.iptablesCmd() + "-" + table,
listTable(context, table, ipVersion),
restoreTable(context, table, ipVersion, flush),
lineEditor);
return editor.edit(message -> context.log(logger, message));
}
private Supplier<List<String>> listTable(NodeAgentContext context, String table, IPVersion ipVersion) {
return () -> containerOperations
.executeCommandInNetworkNamespace(context, ipVersion.iptablesCmd(), "-S", "-t", table)
.mapEachLine(String::trim);
}
private Consumer<List<String>> restoreTable(NodeAgentContext context, String table, IPVersion ipVersion, boolean flush) {
return list -> {
try (TemporaryIpTablesFileHandler fileHandler = new TemporaryIpTablesFileHandler(table)) {
String rules = String.join("\n", list);
String fileContent = "*" + table + "\n" + rules + "\nCOMMIT\n";
fileHandler.writeUtf8Content(fileContent);
containerOperations.executeCommandInNetworkNamespace(context, ipVersion.iptablesRestore(), fileHandler.absolutePath());
} catch (Exception e) {
if (flush) {
context.log(logger, Level.SEVERE, "Exception occurred while syncing iptable " + table + ", attempting rollback", e);
try {
containerOperations.executeCommandInNetworkNamespace(context, ipVersion.iptablesCmd(), "-F", "-t", table);
} catch (Exception ne) {
context.log(logger, Level.SEVERE, "Rollback of table " + table + " failed, giving up", ne);
}
} else {
context.log(logger, Level.WARNING, "Unable to sync iptables for " + table, e);
}
}
};
}
void updateMetric(NodeAgentContext context, boolean updated, String ipVersion) {
Dimensions dimensions = generateDimensions(context);
if (!updated) {
metrics.declareGauge(Metrics.APPLICATION_NODE, ipVersion + METRIC_NAME_POSTFIX, dimensions, Metrics.DimensionType.PRETAGGED)
.sample((System.currentTimeMillis() - lastSuccess.get(ipVersion)) / 1000);
return;
}
metrics.declareGauge(Metrics.APPLICATION_NODE, ipVersion + METRIC_NAME_POSTFIX, dimensions, Metrics.DimensionType.PRETAGGED)
.sample(0);
lastSuccess.put(ipVersion, System.currentTimeMillis());
}
private Dimensions generateDimensions(NodeAgentContext context) {
NodeSpec node = context.node();
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", node.hostname())
.add("zone", context.zone().getId().value());
node.currentVespaVersion().ifPresent(vespaVersion -> dimensionsBuilder.add("vespaVersion", vespaVersion.toFullString()));
return dimensionsBuilder.build();
}
private static class TemporaryIpTablesFileHandler implements AutoCloseable {
private final Path path;
private TemporaryIpTablesFileHandler(String table) {
this.path = uncheck(() -> Files.createTempFile("iptables-restore", "." + table));
}
private void writeUtf8Content(String content) throws IOException {
Files.write(path, content.getBytes(StandardCharsets.UTF_8));
}
private String absolutePath() {
return path.toAbsolutePath().toString();
}
@Override
public void close() throws IOException {
Files.deleteIfExists(path);
}
}
} | class AclMaintainer {
private static final Logger logger = Logger.getLogger(AclMaintainer.class.getName());
private final ContainerOperations containerOperations;
private final IPAddresses ipAddresses;
private final Metrics metrics;
private final Map<String, Long> lastSuccess;
private static final String METRIC_NAME_POSTFIX = ".acl.age";
public synchronized void converge(NodeAgentContext context) {
if (context.isDisabled(NodeAgentTask.AclMaintainer)) return;
boolean updatedIPv4 = editFlushOnError(context, IPVersion.IPv4, "filter", FilterTableLineEditor.from(context.acl(), IPVersion.IPv4));
boolean updatedIPv6 = editFlushOnError(context, IPVersion.IPv6, "filter", FilterTableLineEditor.from(context.acl(), IPVersion.IPv6));
Dimensions dimensions = generateDimensions(context);
updateMetric(dimensions, updatedIPv4, IPVersion.IPv4.id());
updateMetric(dimensions, updatedIPv6, IPVersion.IPv6.id());
ipAddresses.getAddress(context.hostname().value(), IPVersion.IPv4).ifPresent(addr -> applyRedirect(context, addr));
ipAddresses.getAddress(context.hostname().value(), IPVersion.IPv6).ifPresent(addr -> applyRedirect(context, addr));
}
private void applyRedirect(NodeAgentContext context, InetAddress address) {
IPVersion ipVersion = IPVersion.get(address);
String redirectRule = "-A OUTPUT -d " + InetAddresses.toAddrString(address) + ipVersion.singleHostCidr() + " -j REDIRECT";
editLogOnError(context, ipVersion, "nat", NatTableLineEditor.from(redirectRule));
}
private boolean editFlushOnError(NodeAgentContext context, IPVersion ipVersion, String table, LineEditor lineEditor) {
return edit(context, table, ipVersion, lineEditor, true);
}
private boolean editLogOnError(NodeAgentContext context, IPVersion ipVersion, String table, LineEditor lineEditor) {
return edit(context, table, ipVersion, lineEditor, false);
}
private boolean edit(NodeAgentContext context, String table, IPVersion ipVersion, LineEditor lineEditor, boolean flush) {
Editor editor = new Editor(
ipVersion.iptablesCmd() + "-" + table,
listTable(context, table, ipVersion),
restoreTable(context, table, ipVersion, flush),
lineEditor);
return editor.edit(message -> context.log(logger, message));
}
private Supplier<List<String>> listTable(NodeAgentContext context, String table, IPVersion ipVersion) {
return () -> containerOperations
.executeCommandInNetworkNamespace(context, ipVersion.iptablesCmd(), "-S", "-t", table)
.mapEachLine(String::trim);
}
private Consumer<List<String>> restoreTable(NodeAgentContext context, String table, IPVersion ipVersion, boolean flush) {
return list -> {
try (TemporaryIpTablesFileHandler fileHandler = new TemporaryIpTablesFileHandler(table)) {
String rules = String.join("\n", list);
String fileContent = "*" + table + "\n" + rules + "\nCOMMIT\n";
fileHandler.writeUtf8Content(fileContent);
containerOperations.executeCommandInNetworkNamespace(context, ipVersion.iptablesRestore(), fileHandler.absolutePath());
} catch (Exception e) {
if (flush) {
context.log(logger, Level.SEVERE, "Exception occurred while syncing iptable " + table + ", attempting rollback", e);
try {
containerOperations.executeCommandInNetworkNamespace(context, ipVersion.iptablesCmd(), "-F", "-t", table);
} catch (Exception ne) {
context.log(logger, Level.SEVERE, "Rollback of table " + table + " failed, giving up", ne);
}
} else {
context.log(logger, Level.WARNING, "Unable to sync iptables for " + table, e);
}
}
};
}
void updateMetric(Dimensions dimensions, boolean updated, String ipVersion) {
long updateAgeInSec;
long timestamp = System.currentTimeMillis() / 1_000;
if (updated) {
updateAgeInSec = 0;
lastSuccess.put(ipVersion, timestamp);
} else {
updateAgeInSec = timestamp - lastSuccess.get(ipVersion);
}
metrics.declareGauge(Metrics.APPLICATION_NODE, ipVersion + METRIC_NAME_POSTFIX, dimensions, Metrics.DimensionType.PRETAGGED)
.sample(updateAgeInSec);
}
private Dimensions generateDimensions(NodeAgentContext context) {
NodeSpec node = context.node();
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", node.hostname())
.add("zone", context.zone().getId().value());
node.currentVespaVersion().ifPresent(vespaVersion -> dimensionsBuilder.add("vespaVersion", vespaVersion.toFullString()));
return dimensionsBuilder.build();
}
private static class TemporaryIpTablesFileHandler implements AutoCloseable {
private final Path path;
private TemporaryIpTablesFileHandler(String table) {
this.path = uncheck(() -> Files.createTempFile("iptables-restore", "." + table));
}
private void writeUtf8Content(String content) throws IOException {
Files.write(path, content.getBytes(StandardCharsets.UTF_8));
}
private String absolutePath() {
return path.toAbsolutePath().toString();
}
@Override
public void close() throws IOException {
Files.deleteIfExists(path);
}
}
} |
Seems to me like this is equal to setting the host to `dirty`, both of these just modify the second condition for DPM to deprovision and neither actually helps if there are allocated children? | protected double maintain() {
if (!nodeRepository.zone().getCloud().dynamicProvisioning())
return 1.0;
NodeList parkedHosts = nodeRepository.nodes()
.list(Node.State.parked)
.nodeType(NodeType.host)
.not().matching(this::parkedByOperator)
.not().deprovisioning();
int hostsToExpire = Math.max(0, parkedHosts.size() - MAX_ALLOWED_PARKED_HOSTS);
parkedHosts.sortedBy(Comparator.comparing(this::parkedAt))
.first(hostsToExpire)
.forEach(host -> {
log.info("Allowed number of parked nodes exceeded. Recycling " + host.hostname());
nodeRepository.nodes().deprovision(host.hostname(), Agent.ParkedExpirer, Instant.now());
});
return 1.0;
} | nodeRepository.nodes().deprovision(host.hostname(), Agent.ParkedExpirer, Instant.now()); | protected double maintain() {
if (!nodeRepository.zone().getCloud().dynamicProvisioning())
return 1.0;
NodeList parkedHosts = nodeRepository.nodes()
.list(Node.State.parked)
.nodeType(NodeType.host)
.not().deprovisioning();
int hostsToExpire = Math.max(0, parkedHosts.size() - MAX_ALLOWED_PARKED_HOSTS);
parkedHosts.sortedBy(Comparator.comparing(this::parkedAt))
.first(hostsToExpire)
.forEach(host -> {
log.info("Allowed number of parked nodes exceeded. Recycling " + host.hostname());
nodeRepository.nodes().deallocate(host, Agent.ParkedExpirer, "Expired by ParkedExpirer");
});
return 1.0;
} | class ParkedExpirer extends NodeRepositoryMaintainer {
private static final int MAX_ALLOWED_PARKED_HOSTS = 20;
private static final Logger log = Logger.getLogger(ParkedExpirer.class.getName());
private final NodeRepository nodeRepository;
ParkedExpirer(NodeRepository nodeRepository, Duration interval, Metric metric) {
super(nodeRepository, interval, metric);
this.nodeRepository = nodeRepository;
}
@Override
private Instant parkedAt(Node node) {
return node.history().event(History.Event.Type.parked)
.map(History.Event::at)
.orElse(Instant.EPOCH);
}
private boolean parkedByOperator(Node node) {
return node.history().event(History.Event.Type.parked)
.map(History.Event::agent)
.map(Agent.operator::equals)
.orElse(false);
}
} | class ParkedExpirer extends NodeRepositoryMaintainer {
private static final int MAX_ALLOWED_PARKED_HOSTS = 20;
private static final Logger log = Logger.getLogger(ParkedExpirer.class.getName());
private final NodeRepository nodeRepository;
ParkedExpirer(NodeRepository nodeRepository, Duration interval, Metric metric) {
super(nodeRepository, interval, metric);
this.nodeRepository = nodeRepository;
}
@Override
private Instant parkedAt(Node node) {
return node.history().event(History.Event.Type.parked)
.map(History.Event::at)
.orElse(Instant.EPOCH);
}
} |
Do we want to expire non-empty hosts at all though? If I remember correctly we're adding this to handle hosts expired by ProvisionedExpirer (which will always be empty). | protected double maintain() {
if (!nodeRepository.zone().getCloud().dynamicProvisioning())
return 1.0;
NodeList parkedHosts = nodeRepository.nodes()
.list(Node.State.parked)
.nodeType(NodeType.host)
.not().matching(this::parkedByOperator)
.not().deprovisioning();
int hostsToExpire = Math.max(0, parkedHosts.size() - MAX_ALLOWED_PARKED_HOSTS);
parkedHosts.sortedBy(Comparator.comparing(this::parkedAt))
.first(hostsToExpire)
.forEach(host -> {
log.info("Allowed number of parked nodes exceeded. Recycling " + host.hostname());
nodeRepository.nodes().deprovision(host.hostname(), Agent.ParkedExpirer, Instant.now());
});
return 1.0;
} | nodeRepository.nodes().deprovision(host.hostname(), Agent.ParkedExpirer, Instant.now()); | protected double maintain() {
if (!nodeRepository.zone().getCloud().dynamicProvisioning())
return 1.0;
NodeList parkedHosts = nodeRepository.nodes()
.list(Node.State.parked)
.nodeType(NodeType.host)
.not().deprovisioning();
int hostsToExpire = Math.max(0, parkedHosts.size() - MAX_ALLOWED_PARKED_HOSTS);
parkedHosts.sortedBy(Comparator.comparing(this::parkedAt))
.first(hostsToExpire)
.forEach(host -> {
log.info("Allowed number of parked nodes exceeded. Recycling " + host.hostname());
nodeRepository.nodes().deallocate(host, Agent.ParkedExpirer, "Expired by ParkedExpirer");
});
return 1.0;
} | class ParkedExpirer extends NodeRepositoryMaintainer {
private static final int MAX_ALLOWED_PARKED_HOSTS = 20;
private static final Logger log = Logger.getLogger(ParkedExpirer.class.getName());
private final NodeRepository nodeRepository;
ParkedExpirer(NodeRepository nodeRepository, Duration interval, Metric metric) {
super(nodeRepository, interval, metric);
this.nodeRepository = nodeRepository;
}
@Override
private Instant parkedAt(Node node) {
return node.history().event(History.Event.Type.parked)
.map(History.Event::at)
.orElse(Instant.EPOCH);
}
private boolean parkedByOperator(Node node) {
return node.history().event(History.Event.Type.parked)
.map(History.Event::agent)
.map(Agent.operator::equals)
.orElse(false);
}
} | class ParkedExpirer extends NodeRepositoryMaintainer {
private static final int MAX_ALLOWED_PARKED_HOSTS = 20;
private static final Logger log = Logger.getLogger(ParkedExpirer.class.getName());
private final NodeRepository nodeRepository;
ParkedExpirer(NodeRepository nodeRepository, Duration interval, Metric metric) {
super(nodeRepository, interval, metric);
this.nodeRepository = nodeRepository;
}
@Override
private Instant parkedAt(Node node) {
return node.history().event(History.Event.Type.parked)
.map(History.Event::at)
.orElse(Instant.EPOCH);
}
} |
In that case, do we even need this maintainer? Cant ProvisionedExpirer just delete them directly? | protected double maintain() {
if (!nodeRepository.zone().getCloud().dynamicProvisioning())
return 1.0;
NodeList parkedHosts = nodeRepository.nodes()
.list(Node.State.parked)
.nodeType(NodeType.host)
.not().matching(this::parkedByOperator)
.not().deprovisioning();
int hostsToExpire = Math.max(0, parkedHosts.size() - MAX_ALLOWED_PARKED_HOSTS);
parkedHosts.sortedBy(Comparator.comparing(this::parkedAt))
.first(hostsToExpire)
.forEach(host -> {
log.info("Allowed number of parked nodes exceeded. Recycling " + host.hostname());
nodeRepository.nodes().deprovision(host.hostname(), Agent.ParkedExpirer, Instant.now());
});
return 1.0;
} | nodeRepository.nodes().deprovision(host.hostname(), Agent.ParkedExpirer, Instant.now()); | protected double maintain() {
if (!nodeRepository.zone().getCloud().dynamicProvisioning())
return 1.0;
NodeList parkedHosts = nodeRepository.nodes()
.list(Node.State.parked)
.nodeType(NodeType.host)
.not().deprovisioning();
int hostsToExpire = Math.max(0, parkedHosts.size() - MAX_ALLOWED_PARKED_HOSTS);
parkedHosts.sortedBy(Comparator.comparing(this::parkedAt))
.first(hostsToExpire)
.forEach(host -> {
log.info("Allowed number of parked nodes exceeded. Recycling " + host.hostname());
nodeRepository.nodes().deallocate(host, Agent.ParkedExpirer, "Expired by ParkedExpirer");
});
return 1.0;
} | class ParkedExpirer extends NodeRepositoryMaintainer {
private static final int MAX_ALLOWED_PARKED_HOSTS = 20;
private static final Logger log = Logger.getLogger(ParkedExpirer.class.getName());
private final NodeRepository nodeRepository;
ParkedExpirer(NodeRepository nodeRepository, Duration interval, Metric metric) {
super(nodeRepository, interval, metric);
this.nodeRepository = nodeRepository;
}
@Override
private Instant parkedAt(Node node) {
return node.history().event(History.Event.Type.parked)
.map(History.Event::at)
.orElse(Instant.EPOCH);
}
private boolean parkedByOperator(Node node) {
return node.history().event(History.Event.Type.parked)
.map(History.Event::agent)
.map(Agent.operator::equals)
.orElse(false);
}
} | class ParkedExpirer extends NodeRepositoryMaintainer {
private static final int MAX_ALLOWED_PARKED_HOSTS = 20;
private static final Logger log = Logger.getLogger(ParkedExpirer.class.getName());
private final NodeRepository nodeRepository;
ParkedExpirer(NodeRepository nodeRepository, Duration interval, Metric metric) {
super(nodeRepository, interval, metric);
this.nodeRepository = nodeRepository;
}
@Override
private Instant parkedAt(Node node) {
return node.history().event(History.Event.Type.parked)
.map(History.Event::at)
.orElse(Instant.EPOCH);
}
} |
True, ProvisionedExpirer could do it instead. Park as it does today, but mark excess hosts wantToDeprovision could work. | protected double maintain() {
if (!nodeRepository.zone().getCloud().dynamicProvisioning())
return 1.0;
NodeList parkedHosts = nodeRepository.nodes()
.list(Node.State.parked)
.nodeType(NodeType.host)
.not().matching(this::parkedByOperator)
.not().deprovisioning();
int hostsToExpire = Math.max(0, parkedHosts.size() - MAX_ALLOWED_PARKED_HOSTS);
parkedHosts.sortedBy(Comparator.comparing(this::parkedAt))
.first(hostsToExpire)
.forEach(host -> {
log.info("Allowed number of parked nodes exceeded. Recycling " + host.hostname());
nodeRepository.nodes().deprovision(host.hostname(), Agent.ParkedExpirer, Instant.now());
});
return 1.0;
} | nodeRepository.nodes().deprovision(host.hostname(), Agent.ParkedExpirer, Instant.now()); | protected double maintain() {
if (!nodeRepository.zone().getCloud().dynamicProvisioning())
return 1.0;
NodeList parkedHosts = nodeRepository.nodes()
.list(Node.State.parked)
.nodeType(NodeType.host)
.not().deprovisioning();
int hostsToExpire = Math.max(0, parkedHosts.size() - MAX_ALLOWED_PARKED_HOSTS);
parkedHosts.sortedBy(Comparator.comparing(this::parkedAt))
.first(hostsToExpire)
.forEach(host -> {
log.info("Allowed number of parked nodes exceeded. Recycling " + host.hostname());
nodeRepository.nodes().deallocate(host, Agent.ParkedExpirer, "Expired by ParkedExpirer");
});
return 1.0;
} | class ParkedExpirer extends NodeRepositoryMaintainer {
private static final int MAX_ALLOWED_PARKED_HOSTS = 20;
private static final Logger log = Logger.getLogger(ParkedExpirer.class.getName());
private final NodeRepository nodeRepository;
ParkedExpirer(NodeRepository nodeRepository, Duration interval, Metric metric) {
super(nodeRepository, interval, metric);
this.nodeRepository = nodeRepository;
}
@Override
private Instant parkedAt(Node node) {
return node.history().event(History.Event.Type.parked)
.map(History.Event::at)
.orElse(Instant.EPOCH);
}
private boolean parkedByOperator(Node node) {
return node.history().event(History.Event.Type.parked)
.map(History.Event::agent)
.map(Agent.operator::equals)
.orElse(false);
}
} | class ParkedExpirer extends NodeRepositoryMaintainer {
private static final int MAX_ALLOWED_PARKED_HOSTS = 20;
private static final Logger log = Logger.getLogger(ParkedExpirer.class.getName());
private final NodeRepository nodeRepository;
ParkedExpirer(NodeRepository nodeRepository, Duration interval, Metric metric) {
super(nodeRepository, interval, metric);
this.nodeRepository = nodeRepository;
}
@Override
private Instant parkedAt(Node node) {
return node.history().event(History.Event.Type.parked)
.map(History.Event::at)
.orElse(Instant.EPOCH);
}
} |
The reason it became a separate maintainer is we wanted to remove the oldest parked hosts, but I suppose that's not really required. If anything, it could be an annoyance that we continuously rotate the set of parked hosts. | protected double maintain() {
if (!nodeRepository.zone().getCloud().dynamicProvisioning())
return 1.0;
NodeList parkedHosts = nodeRepository.nodes()
.list(Node.State.parked)
.nodeType(NodeType.host)
.not().matching(this::parkedByOperator)
.not().deprovisioning();
int hostsToExpire = Math.max(0, parkedHosts.size() - MAX_ALLOWED_PARKED_HOSTS);
parkedHosts.sortedBy(Comparator.comparing(this::parkedAt))
.first(hostsToExpire)
.forEach(host -> {
log.info("Allowed number of parked nodes exceeded. Recycling " + host.hostname());
nodeRepository.nodes().deprovision(host.hostname(), Agent.ParkedExpirer, Instant.now());
});
return 1.0;
} | nodeRepository.nodes().deprovision(host.hostname(), Agent.ParkedExpirer, Instant.now()); | protected double maintain() {
if (!nodeRepository.zone().getCloud().dynamicProvisioning())
return 1.0;
NodeList parkedHosts = nodeRepository.nodes()
.list(Node.State.parked)
.nodeType(NodeType.host)
.not().deprovisioning();
int hostsToExpire = Math.max(0, parkedHosts.size() - MAX_ALLOWED_PARKED_HOSTS);
parkedHosts.sortedBy(Comparator.comparing(this::parkedAt))
.first(hostsToExpire)
.forEach(host -> {
log.info("Allowed number of parked nodes exceeded. Recycling " + host.hostname());
nodeRepository.nodes().deallocate(host, Agent.ParkedExpirer, "Expired by ParkedExpirer");
});
return 1.0;
} | class ParkedExpirer extends NodeRepositoryMaintainer {
private static final int MAX_ALLOWED_PARKED_HOSTS = 20;
private static final Logger log = Logger.getLogger(ParkedExpirer.class.getName());
private final NodeRepository nodeRepository;
ParkedExpirer(NodeRepository nodeRepository, Duration interval, Metric metric) {
super(nodeRepository, interval, metric);
this.nodeRepository = nodeRepository;
}
@Override
private Instant parkedAt(Node node) {
return node.history().event(History.Event.Type.parked)
.map(History.Event::at)
.orElse(Instant.EPOCH);
}
private boolean parkedByOperator(Node node) {
return node.history().event(History.Event.Type.parked)
.map(History.Event::agent)
.map(Agent.operator::equals)
.orElse(false);
}
} | class ParkedExpirer extends NodeRepositoryMaintainer {
private static final int MAX_ALLOWED_PARKED_HOSTS = 20;
private static final Logger log = Logger.getLogger(ParkedExpirer.class.getName());
private final NodeRepository nodeRepository;
ParkedExpirer(NodeRepository nodeRepository, Duration interval, Metric metric) {
super(nodeRepository, interval, metric);
this.nodeRepository = nodeRepository;
}
@Override
private Instant parkedAt(Node node) {
return node.history().event(History.Event.Type.parked)
.map(History.Event::at)
.orElse(Instant.EPOCH);
}
} |
Yes. Let's just keep the oldest ones for now. So basically: 1. ProvisionedExpirer parks hosts that have been too long in provisioned (same as today) 2. ProvisionedExpirer sets wantToDeprovision on the host to be parked if we already have >=20 hosts parked by Agent.ProvisionedExpirer | protected double maintain() {
if (!nodeRepository.zone().getCloud().dynamicProvisioning())
return 1.0;
NodeList parkedHosts = nodeRepository.nodes()
.list(Node.State.parked)
.nodeType(NodeType.host)
.not().matching(this::parkedByOperator)
.not().deprovisioning();
int hostsToExpire = Math.max(0, parkedHosts.size() - MAX_ALLOWED_PARKED_HOSTS);
parkedHosts.sortedBy(Comparator.comparing(this::parkedAt))
.first(hostsToExpire)
.forEach(host -> {
log.info("Allowed number of parked nodes exceeded. Recycling " + host.hostname());
nodeRepository.nodes().deprovision(host.hostname(), Agent.ParkedExpirer, Instant.now());
});
return 1.0;
} | nodeRepository.nodes().deprovision(host.hostname(), Agent.ParkedExpirer, Instant.now()); | protected double maintain() {
if (!nodeRepository.zone().getCloud().dynamicProvisioning())
return 1.0;
NodeList parkedHosts = nodeRepository.nodes()
.list(Node.State.parked)
.nodeType(NodeType.host)
.not().deprovisioning();
int hostsToExpire = Math.max(0, parkedHosts.size() - MAX_ALLOWED_PARKED_HOSTS);
parkedHosts.sortedBy(Comparator.comparing(this::parkedAt))
.first(hostsToExpire)
.forEach(host -> {
log.info("Allowed number of parked nodes exceeded. Recycling " + host.hostname());
nodeRepository.nodes().deallocate(host, Agent.ParkedExpirer, "Expired by ParkedExpirer");
});
return 1.0;
} | class ParkedExpirer extends NodeRepositoryMaintainer {
private static final int MAX_ALLOWED_PARKED_HOSTS = 20;
private static final Logger log = Logger.getLogger(ParkedExpirer.class.getName());
private final NodeRepository nodeRepository;
ParkedExpirer(NodeRepository nodeRepository, Duration interval, Metric metric) {
super(nodeRepository, interval, metric);
this.nodeRepository = nodeRepository;
}
@Override
private Instant parkedAt(Node node) {
return node.history().event(History.Event.Type.parked)
.map(History.Event::at)
.orElse(Instant.EPOCH);
}
private boolean parkedByOperator(Node node) {
return node.history().event(History.Event.Type.parked)
.map(History.Event::agent)
.map(Agent.operator::equals)
.orElse(false);
}
} | class ParkedExpirer extends NodeRepositoryMaintainer {
private static final int MAX_ALLOWED_PARKED_HOSTS = 20;
private static final Logger log = Logger.getLogger(ParkedExpirer.class.getName());
private final NodeRepository nodeRepository;
ParkedExpirer(NodeRepository nodeRepository, Duration interval, Metric metric) {
super(nodeRepository, interval, metric);
this.nodeRepository = nodeRepository;
}
@Override
private Instant parkedAt(Node node) {
return node.history().event(History.Event.Type.parked)
.map(History.Event::at)
.orElse(Instant.EPOCH);
}
} |
Should use `nodeRepository.clock().instant()` instead of `Instant.now()`. | protected void expire(List<Node> expired) {
int previouslyExpired = numberOfPreviouslyExpired();
for (Node expiredNode : expired) {
if (expiredNode.type() != NodeType.host)
continue;
nodeRepository().nodes().parkRecursively(expiredNode.hostname(), Agent.ProvisionedExpirer, "Node is stuck in provisioned");
if (MAXIMUM_ALLOWED_EXPIRED_HOSTS < ++previouslyExpired) {
nodeRepository.nodes().deprovision(expiredNode.hostname(), Agent.ProvisionedExpirer, Instant.now());
}
}
} | nodeRepository.nodes().deprovision(expiredNode.hostname(), Agent.ProvisionedExpirer, Instant.now()); | protected void expire(List<Node> expired) {
int previouslyExpired = numberOfPreviouslyExpired();
for (Node expiredNode : expired) {
if (expiredNode.type() != NodeType.host)
continue;
nodeRepository().nodes().parkRecursively(expiredNode.hostname(), Agent.ProvisionedExpirer, "Node is stuck in provisioned");
if (MAXIMUM_ALLOWED_EXPIRED_HOSTS < ++previouslyExpired) {
nodeRepository.nodes().deprovision(expiredNode.hostname(), Agent.ProvisionedExpirer, nodeRepository.clock().instant());
}
}
} | class ProvisionedExpirer extends Expirer {
private final NodeRepository nodeRepository;
// Cap on hosts this expirer is allowed to keep in parked; beyond this, expired hosts are deprovisioned.
private static final int MAXIMUM_ALLOWED_EXPIRED_HOSTS = 20;
// Expires hosts stuck in the provisioned state past the given timeout.
ProvisionedExpirer(NodeRepository nodeRepository, Duration dirtyTimeout, Metric metric) {
super(Node.State.provisioned, History.Event.Type.provisioned, nodeRepository, dirtyTimeout, metric);
this.nodeRepository = nodeRepository;
}
/**
 * Counts hosts that were already parked by this expirer and are not being deprovisioned.
 */
// Note: the stray @Override is removed — a private method cannot override anything,
// and the annotation on it is a compile error.
private int numberOfPreviouslyExpired() {
    return nodeRepository.nodes()
                         .list(Node.State.parked)
                         .nodeType(NodeType.host)
                         .matching(this::parkedByProvisionedExpirer)
                         .not().deprovisioning()
                         .size();
}
/** Whether this node's recorded parked event was caused by the ProvisionedExpirer itself. */
private boolean parkedByProvisionedExpirer(Node node) {
    Optional<History.Event> parkedEvent = node.history().event(History.Event.Type.parked);
    return parkedEvent.map(History.Event::agent)
                      .filter(Agent.ProvisionedExpirer::equals)
                      .isPresent();
}
} | class ProvisionedExpirer extends Expirer {
private final NodeRepository nodeRepository;
// Cap on hosts this expirer is allowed to keep in parked; beyond this, expired hosts are deprovisioned.
private static final int MAXIMUM_ALLOWED_EXPIRED_HOSTS = 20;
// Expires hosts stuck in the provisioned state past the given timeout.
ProvisionedExpirer(NodeRepository nodeRepository, Duration dirtyTimeout, Metric metric) {
super(Node.State.provisioned, History.Event.Type.provisioned, nodeRepository, dirtyTimeout, metric);
this.nodeRepository = nodeRepository;
}
/**
 * Counts hosts that were already parked by this expirer and are not being deprovisioned.
 */
// Note: the stray @Override is removed — a private method cannot override anything,
// and the annotation on it is a compile error.
private int numberOfPreviouslyExpired() {
    return nodeRepository.nodes()
                         .list(Node.State.parked)
                         .nodeType(NodeType.host)
                         .matching(this::parkedByProvisionedExpirer)
                         .not().deprovisioning()
                         .size();
}
/** Whether this node's recorded parked event was caused by the ProvisionedExpirer itself. */
private boolean parkedByProvisionedExpirer(Node node) {
    Optional<History.Event> parkedEvent = node.history().event(History.Event.Type.parked);
    return parkedEvent.map(History.Event::agent)
                      .filter(Agent.ProvisionedExpirer::equals)
                      .isPresent();
}
} |
We should probably also do the other cleanup, like removing DNS entries? | public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) {
applicationPackageValidator.validate(application.get(), applicationPackage, clock.instant());
application = application.with(applicationPackage.deploymentSpec());
application = application.with(applicationPackage.validationOverrides());
var existingInstances = application.get().instances().keySet();
var declaredInstances = applicationPackage.deploymentSpec().instanceNames();
for (var name : declaredInstances)
if ( ! existingInstances.contains(name))
application = withNewInstance(application, application.get().id().instance(name));
for (InstanceName name : existingInstances) {
application = withoutDeletedDeployments(application, name);
}
for (InstanceName instance : declaredInstances)
if (applicationPackage.deploymentSpec().requireInstance(instance).concerns(Environment.prod))
application = controller.routing().assignRotations(application, instance);
controller.jobController().deploymentStatus(application.get());
for (var name : existingInstances)
if ( ! declaredInstances.contains(name))
controller.notificationsDb().removeNotifications(NotificationSource.from(application.get().id().instance(name)));
store(application);
return application;
} | controller.notificationsDb().removeNotifications(NotificationSource.from(application.get().id().instance(name))); | public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) {
applicationPackageValidator.validate(application.get(), applicationPackage, clock.instant());
application = application.with(applicationPackage.deploymentSpec());
application = application.with(applicationPackage.validationOverrides());
var existingInstances = application.get().instances().keySet();
var declaredInstances = applicationPackage.deploymentSpec().instanceNames();
for (var name : declaredInstances)
if ( ! existingInstances.contains(name))
application = withNewInstance(application, application.get().id().instance(name));
for (InstanceName name : existingInstances) {
application = withoutDeletedDeployments(application, name);
}
for (InstanceName instance : declaredInstances)
if (applicationPackage.deploymentSpec().requireInstance(instance).concerns(Environment.prod))
application = controller.routing().assignRotations(application, instance);
controller.jobController().deploymentStatus(application.get());
for (var name : existingInstances)
if ( ! declaredInstances.contains(name))
controller.notificationsDb().removeNotifications(NotificationSource.from(application.get().id().instance(name)));
store(application);
return application;
} | class ApplicationController {
private static final Logger log = Logger.getLogger(ApplicationController.class.getName());
/** The controller owning this */
private final Controller controller;
/** For persistence */
private final CuratorDb curator;
private final ArtifactRepository artifactRepository;
private final ApplicationStore applicationStore;
private final AccessControl accessControl;
private final ConfigServer configServer;
private final Clock clock;
private final DeploymentTrigger deploymentTrigger;
private final ApplicationPackageValidator applicationPackageValidator;
private final EndpointCertificates endpointCertificates;
private final StringFlag dockerImageRepoFlag;
private final BillingController billingController;
// Wires collaborators from the controller's service registry, and schedules a one-off
// rewrite of all stored applications shortly after construction (see Once.after below).
ApplicationController(Controller controller, CuratorDb curator, AccessControl accessControl, Clock clock,
FlagSource flagSource, BillingController billingController) {
this.controller = controller;
this.curator = curator;
this.accessControl = accessControl;
this.configServer = controller.serviceRegistry().configServer();
this.clock = clock;
this.artifactRepository = controller.serviceRegistry().artifactRepository();
this.applicationStore = controller.serviceRegistry().applicationStore();
this.dockerImageRepoFlag = PermanentFlags.DOCKER_IMAGE_REPO.bindTo(flagSource);
this.billingController = billingController;
deploymentTrigger = new DeploymentTrigger(controller, clock);
applicationPackageValidator = new ApplicationPackageValidator(controller);
endpointCertificates = new EndpointCertificates(controller,
controller.serviceRegistry().endpointCertificateProvider(),
controller.serviceRegistry().endpointCertificateValidator());
// One-off migration pass: rewrite every application, adding any instance declared in
// its deployment spec but missing from its instance set, then store it back.
Once.after(Duration.ofMinutes(1), () -> {
Instant start = clock.instant();
int count = 0;
for (TenantAndApplicationId id : curator.readApplicationIds()) {
lockApplicationIfPresent(id, application -> {
for (InstanceName instance : application.get().deploymentSpec().instanceNames())
if (!application.get().instances().containsKey(instance))
application = withNewInstance(application, id.instance(instance));
store(application);
});
count++; // counts every id, also those skipped because they vanished under the lock
}
log.log(Level.INFO, String.format("Wrote %d applications in %s", count,
Duration.between(start, clock.instant())));
});
}
/** Returns the application with the given id, or an empty Optional if it is not present. */
public Optional<Application> getApplication(TenantAndApplicationId id) {
return curator.readApplication(id);
}
/** Returns the instance with the given id, or an empty Optional if it is not present. */
public Optional<Instance> getInstance(ApplicationId id) {
return getApplication(TenantAndApplicationId.from(id)).flatMap(application -> application.get(id.instance()));
}
/**
* Triggers reindexing for the given document types in the given clusters, for the given application.
*
* If no clusters are given, reindexing is triggered for the entire application; otherwise
* if no document types are given, reindexing is triggered for all given clusters; otherwise
* reindexing is triggered for the Cartesian product of the given clusters and document types.
*/
public void reindex(ApplicationId id, ZoneId zoneId, List<String> clusterNames, List<String> documentTypes, boolean indexedOnly) {
configServer.reindex(new DeploymentId(id, zoneId), clusterNames, documentTypes, indexedOnly);
}
/** Returns the reindexing status for the given application in the given zone. */
public ApplicationReindexing applicationReindexing(ApplicationId id, ZoneId zoneId) {
return configServer.getReindexing(new DeploymentId(id, zoneId));
}
/** Enables reindexing for the given application in the given zone. */
public void enableReindexing(ApplicationId id, ZoneId zoneId) {
configServer.enableReindexing(new DeploymentId(id, zoneId));
}
/** Disables reindexing for the given application in the given zone. */
public void disableReindexing(ApplicationId id, ZoneId zoneId) {
configServer.disableReindexing(new DeploymentId(id, zoneId));
}
/**
 * Returns the application with the given id.
 *
 * @throws IllegalArgumentException if no such application exists
 */
public Application requireApplication(TenantAndApplicationId id) {
    Optional<Application> application = getApplication(id);
    if (application.isEmpty())
        throw new IllegalArgumentException(id + " not found");
    return application.get();
}
/**
 * Returns the instance with the given id.
 *
 * @throws IllegalArgumentException if no such instance exists
 */
public Instance requireInstance(ApplicationId id) {
    Optional<Instance> instance = getInstance(id);
    if (instance.isEmpty())
        throw new IllegalArgumentException(id + " not found");
    return instance.get();
}
/** Returns a snapshot of all applications. */
public List<Application> asList() {
return curator.readApplications(false);
}
/**
* Returns a snapshot of all readable applications. Unlike {@link #asList()}, this skips
* applications that cannot currently be read (e.g. due to serialization issues) and may return an incomplete
* snapshot.
*
* This should only be used in cases where acting on a subset of applications is better than none.
*/
public List<Application> readable() {
return curator.readApplications(true);
}
/** Returns the ID of all known applications. */
public List<TenantAndApplicationId> idList() {
return curator.readApplicationIds();
}
/** Returns a snapshot of all applications of a tenant. */
public List<Application> asList(TenantName tenant) {
return curator.readApplications(tenant);
}
/** The artifact repository and application store backing this controller. */
public ArtifactRepository artifacts() { return artifactRepository; }
public ApplicationStore applicationStore() { return applicationStore; }
/** Returns, by zone, the content clusters of those given deployments which are currently reachable. */
public Map<ZoneId, List<String>> reachableContentClustersByZone(Collection<DeploymentId> ids) {
    Map<ZoneId, List<String>> clustersByZone = new TreeMap<>(Comparator.comparing(ZoneId::value));
    for (DeploymentId deployment : ids) {
        if ( ! isHealthy(deployment)) continue; // unreachable deployments are skipped
        clustersByZone.put(deployment.zoneId(), List.copyOf(configServer.getContentClusters(deployment)));
    }
    return Collections.unmodifiableMap(clustersByZone);
}
/** Reads the oldest installed platform for the given application and zone from job history, or a node repo. */
private Optional<Version> oldestInstalledPlatform(JobStatus job) {
Version oldest = null;
// Walk runs from the descending map, tracking the minimum target platform seen,
// and stop at the first successful run — its platform (or an older later target) is installed.
// NOTE(review): assumes descendingMap() yields newest runs first — confirm against JobStatus.
for (Run run : job.runs().descendingMap().values()) {
Version version = run.versions().targetPlatform();
if (oldest == null || version.isBefore(oldest))
oldest = version;
if (run.status() == RunStatus.success)
return Optional.of(oldest);
}
// No successful run found: fall back to asking the zone's node repository.
return oldestInstalledPlatform(job.id());
}
/** Reads the oldest installed platform for the given application and zone from the node repo of that zone. */
private Optional<Version> oldestInstalledPlatform(JobId job) {
return configServer.nodeRepository().list(job.type().zone(controller.system()),
job.application(),
EnumSet.of(active, reserved))
.stream()
.map(Node::currentVersion)
.filter(version -> ! version.isEmpty())
.min(naturalOrder());
}
/** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */
public Version oldestInstalledPlatform(TenantAndApplicationId id) {
return controller.jobController().deploymentStatus(requireApplication(id)).jobs()
.production().asList().stream()
.map(this::oldestInstalledPlatform)
.flatMap(Optional::stream)
.min(naturalOrder())
.orElse(controller.readSystemVersion());
}
/**
* Creates a new application for an existing tenant.
*
* @throws IllegalArgumentException if the application already exists
*/
public Application createApplication(TenantAndApplicationId id, Credentials credentials) {
try (Lock lock = lock(id)) {
if (getApplication(id).isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
// Dashes and underscores collide in some backends, so the underscored twin must also be free.
if (getApplication(dashToUnderscore(id)).isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
if (controller.tenants().get(id.tenant()).isEmpty())
throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
accessControl.createApplication(id, credentials);
LockedApplication locked = new LockedApplication(new Application(id, clock.instant()), lock);
store(locked);
log.info("Created " + locked);
return locked.get();
}
}
/**
* Creates a new instance for an existing application.
*
* @throws IllegalArgumentException if the instance already exists, or has an invalid instance name.
*/
public void createInstance(ApplicationId id) {
lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
store(withNewInstance(application, id));
});
}
/** Fetches the requested application package from the artifact store(s). */
public ApplicationPackage getApplicationPackage(ApplicationId id, ApplicationVersion version) {
return new ApplicationPackage(applicationStore.get(id.tenant(), id.application(), version));
}
/** Returns the given application with a newly added instance. */
public LockedApplication withNewInstance(LockedApplication application, ApplicationId instance) {
    if (instance.instance().isTester())
        throw new IllegalArgumentException("'" + instance + "' is a tester application!");
    InstanceId.validate(instance.instance().value());
    if (getInstance(instance).isPresent())
        throw new IllegalArgumentException("Could not create '" + instance + "': Instance already exists");
    // Dashes and underscores collide in some backends, so the underscored twin must also be free.
    ApplicationId underscored = dashToUnderscore(instance);
    if (getInstance(underscored).isPresent())
        throw new IllegalArgumentException("Could not create '" + instance + "': Instance " + underscored + " already exists");
    log.info("Created " + instance);
    return application.withNewInstance(instance.instance());
}
/** Deploys an application package for an existing application instance. */
public ActivateResult deploy(JobId job, boolean deploySourceVersions) {
if (job.application().instance().isTester())
throw new IllegalArgumentException("'" + job.application() + "' is a tester application!");
TenantAndApplicationId applicationId = TenantAndApplicationId.from(job.application());
ZoneId zone = job.type().zone(controller.system());
// The deployment lock serializes deployments of this instance to this zone.
try (Lock deploymentLock = lockForDeployment(job.application(), zone)) {
Set<ContainerEndpoint> containerEndpoints;
Optional<EndpointCertificateMetadata> endpointCertificateMetadata;
Optional<TenantRoles> tenantRoles = Optional.empty();
Run run = controller.jobController().last(job)
.orElseThrow(() -> new IllegalStateException("No known run of '" + job + "'"));
if (run.hasEnded())
throw new IllegalStateException("No deployment expected for " + job + " now, as no job is running");
// Source versions are used only when requested and present; otherwise the run's targets.
Version platform = run.versions().sourcePlatform().filter(__ -> deploySourceVersions).orElse(run.versions().targetPlatform());
ApplicationVersion revision = run.versions().sourceApplication().filter(__ -> deploySourceVersions).orElse(run.versions().targetApplication());
ApplicationPackage applicationPackage = getApplicationPackage(job.application(), zone, revision);
// Application data is read and certificates/endpoints resolved under the application lock,
// which is released before the (slow) config server deployment below.
try (Lock lock = lock(applicationId)) {
LockedApplication application = new LockedApplication(requireApplication(applicationId), lock);
Instance instance = application.get().require(job.application().instance());
rejectOldChange(instance, platform, revision, job, zone);
if ( ! applicationPackage.trustedCertificates().isEmpty()
&& run.testerCertificate().isPresent())
applicationPackage = applicationPackage.withTrustedCertificate(run.testerCertificate().get());
endpointCertificateMetadata = endpointCertificates.getMetadata(instance, zone, applicationPackage.deploymentSpec().instance(instance.name()));
containerEndpoints = controller.routing().containerEndpointsOf(application.get(), job.application().instance(), zone);
}
ActivateResult result = deploy(job.application(), applicationPackage, zone, platform, containerEndpoints, endpointCertificateMetadata, tenantRoles);
var quotaUsage = deploymentQuotaUsage(zone, job.application());
// Collect application-package warnings from the prepare log and update notifications accordingly.
NotificationSource source = zone.environment().isManuallyDeployed() ?
NotificationSource.from(new DeploymentId(job.application(), zone)) : NotificationSource.from(applicationId);
List<String> warnings = Optional.ofNullable(result.prepareResponse().log)
.map(logs -> logs.stream()
.filter(log -> log.applicationPackage)
.filter(log -> LogLevel.parse(log.level).intValue() >= Level.WARNING.intValue())
.map(log -> log.message)
.sorted()
.distinct()
.collect(Collectors.toList()))
.orElseGet(List::of);
if (warnings.isEmpty()) controller.notificationsDb().removeNotification(source, Notification.Type.applicationPackage);
else controller.notificationsDb().setNotification(source, Notification.Type.applicationPackage, Notification.Level.warning, warnings);
// Re-acquire the application lock to record the new deployment.
lockApplicationOrThrow(applicationId, application ->
store(application.with(job.application().instance(),
instance -> instance.withNewDeployment(zone, revision, platform,
clock.instant(), warningsFrom(result),
quotaUsage))));
return result;
}
}
/**
* Deploys a system application to the given zone: via its application package when it has one,
* otherwise by asking the node repository to upgrade the application's node type.
*/
public void deploy(SystemApplication application, ZoneId zone, Version version) {
if (application.hasApplicationPackage()) {
deploySystemApplicationPackage(application, zone, version);
} else {
configServer.nodeRepository().upgrade(zone, application.nodeType(), version);
}
}
/** Deploys the application package of the given system application to the given zone. */
public ActivateResult deploySystemApplicationPackage(SystemApplication application, ZoneId zone, Version version) {
    if ( ! application.hasApplicationPackage())
        throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString());
    var packageContent = artifactRepository.getSystemApplicationPackage(application.id(), zone, version);
    return deploy(application.id(), new ApplicationPackage(packageContent), zone, version, Set.of(), /* No application cert */ Optional.empty(), Optional.empty());
}
/** Deploys the given tester application to the given zone. */
public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, Version platform) {
return deploy(tester.id(), applicationPackage, zone, platform, Set.of(), /* No application cert for tester*/ Optional.empty(), Optional.empty());
}
// Common deployment path: assembles deployment data and hands it to the config server.
// Routing policies are refreshed in the finally block even when deployment fails.
private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage,
ZoneId zone, Version platform, Set<ContainerEndpoint> endpoints,
Optional<EndpointCertificateMetadata> endpointCertificateMetadata,
Optional<TenantRoles> tenantRoles) {
try {
// Optional docker image repo override from the feature flag; blank values mean "no override".
Optional<DockerImage> dockerImageRepo = Optional.ofNullable(
dockerImageRepoFlag
.with(FetchVector.Dimension.ZONE_ID, zone.value())
.with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm())
.value())
.filter(s -> !s.isBlank())
.map(DockerImage::fromString);
Optional<AthenzDomain> domain = controller.tenants().get(application.tenant())
.filter(tenant-> tenant instanceof AthenzTenant)
.map(tenant -> ((AthenzTenant)tenant).domain());
if (zone.environment().isManuallyDeployed())
controller.applications().applicationStore().putMeta(new DeploymentId(application, zone),
clock.instant(),
applicationPackage.metaDataZip());
Quota deploymentQuota = DeploymentQuotaCalculator.calculate(billingController.getQuota(application.tenant()),
asList(application.tenant()), application, zone, applicationPackage.deploymentSpec());
List<TenantSecretStore> tenantSecretStores = controller.tenants()
.get(application.tenant())
.filter(tenant-> tenant instanceof CloudTenant)
.map(tenant -> ((CloudTenant) tenant).tenantSecretStores())
.orElse(List.of());
List<X509Certificate> operatorCertificates = controller.supportAccess().activeGrantsFor(new DeploymentId(application, zone)).stream()
.map(SupportAccessGrant::certificate)
.collect(toList());
ConfigServer.PreparedApplication preparedApplication =
configServer.deploy(new DeploymentData(application, zone, applicationPackage.zippedContent(), platform,
endpoints, endpointCertificateMetadata, dockerImageRepo, domain,
tenantRoles, deploymentQuota, tenantSecretStores, operatorCertificates));
return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
applicationPackage.zippedContent().length);
} finally {
controller.routing().policies().refresh(application, applicationPackage.deploymentSpec(), zone);
}
}
// Deactivates production deployments of the given instance which are no longer declared in
// deployment.xml, requiring the deployment-removal validation override; removes the instance
// itself when it is undeclared and all its deployments are gone.
private LockedApplication withoutDeletedDeployments(LockedApplication application, InstanceName instance) {
DeploymentSpec deploymentSpec = application.get().deploymentSpec();
List<ZoneId> deploymentsToRemove = application.get().require(instance).productionDeployments().values().stream()
.map(Deployment::zone)
.filter(zone -> deploymentSpec.instance(instance).isEmpty()
|| ! deploymentSpec.requireInstance(instance).deploysTo(zone.environment(),
zone.region()))
.collect(toList());
if (deploymentsToRemove.isEmpty())
return application;
// Removing a production deployment is destructive, so it must be explicitly allowed.
if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get().require(instance) +
" is deployed in " +
deploymentsToRemove.stream()
.map(zone -> zone.region().value())
.collect(joining(", ")) +
", but does not include " +
(deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
" in deployment.xml. " +
ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval));
boolean removeInstance = ! deploymentSpec.instanceNames().contains(instance)
&& application.get().require(instance).deployments().size() == deploymentsToRemove.size();
for (ZoneId zone : deploymentsToRemove)
application = deactivate(application, instance, zone);
if (removeInstance)
application = application.without(instance);
return application;
}
/**
 * Deletes the given application. All known instances of the application will be deleted.
 *
 * @throws IllegalArgumentException if the application has active deployments, or the caller is not authorized
 */
public void deleteApplication(TenantAndApplicationId id, Credentials credentials) {
    lockApplicationOrThrow(id, application -> {
        // Refuse deletion while any instance still has active deployments; report them by zone.
        var deployments = application.get().instances().values().stream()
                                     .filter(instance -> ! instance.deployments().isEmpty())
                                     .collect(toMap(Instance::name,
                                                    instance -> instance.deployments().keySet().stream()
                                                                        .map(ZoneId::toString)
                                                                        .collect(joining(", "))));
        if ( ! deployments.isEmpty())
            throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments: " + deployments);
        // Remove DNS endpoints and drop each instance before deleting application-level data.
        for (Instance instance : application.get().instances().values()) {
            controller.routing().removeEndpointsInDns(application.get(), instance.name());
            application = application.without(instance.name());
        }
        applicationStore.removeAll(id.tenant(), id.application());
        applicationStore.removeAllTesters(id.tenant(), id.application());
        applicationStore.putMetaTombstone(id.tenant(), id.application(), clock.instant());
        accessControl.deleteApplication(id, credentials);
        curator.removeApplication(id);
        controller.jobController().collectGarbage();
        controller.notificationsDb().removeNotifications(NotificationSource.from(id));
        log.info("Deleted " + id);
    });
}
/**
* Deletes the given application instance.
*
* @throws IllegalArgumentException if the instance has deployments, is declared in deployment.xml,
*                                  or the caller is not authorized
* @throws NotExistsException if the instance does not exist
*/
public void deleteInstance(ApplicationId instanceId) {
if (getInstance(instanceId).isEmpty())
throw new NotExistsException("Could not delete instance '" + instanceId + "': Instance not found");
lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> {
if ( ! application.get().require(instanceId.instance()).deployments().isEmpty())
throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments in: " +
application.get().require(instanceId.instance()).deployments().keySet().stream().map(ZoneId::toString)
.sorted().collect(joining(", ")));
// Instances still declared in deployment.xml would simply be re-created; refuse those.
if ( ! application.get().deploymentSpec().equals(DeploymentSpec.empty)
&& application.get().deploymentSpec().instanceNames().contains(instanceId.instance()))
throw new IllegalArgumentException("Can not delete '" + instanceId + "', which is specified in 'deployment.xml'; remove it there first");
controller.routing().removeEndpointsInDns(application.get(), instanceId.instance());
curator.writeApplication(application.without(instanceId.instance()).get());
controller.jobController().collectGarbage();
controller.notificationsDb().removeNotifications(NotificationSource.from(instanceId));
log.info("Deleted " + instanceId);
});
}
/**
* Replaces any previous version of this application by this instance.
*
* @param application a locked application to store
*/
public void store(LockedApplication application) {
curator.writeApplication(application.get());
}
/**
 * Acquires a locked application to modify and store, if there is an application with the given id.
 *
 * @param applicationId ID of the application to lock and get.
 * @param action Function which acts on the locked application.
 */
public void lockApplicationIfPresent(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
    try (Lock lock = lock(applicationId)) {
        Optional<Application> application = getApplication(applicationId);
        if (application.isPresent())
            action.accept(new LockedApplication(application.get(), lock));
    }
}
/**
* Acquire a locked application to modify and store, or throw an exception if no application has the given id.
*
* @param applicationId ID of the application to lock and require.
* @param action Function which acts on the locked application.
* @throws IllegalArgumentException when application does not exist.
*/
public void lockApplicationOrThrow(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
try (Lock lock = lock(applicationId)) {
// requireApplication throws IllegalArgumentException if the application is absent.
action.accept(new LockedApplication(requireApplication(applicationId), lock));
}
}
/**
* Tells config server to schedule a restart of all nodes in this deployment.
*
* @param restartFilter Variables to filter which nodes to restart.
*/
public void restart(DeploymentId deploymentId, RestartFilter restartFilter) {
configServer.restart(deploymentId, restartFilter);
}
/**
* Asks the config server whether this deployment is currently healthy, i.e., serving traffic as usual.
* If this cannot be ascertained, we must assume it is not.
*/
public boolean isHealthy(DeploymentId deploymentId) {
try {
return ! isSuspended(deploymentId);
}
catch (RuntimeException e) {
// Treat any failure to reach the config server as unhealthy, but log it.
log.log(Level.WARNING, "Failed getting suspension status of " + deploymentId + ": " + Exceptions.toMessageString(e));
return false;
}
}
/**
* Asks the config server whether this deployment is currently <i>suspended</i>:
* Not in a state where it should receive traffic.
*/
public boolean isSuspended(DeploymentId deploymentId) {
return configServer.isSuspended(deploymentId);
}
/** Sets suspension status of the given deployment in its zone. */
public void setSuspension(DeploymentId deploymentId, boolean suspend) {
configServer.setSuspension(deploymentId, suspend);
}
/** Deactivates the given instance's deployment in the given zone, and stores the result. */
public void deactivate(ApplicationId id, ZoneId zone) {
lockApplicationOrThrow(TenantAndApplicationId.from(id),
application -> store(deactivate(application, id.instance(), zone)));
}
/**
* Deactivates a locked application without storing it.
*
* @return the application with the deployment in the given zone removed
*/
private LockedApplication deactivate(LockedApplication application, InstanceName instanceName, ZoneId zone) {
DeploymentId id = new DeploymentId(application.get().id().instance(instanceName), zone);
try {
configServer.deactivate(id);
} finally {
// Routing policies, meta tombstones and notifications are cleaned up even if deactivation fails.
controller.routing().policies().refresh(application.get().id().instance(instanceName), application.get().deploymentSpec(), zone);
if (zone.environment().isManuallyDeployed())
applicationStore.putMetaTombstone(id, clock.instant());
if (!zone.environment().isTest())
controller.notificationsDb().removeNotifications(NotificationSource.from(id));
}
return application.with(instanceName, instance -> instance.withoutDeploymentIn(zone));
}
/** The deployment trigger owned by this controller. */
public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }
/**
* Returns a lock which provides exclusive rights to changing this application.
* Any operation which stores an application needs to first acquire this lock, then read, modify
* and store the application, and finally release (close) the lock.
*/
Lock lock(TenantAndApplicationId application) {
return curator.lock(application);
}
/**
* Returns a lock which provides exclusive rights to deploying this application to the given zone.
*/
private Lock lockForDeployment(ApplicationId application, ZoneId zone) {
return curator.lockForDeployment(application, zone);
}
/**
* Verifies that the application can be deployed to the tenant, following these rules:
*
* 1. Verify that the Athenz service can be launched by the config server
* 2. If the principal is given, verify that the principal is tenant admin or admin of the tenant domain
* 3. If the principal is not given, verify that the Athenz domain of the tenant equals Athenz domain given in deployment.xml
*
* @param tenantName tenant where application should be deployed
* @param instanceName the instance to deploy, if any; used to resolve the instance-level Athenz service
* @param zoneId the zone to deploy to, if any; required when a user principal is given
* @param applicationPackage application package
* @param deployer principal initiating the deployment, possibly empty
*/
public void verifyApplicationIdentityConfiguration(TenantName tenantName, Optional<InstanceName> instanceName, Optional<ZoneId> zoneId, ApplicationPackage applicationPackage, Optional<Principal> deployer) {
Optional<AthenzDomain> identityDomain = applicationPackage.deploymentSpec().athenzDomain()
.map(domain -> new AthenzDomain(domain.value()));
// No Athenz domain declared: nothing to verify.
if(identityDomain.isEmpty()) {
return;
}
if(! (accessControl instanceof AthenzFacade)) {
throw new IllegalArgumentException("Athenz domain and service specified in deployment.xml, but not supported by system.");
}
verifyAllowedLaunchAthenzService(applicationPackage.deploymentSpec());
Optional<AthenzUser> athenzUser = getUser(deployer);
if (athenzUser.isPresent()) {
var zone = zoneId.orElseThrow(() -> new IllegalArgumentException("Unable to evaluate access, no zone provided in deployment"));
// Instance-level service wins over the application-level default.
var serviceToLaunch = instanceName
.flatMap(instance -> applicationPackage.deploymentSpec().instance(instance))
.flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region()))
.or(() -> applicationPackage.deploymentSpec().athenzService())
.map(service -> new AthenzService(identityDomain.get(), service.value()));
if(serviceToLaunch.isPresent()) {
if (
! ((AthenzFacade) accessControl).canLaunch(athenzUser.get(), serviceToLaunch.get()) &&
! ((AthenzFacade) accessControl).hasTenantAdminAccess(athenzUser.get(), identityDomain.get())
) {
throw new IllegalArgumentException("User " + athenzUser.get().getFullName() + " is not allowed to launch " +
"service " + serviceToLaunch.get().getFullName() + ". " +
"Please reach out to the domain admin.");
}
} else {
throw new IllegalArgumentException("Athenz domain configured, but no service defined for deployment to " + zone.value());
}
} else {
// NOTE(review): unchecked cast — presumably only Athenz tenants can reach this branch; confirm.
Tenant tenant = controller.tenants().require(tenantName);
AthenzDomain tenantDomain = ((AthenzTenant) tenant).domain();
if ( ! Objects.equals(tenantDomain, identityDomain.get()))
throw new IllegalArgumentException("Athenz domain in deployment.xml: [" + identityDomain.get().getName() + "] " +
"must match tenant domain: [" + tenantDomain.getName() + "]");
}
}
/**
 * Rejects a production deployment whose platform or application version is older than what is
 * already deployed in the zone, to avoid accidental downgrades.
 *
 * @throws IllegalArgumentException if either requested version is older than the deployed one
 */
private void rejectOldChange(Instance instance, Version platform, ApplicationVersion revision, JobId job, ZoneId zone) {
Deployment deployment = instance.deployments().get(zone);
// Nothing deployed in this zone yet, or not a production zone: nothing to protect.
if (deployment == null) return;
if (!zone.environment().isProduction()) return;
// A pinned change is allowed to roll the platform backwards.
boolean platformIsOlder = platform.compareTo(deployment.version()) < 0 && !instance.change().isPinned();
// Unknown revisions are tolerated in CD systems.
boolean revisionIsOlder = revision.compareTo(deployment.applicationVersion()) < 0 &&
!(revision.isUnknown() && controller.system().isCd());
if (platformIsOlder || revisionIsOlder)
throw new IllegalArgumentException(String.format("Rejecting deployment of application %s to %s, as the requested versions (platform: %s, application: %s)" +
" are older than the currently deployed (platform: %s, application: %s).",
job.application(), zone, platform, revision, deployment.version(), deployment.applicationVersion()));
}
/** Returns the given id with dashes in the application name replaced by underscores, used to detect near-duplicate names. */
private TenantAndApplicationId dashToUnderscore(TenantAndApplicationId id) {
    // String.replace does a literal single-character substitution; replaceAll would needlessly
    // compile a regex on every call for the same result.
    return TenantAndApplicationId.from(id.tenant().value(), id.application().value().replace('-', '_'));
}
/** Returns the given instance id with dashes in the application name replaced by underscores, preserving the instance name. */
private ApplicationId dashToUnderscore(ApplicationId id) {
return dashToUnderscore(TenantAndApplicationId.from(id)).instance(id.instance());
}
/** Returns the quota usage of the given application's deployment in the given zone, as reported by the node repository. */
private QuotaUsage deploymentQuotaUsage(ZoneId zoneId, ApplicationId applicationId) {
var application = configServer.nodeRepository().getApplication(zoneId, applicationId);
return DeploymentQuotaCalculator.calculateQuotaUsage(application);
}
/**
 * Fetches the application package for the given revision: dev packages (unknown revision) are
 * stored per zone, while regular revisions are fetched from the tenant's application store.
 */
private ApplicationPackage getApplicationPackage(ApplicationId application, ZoneId zone, ApplicationVersion revision) {
return new ApplicationPackage(revision.isUnknown() ? applicationStore.getDev(application, zone)
: applicationStore.get(application.tenant(), application.application(), revision));
}
/*
 * Returns the AthenzUser behind this principal, or Optional.empty if the principal
 * is absent, is not an AthenzPrincipal, or its identity is not a user.
 */
private Optional<AthenzUser> getUser(Optional<Principal> deployer) {
    if (deployer.isEmpty() || ! (deployer.get() instanceof AthenzPrincipal))
        return Optional.empty();
    AthenzIdentity identity = ((AthenzPrincipal) deployer.get()).getIdentity();
    return identity instanceof AthenzUser ? Optional.of((AthenzUser) identity)
                                          : Optional.empty();
}
/*
 * Verifies that the configured athenz service (if any) can be launched.
 * Checks every reachable zone: the config server identity of each zone must be allowed to
 * launch both the deployment-level service and every instance-level service override.
 */
private void verifyAllowedLaunchAthenzService(DeploymentSpec deploymentSpec) {
deploymentSpec.athenzDomain().ifPresent(domain -> {
controller.zoneRegistry().zones().reachable().ids().forEach(zone -> {
AthenzIdentity configServerAthenzIdentity = controller.zoneRegistry().getConfigServerHttpsIdentity(zone);
// Deployment-level service, if configured.
deploymentSpec.athenzService().ifPresent(service -> {
verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()));
});
// Instance-level service overrides, per zone.
deploymentSpec.instances().forEach(spec -> {
spec.athenzService(zone.environment(), zone.region()).ifPresent(service -> {
verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()));
});
});
});
});
}
/**
 * Throws unless the given identity (a config server) is allowed to launch the given Athenz service.
 *
 * @throws IllegalArgumentException if the launch is not permitted
 */
private void verifyAthenzServiceCanBeLaunchedBy(AthenzIdentity configServerAthenzIdentity, AthenzService athenzService) {
if ( ! ((AthenzFacade) accessControl).canLaunch(configServerAthenzIdentity, athenzService))
throw new IllegalArgumentException("Not allowed to launch Athenz service " + athenzService.getFullName());
}
/** Returns the latest known version within the given major, or empty if none exists. */
public Optional<Version> lastCompatibleVersion(int targetMajorVersion) {
    Optional<Version> latest = Optional.empty();
    for (VespaVersion vespaVersion : controller.readVersionStatus().versions()) {
        Version candidate = vespaVersion.versionNumber();
        if (candidate.getMajor() != targetMajorVersion) continue;
        if (latest.isEmpty() || candidate.compareTo(latest.get()) > 0)
            latest = Optional.of(candidate);
    }
    return latest;
}
/** Extracts the deployment-warning count metric from a deployment result; empty map when there is no log or no warnings. */
private static Map<DeploymentMetrics.Warning, Integer> warningsFrom(ActivateResult result) {
    if (result.prepareResponse().log == null) return Map.of();
    // Count log entries at warn/warning level; only the aggregate "all" bucket is tracked.
    int warningCount = 0;
    for (Log entry : result.prepareResponse().log) {
        if ("warn".equalsIgnoreCase(entry.level) || "warning".equalsIgnoreCase(entry.level))
            warningCount++;
    }
    return warningCount == 0 ? Map.of() : Map.of(DeploymentMetrics.Warning.all, warningCount);
}
}
class ApplicationController {
private static final Logger log = Logger.getLogger(ApplicationController.class.getName());
/** The controller owning this */
private final Controller controller;
/** For persistence */
private final CuratorDb curator;
/** Source of build artifacts, e.g. system application packages. */
private final ArtifactRepository artifactRepository;
/** Storage for tenant application packages and metadata. */
private final ApplicationStore applicationStore;
/** Authorization backend; an AthenzFacade in Athenz-backed systems. */
private final AccessControl accessControl;
private final ConfigServer configServer;
private final Clock clock;
private final DeploymentTrigger deploymentTrigger;
private final ApplicationPackageValidator applicationPackageValidator;
private final EndpointCertificates endpointCertificates;
/** Feature flag overriding the docker image repository per zone/application. */
private final StringFlag dockerImageRepoFlag;
private final BillingController billingController;
/**
 * Creates the application controller. Also schedules a one-off task, one minute after
 * construction, which rewrites all stored applications — adding any instances that are
 * declared in deployment.xml but missing from the stored state.
 */
ApplicationController(Controller controller, CuratorDb curator, AccessControl accessControl, Clock clock,
FlagSource flagSource, BillingController billingController) {
this.controller = controller;
this.curator = curator;
this.accessControl = accessControl;
this.configServer = controller.serviceRegistry().configServer();
this.clock = clock;
this.artifactRepository = controller.serviceRegistry().artifactRepository();
this.applicationStore = controller.serviceRegistry().applicationStore();
this.dockerImageRepoFlag = PermanentFlags.DOCKER_IMAGE_REPO.bindTo(flagSource);
this.billingController = billingController;
deploymentTrigger = new DeploymentTrigger(controller, clock);
applicationPackageValidator = new ApplicationPackageValidator(controller);
endpointCertificates = new EndpointCertificates(controller,
controller.serviceRegistry().endpointCertificateProvider(),
controller.serviceRegistry().endpointCertificateValidator());
// One-off migration/repair pass over all applications, delayed to let the system settle.
Once.after(Duration.ofMinutes(1), () -> {
Instant start = clock.instant();
int count = 0;
for (TenantAndApplicationId id : curator.readApplicationIds()) {
lockApplicationIfPresent(id, application -> {
for (InstanceName instance : application.get().deploymentSpec().instanceNames())
if (!application.get().instances().containsKey(instance))
application = withNewInstance(application, id.instance(instance));
store(application);
});
count++;
}
// NOTE(review): count is incremented for every id read, even ids no longer present
// (lockApplicationIfPresent is a no-op for those) — "Wrote %d" may overstate; verify intent.
log.log(Level.INFO, String.format("Wrote %d applications in %s", count,
Duration.between(start, clock.instant())));
});
}
/** Returns the application with the given id, or empty if it is not present */
public Optional<Application> getApplication(TenantAndApplicationId id) {
return curator.readApplication(id);
}
/** Returns the instance with the given id, or empty if it is not present */
public Optional<Instance> getInstance(ApplicationId id) {
return getApplication(TenantAndApplicationId.from(id)).flatMap(application -> application.get(id.instance()));
}
/**
 * Triggers reindexing for the given document types in the given clusters, for the given application.
 *
 * If no clusters are given, reindexing is triggered for the entire application; otherwise
 * if no documents types are given, reindexing is triggered for all given clusters; otherwise
 * reindexing is triggered for the cartesian product of the given clusters and document types.
 *
 * @param indexedOnly whether to restrict reindexing to indexed document types
 */
public void reindex(ApplicationId id, ZoneId zoneId, List<String> clusterNames, List<String> documentTypes, boolean indexedOnly) {
configServer.reindex(new DeploymentId(id, zoneId), clusterNames, documentTypes, indexedOnly);
}
/** Returns the reindexing status for the given application in the given zone, as reported by the config server. */
public ApplicationReindexing applicationReindexing(ApplicationId id, ZoneId zoneId) {
return configServer.getReindexing(new DeploymentId(id, zoneId));
}
/** Enables reindexing for the given application in the given zone, via the zone's config server. */
public void enableReindexing(ApplicationId id, ZoneId zoneId) {
configServer.enableReindexing(new DeploymentId(id, zoneId));
}
/** Disables reindexing for the given application in the given zone, via the zone's config server. */
public void disableReindexing(ApplicationId id, ZoneId zoneId) {
configServer.disableReindexing(new DeploymentId(id, zoneId));
}
/**
 * Returns the application with the given id
 *
 * @throws IllegalArgumentException if it does not exist
 */
public Application requireApplication(TenantAndApplicationId id) {
return getApplication(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
}
/**
 * Returns the instance with the given id
 *
 * @throws IllegalArgumentException if it does not exist
 */
public Instance requireInstance(ApplicationId id) {
return getInstance(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
}
/** Returns a snapshot of all applications; fails if any application cannot be read. */
public List<Application> asList() {
return curator.readApplications(false);
}
/**
 * Returns a snapshot of all readable applications. Unlike {@link #asList()}, this silently skips
 * applications that cannot currently be read (e.g. due to serialization issues) and may return an incomplete
 * snapshot.
 *
 * This should only be used in cases where acting on a subset of applications is better than none.
 */
public List<Application> readable() {
return curator.readApplications(true);
}
/** Returns the IDs of all known applications. */
public List<TenantAndApplicationId> idList() {
return curator.readApplicationIds();
}
/** Returns a snapshot of all applications belonging to the given tenant */
public List<Application> asList(TenantName tenant) {
return curator.readApplications(tenant);
}
/** Returns the artifact repository used by this. */
public ArtifactRepository artifacts() { return artifactRepository; }
/** Returns the application package store used by this. */
public ApplicationStore applicationStore() { return applicationStore; }
/**
 * Returns all currently reachable content clusters among the given deployments.
 * Deployments which are unhealthy (or whose health cannot be determined) are skipped.
 * The returned map is unmodifiable and sorted by zone id.
 */
public Map<ZoneId, List<String>> reachableContentClustersByZone(Collection<DeploymentId> ids) {
Map<ZoneId, List<String>> clusters = new TreeMap<>(Comparator.comparing(ZoneId::value));
for (DeploymentId id : ids)
if (isHealthy(id))
clusters.put(id.zoneId(), List.copyOf(configServer.getContentClusters(id)));
return Collections.unmodifiableMap(clusters);
}
/** Reads the oldest installed platform for the given application and zone from job history, or a node repo. */
private Optional<Version> oldestInstalledPlatform(JobStatus job) {
// Walk runs from newest to oldest, tracking the oldest target platform seen so far;
// stop at the first successful run, since everything before it has been fully installed.
Version oldest = null;
for (Run run : job.runs().descendingMap().values()) {
Version version = run.versions().targetPlatform();
if (oldest == null || version.isBefore(oldest))
oldest = version;
if (run.status() == RunStatus.success)
return Optional.of(oldest);
}
// No successful run on record: fall back to asking the zone's node repository.
return oldestInstalledPlatform(job.id());
}
/** Reads the oldest installed platform for the given application and zone from the node repo of that zone. */
private Optional<Version> oldestInstalledPlatform(JobId job) {
// Considers only active and reserved nodes, ignoring nodes with no reported version.
return configServer.nodeRepository().list(job.type().zone(controller.system()),
job.application(),
EnumSet.of(active, reserved))
.stream()
.map(Node::currentVersion)
.filter(version -> ! version.isEmpty())
.min(naturalOrder());
}
/**
 * Returns the oldest Vespa version installed on any active or reserved production node for the given application.
 * Falls back to the current system version when the application has no production jobs with a known version.
 */
public Version oldestInstalledPlatform(TenantAndApplicationId id) {
return controller.jobController().deploymentStatus(requireApplication(id)).jobs()
.production().asList().stream()
.map(this::oldestInstalledPlatform)
.flatMap(Optional::stream)
.min(naturalOrder())
.orElse(controller.readSystemVersion());
}
/**
 * Creates a new application for an existing tenant.
 *
 * @param id id of the application to create
 * @param credentials credentials used to authorize the creation with the access control system
 * @return the newly stored application
 * @throws IllegalArgumentException if the application already exists, collides with an existing
 *         name up to dash/underscore, has an invalid name, or the tenant does not exist
 */
public Application createApplication(TenantAndApplicationId id, Credentials credentials) {
try (Lock lock = lock(id)) {
if (getApplication(id).isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
// Names differing only by dash vs underscore are considered colliding.
if (getApplication(dashToUnderscore(id)).isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
if (controller.tenants().get(id.tenant()).isEmpty())
throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
// Register with access control before persisting locally.
accessControl.createApplication(id, credentials);
LockedApplication locked = new LockedApplication(new Application(id, clock.instant()), lock);
store(locked);
log.info("Created " + locked);
return locked.get();
}
}
/**
 * Creates a new instance for an existing application.
 *
 * @throws IllegalArgumentException if the instance already exists, or has an invalid instance name,
 *         or the application does not exist
 */
public void createInstance(ApplicationId id) {
lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
store(withNewInstance(application, id));
});
}
/** Fetches the requested application package from the artifact store(s). */
public ApplicationPackage getApplicationPackage(ApplicationId id, ApplicationVersion version) {
return new ApplicationPackage(applicationStore.get(id.tenant(), id.application(), version));
}
/**
 * Returns given application with a new instance added. Does not store the result.
 *
 * @throws IllegalArgumentException if the instance name is a tester name, is invalid,
 *         already exists, or collides with an existing name up to dash/underscore
 */
public LockedApplication withNewInstance(LockedApplication application, ApplicationId instance) {
if (instance.instance().isTester())
throw new IllegalArgumentException("'" + instance + "' is a tester application!");
InstanceId.validate(instance.instance().value());
if (getInstance(instance).isPresent())
throw new IllegalArgumentException("Could not create '" + instance + "': Instance already exists");
if (getInstance(dashToUnderscore(instance)).isPresent())
throw new IllegalArgumentException("Could not create '" + instance + "': Instance " + dashToUnderscore(instance) + " already exists");
log.info("Created " + instance);
return application.withNewInstance(instance.instance());
}
/**
 * Deploys an application package for an existing application instance.
 *
 * @param job the job which triggers this deployment; its last run must still be active
 * @param deploySourceVersions whether to deploy the run's source versions instead of its target versions
 * @return the result of activating the prepared deployment
 * @throws IllegalArgumentException if the instance is a tester, or the versions are older than what is deployed
 * @throws IllegalStateException if the job has no run, or its last run has already ended
 */
public ActivateResult deploy(JobId job, boolean deploySourceVersions) {
if (job.application().instance().isTester())
throw new IllegalArgumentException("'" + job.application() + "' is a tester application!");
TenantAndApplicationId applicationId = TenantAndApplicationId.from(job.application());
ZoneId zone = job.type().zone(controller.system());
// Serialize concurrent deployments of this instance to this zone.
try (Lock deploymentLock = lockForDeployment(job.application(), zone)) {
Set<ContainerEndpoint> containerEndpoints;
Optional<EndpointCertificateMetadata> endpointCertificateMetadata;
Optional<TenantRoles> tenantRoles = Optional.empty();
Run run = controller.jobController().last(job)
.orElseThrow(() -> new IllegalStateException("No known run of '" + job + "'"));
if (run.hasEnded())
throw new IllegalStateException("No deployment expected for " + job + " now, as no job is running");
Version platform = run.versions().sourcePlatform().filter(__ -> deploySourceVersions).orElse(run.versions().targetPlatform());
ApplicationVersion revision = run.versions().sourceApplication().filter(__ -> deploySourceVersions).orElse(run.versions().targetApplication());
ApplicationPackage applicationPackage = getApplicationPackage(job.application(), zone, revision);
// Read and validate against current application state under the application lock,
// which is released before the (slow) config server call below.
try (Lock lock = lock(applicationId)) {
LockedApplication application = new LockedApplication(requireApplication(applicationId), lock);
Instance instance = application.get().require(job.application().instance());
rejectOldChange(instance, platform, revision, job, zone);
if ( ! applicationPackage.trustedCertificates().isEmpty()
&& run.testerCertificate().isPresent())
applicationPackage = applicationPackage.withTrustedCertificate(run.testerCertificate().get());
endpointCertificateMetadata = endpointCertificates.getMetadata(instance, zone, applicationPackage.deploymentSpec().instance(instance.name()));
containerEndpoints = controller.routing().containerEndpointsOf(application.get(), job.application().instance(), zone);
}
ActivateResult result = deploy(job.application(), applicationPackage, zone, platform, containerEndpoints, endpointCertificateMetadata, tenantRoles);
var quotaUsage = deploymentQuotaUsage(zone, job.application());
// Collect distinct application-package warnings from the prepare log and mirror them as notifications.
NotificationSource source = zone.environment().isManuallyDeployed() ?
NotificationSource.from(new DeploymentId(job.application(), zone)) : NotificationSource.from(applicationId);
List<String> warnings = Optional.ofNullable(result.prepareResponse().log)
.map(logs -> logs.stream()
.filter(log -> log.applicationPackage)
.filter(log -> LogLevel.parse(log.level).intValue() >= Level.WARNING.intValue())
.map(log -> log.message)
.sorted()
.distinct()
.collect(Collectors.toList()))
.orElseGet(List::of);
if (warnings.isEmpty()) controller.notificationsDb().removeNotification(source, Notification.Type.applicationPackage);
else controller.notificationsDb().setNotification(source, Notification.Type.applicationPackage, Notification.Level.warning, warnings);
// Record the new deployment on the instance, re-acquiring the application lock.
lockApplicationOrThrow(applicationId, application ->
store(application.with(job.application().instance(),
instance -> instance.withNewDeployment(zone, revision, platform,
clock.instant(), warningsFrom(result),
quotaUsage))));
return result;
}
}
/** Stores the deployment spec and validation overrides from the application package, and runs cleanup. */
// NOTE(review): the javadoc above appears to be a leftover from a removed method; it does not
// describe deploy(SystemApplication, ...) below — confirm and remove.
/** Deploy a system application to given zone */
public void deploy(SystemApplication application, ZoneId zone, Version version) {
if (application.hasApplicationPackage()) {
deploySystemApplicationPackage(application, zone, version);
} else {
// No package: system applications of this kind are upgraded directly through the node repository.
configServer.nodeRepository().upgrade(zone, application.nodeType(), version);
}
}
/**
 * Deploy a system application with an application package to given zone.
 *
 * @throws RuntimeException if the system application has no application package
 */
public ActivateResult deploySystemApplicationPackage(SystemApplication application, ZoneId zone, Version version) {
if (application.hasApplicationPackage()) {
ApplicationPackage applicationPackage = new ApplicationPackage(
artifactRepository.getSystemApplicationPackage(application.id(), zone, version)
);
return deploy(application.id(), applicationPackage, zone, version, Set.of(), /* No application cert */ Optional.empty(), Optional.empty());
} else {
throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString());
}
}
/** Deploys the given tester application to the given zone. */
public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, Version platform) {
return deploy(tester.id(), applicationPackage, zone, platform, Set.of(), /* No application cert for tester*/ Optional.empty(), Optional.empty());
}
/**
 * Prepares and activates the given application package on the config server in the given zone.
 * Routing policies for the zone are refreshed even when deployment fails.
 *
 * @return the activation result, including the package hash and prepare response
 */
private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage,
ZoneId zone, Version platform, Set<ContainerEndpoint> endpoints,
Optional<EndpointCertificateMetadata> endpointCertificateMetadata,
Optional<TenantRoles> tenantRoles) {
try {
// Optional per-zone/per-application override of the docker image repository, via feature flag.
Optional<DockerImage> dockerImageRepo = Optional.ofNullable(
dockerImageRepoFlag
.with(FetchVector.Dimension.ZONE_ID, zone.value())
.with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm())
.value())
.filter(s -> !s.isBlank())
.map(DockerImage::fromString);
Optional<AthenzDomain> domain = controller.tenants().get(application.tenant())
.filter(tenant-> tenant instanceof AthenzTenant)
.map(tenant -> ((AthenzTenant)tenant).domain());
// Manual deployments keep a copy of the package metadata for inspection.
if (zone.environment().isManuallyDeployed())
controller.applications().applicationStore().putMeta(new DeploymentId(application, zone),
clock.instant(),
applicationPackage.metaDataZip());
Quota deploymentQuota = DeploymentQuotaCalculator.calculate(billingController.getQuota(application.tenant()),
asList(application.tenant()), application, zone, applicationPackage.deploymentSpec());
List<TenantSecretStore> tenantSecretStores = controller.tenants()
.get(application.tenant())
.filter(tenant-> tenant instanceof CloudTenant)
.map(tenant -> ((CloudTenant) tenant).tenantSecretStores())
.orElse(List.of());
// Certificates from active support-access grants are trusted by the deployment.
List<X509Certificate> operatorCertificates = controller.supportAccess().activeGrantsFor(new DeploymentId(application, zone)).stream()
.map(SupportAccessGrant::certificate)
.collect(toList());
ConfigServer.PreparedApplication preparedApplication =
configServer.deploy(new DeploymentData(application, zone, applicationPackage.zippedContent(), platform,
endpoints, endpointCertificateMetadata, dockerImageRepo, domain,
tenantRoles, deploymentQuota, tenantSecretStores, operatorCertificates));
return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
applicationPackage.zippedContent().length);
} finally {
// Always refresh routing policies, also on failure, to keep them consistent with config server state.
controller.routing().policies().refresh(application, applicationPackage.deploymentSpec(), zone);
}
}
/**
 * Deactivates production deployments of the given instance which are no longer declared in the
 * deployment spec, and removes the instance itself when it is no longer declared and has no
 * remaining deployments. Requires a validation override to allow deployment removal.
 *
 * @return the application with the removed deployments (and possibly instance) gone
 * @throws IllegalArgumentException if removal is not allowed by validation overrides
 */
private LockedApplication withoutDeletedDeployments(LockedApplication application, InstanceName instance) {
DeploymentSpec deploymentSpec = application.get().deploymentSpec();
// Production zones the instance is deployed to but which the spec no longer declares.
List<ZoneId> deploymentsToRemove = application.get().require(instance).productionDeployments().values().stream()
.map(Deployment::zone)
.filter(zone -> deploymentSpec.instance(instance).isEmpty()
|| ! deploymentSpec.requireInstance(instance).deploysTo(zone.environment(),
zone.region()))
.collect(toList());
if (deploymentsToRemove.isEmpty())
return application;
// Removing production deployments is destructive, so it must be explicitly allowed.
if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get().require(instance) +
" is deployed in " +
deploymentsToRemove.stream()
.map(zone -> zone.region().value())
.collect(joining(", ")) +
", but does not include " +
(deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
" in deployment.xml. " +
ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval));
// Drop the whole instance when it is gone from the spec and all its deployments are being removed.
boolean removeInstance = ! deploymentSpec.instanceNames().contains(instance)
&& application.get().require(instance).deployments().size() == deploymentsToRemove.size();
for (ZoneId zone : deploymentsToRemove)
application = deactivate(application, instance, zone);
if (removeInstance)
application = application.without(instance);
return application;
}
/**
 * Deletes the given application. All known instances of the applications will be deleted.
 *
 * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
 */
public void deleteApplication(TenantAndApplicationId id, Credentials credentials) {
lockApplicationOrThrow(id, application -> {
// Refuse to delete while any instance still has active deployments.
var deployments = application.get().instances().values().stream()
.filter(instance -> ! instance.deployments().isEmpty())
.collect(toMap(instance -> instance.name(),
instance -> instance.deployments().keySet().stream()
.map(ZoneId::toString)
.collect(joining(", "))));
if ( ! deployments.isEmpty())
throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments: " + deployments);
// Remove DNS endpoints for each instance before discarding it.
for (Instance instance : application.get().instances().values()) {
controller.routing().removeEndpointsInDns(application.get(), instance.name());
application = application.without(instance.name());
}
applicationStore.removeAll(id.tenant(), id.application());
applicationStore.removeAllTesters(id.tenant(), id.application());
applicationStore.putMetaTombstone(id.tenant(), id.application(), clock.instant());
accessControl.deleteApplication(id, credentials);
curator.removeApplication(id);
controller.jobController().collectGarbage();
controller.notificationsDb().removeNotifications(NotificationSource.from(id));
log.info("Deleted " + id);
});
}
/**
 * Deletes the given application instance.
 *
 * @throws IllegalArgumentException if the instance has deployments, is declared in deployment.xml,
 *         or the caller is not authorized
 * @throws NotExistsException if the instance does not exist
 */
public void deleteInstance(ApplicationId instanceId) {
if (getInstance(instanceId).isEmpty())
throw new NotExistsException("Could not delete instance '" + instanceId + "': Instance not found");
lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> {
if ( ! application.get().require(instanceId.instance()).deployments().isEmpty())
throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments in: " +
application.get().require(instanceId.instance()).deployments().keySet().stream().map(ZoneId::toString)
.sorted().collect(joining(", ")));
// Instances declared in deployment.xml must be removed from the spec first.
if ( ! application.get().deploymentSpec().equals(DeploymentSpec.empty)
&& application.get().deploymentSpec().instanceNames().contains(instanceId.instance()))
throw new IllegalArgumentException("Can not delete '" + instanceId + "', which is specified in 'deployment.xml'; remove it there first");
controller.routing().removeEndpointsInDns(application.get(), instanceId.instance());
curator.writeApplication(application.without(instanceId.instance()).get());
controller.jobController().collectGarbage();
controller.notificationsDb().removeNotifications(NotificationSource.from(instanceId));
log.info("Deleted " + instanceId);
});
}
/**
 * Replace any previous version of this application by this instance
 *
 * @param application a locked application to store
 */
public void store(LockedApplication application) {
curator.writeApplication(application.get());
}
/**
 * Acquire a locked application to modify and store, if there is an application with the given id.
 * Does nothing when the application does not exist; the lock is released when this returns.
 *
 * @param applicationId ID of the application to lock and get.
 * @param action Function which acts on the locked application.
 */
public void lockApplicationIfPresent(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
try (Lock lock = lock(applicationId)) {
getApplication(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
}
}
/**
 * Acquire a locked application to modify and store, or throw an exception if no application has the given id.
 * The lock is released when this returns.
 *
 * @param applicationId ID of the application to lock and require.
 * @param action Function which acts on the locked application.
 * @throws IllegalArgumentException when application does not exist.
 */
public void lockApplicationOrThrow(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
try (Lock lock = lock(applicationId)) {
action.accept(new LockedApplication(requireApplication(applicationId), lock));
}
}
/**
 * Tells config server to schedule a restart of all nodes in this deployment
 *
 * @param restartFilter Variables to filter which nodes to restart.
 */
public void restart(DeploymentId deploymentId, RestartFilter restartFilter) {
configServer.restart(deploymentId, restartFilter);
}
/**
 * Asks the config server whether this deployment is currently healthy, i.e., serving traffic as usual.
 * If this cannot be ascertained, it must be assumed it is not.
 */
public boolean isHealthy(DeploymentId deploymentId) {
try {
return ! isSuspended(deploymentId);
}
catch (RuntimeException e) {
// Fail towards "unhealthy" when the config server cannot be queried.
log.log(Level.WARNING, "Failed getting suspension status of " + deploymentId + ": " + Exceptions.toMessageString(e));
return false;
}
}
/**
 * Asks the config server whether this deployment is currently <i>suspended</i>:
 * Not in a state where it should receive traffic.
 */
public boolean isSuspended(DeploymentId deploymentId) {
return configServer.isSuspended(deploymentId);
}
/** Sets suspension status of the given deployment in its zone, via the zone's config server. */
public void setSuspension(DeploymentId deploymentId, boolean suspend) {
configServer.setSuspension(deploymentId, suspend);
}
/**
 * Deactivate application in the given zone. Stores the updated application.
 *
 * @throws IllegalArgumentException if the application does not exist
 */
public void deactivate(ApplicationId id, ZoneId zone) {
lockApplicationOrThrow(TenantAndApplicationId.from(id),
application -> store(deactivate(application, id.instance(), zone)));
}
/**
 * Deactivates a locked application without storing it
 *
 * @return the application with the deployment in the given zone removed
 */
private LockedApplication deactivate(LockedApplication application, InstanceName instanceName, ZoneId zone) {
DeploymentId id = new DeploymentId(application.get().id().instance(instanceName), zone);
try {
configServer.deactivate(id);
} finally {
// Always run cleanup, also when deactivation on the config server fails.
controller.routing().policies().refresh(application.get().id().instance(instanceName), application.get().deploymentSpec(), zone);
if (zone.environment().isManuallyDeployed())
applicationStore.putMetaTombstone(id, clock.instant());
if (!zone.environment().isTest())
controller.notificationsDb().removeNotifications(NotificationSource.from(id));
}
return application.with(instanceName, instance -> instance.withoutDeploymentIn(zone));
}
/** Returns the trigger used to schedule deployment jobs for applications. */
public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }
/**
 * Returns a lock which provides exclusive rights to changing this application.
 * Any operation which stores an application needs to first acquire this lock, then read, modify
 * and store the application, and finally release (close) the lock.
 *
 * @param application the application to lock
 * @return the held lock; must be closed by the caller
 */
Lock lock(TenantAndApplicationId application) {
return curator.lock(application);
}
/**
 * Returns a lock which provides exclusive rights to deploying this application to the given zone.
 * Finer-grained than {@link #lock}, so concurrent deployments to different zones do not block each other.
 */
private Lock lockForDeployment(ApplicationId application, ZoneId zone) {
return curator.lockForDeployment(application, zone);
}
/**
 * Verifies that the application can be deployed to the tenant, following these rules:
 *
 * 1. Verify that the Athenz service can be launched by the config server
 * 2. If the principal is given, verify that the principal is tenant admin or admin of the tenant domain
 * 3. If the principal is not given, verify that the Athenz domain of the tenant equals Athenz domain given in deployment.xml
 *
 * @param tenantName tenant where application should be deployed
 * @param instanceName instance targeted by the deployment, if any; used to pick an instance-level Athenz service
 * @param zoneId zone to deploy to; required when the deployer is an Athenz user
 * @param applicationPackage application package
 * @param deployer principal initiating the deployment, possibly empty
 * @throws IllegalArgumentException if any of the rules above is violated
 */
public void verifyApplicationIdentityConfiguration(TenantName tenantName, Optional<InstanceName> instanceName, Optional<ZoneId> zoneId, ApplicationPackage applicationPackage, Optional<Principal> deployer) {
Optional<AthenzDomain> identityDomain = applicationPackage.deploymentSpec().athenzDomain()
.map(domain -> new AthenzDomain(domain.value()));
// No Athenz domain configured in deployment.xml: nothing to verify.
if(identityDomain.isEmpty()) {
return;
}
// Athenz identities can only be honored when access control is backed by Athenz.
if(! (accessControl instanceof AthenzFacade)) {
throw new IllegalArgumentException("Athenz domain and service specified in deployment.xml, but not supported by system.");
}
// Rule 1: the config servers must be allowed to launch every configured service.
verifyAllowedLaunchAthenzService(applicationPackage.deploymentSpec());
Optional<AthenzUser> athenzUser = getUser(deployer);
if (athenzUser.isPresent()) {
// Rule 2: a user principal must be allowed to launch the service, or be tenant domain admin.
var zone = zoneId.orElseThrow(() -> new IllegalArgumentException("Unable to evaluate access, no zone provided in deployment"));
// Instance-level athenz-service overrides the deployment-level one, when present.
var serviceToLaunch = instanceName
.flatMap(instance -> applicationPackage.deploymentSpec().instance(instance))
.flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region()))
.or(() -> applicationPackage.deploymentSpec().athenzService())
.map(service -> new AthenzService(identityDomain.get(), service.value()));
if(serviceToLaunch.isPresent()) {
if (
! ((AthenzFacade) accessControl).canLaunch(athenzUser.get(), serviceToLaunch.get()) &&
! ((AthenzFacade) accessControl).hasTenantAdminAccess(athenzUser.get(), identityDomain.get())
) {
throw new IllegalArgumentException("User " + athenzUser.get().getFullName() + " is not allowed to launch " +
"service " + serviceToLaunch.get().getFullName() + ". " +
"Please reach out to the domain admin.");
}
} else {
// Athenz domain without a service for this zone is a configuration error.
throw new IllegalArgumentException("Athenz domain configured, but no service defined for deployment to " + zone.value());
}
} else {
// Rule 3: without a user principal, the tenant's Athenz domain must match the one in deployment.xml.
Tenant tenant = controller.tenants().require(tenantName);
AthenzDomain tenantDomain = ((AthenzTenant) tenant).domain();
if ( ! Objects.equals(tenantDomain, identityDomain.get()))
throw new IllegalArgumentException("Athenz domain in deployment.xml: [" + identityDomain.get().getName() + "] " +
"must match tenant domain: [" + tenantDomain.getName() + "]");
}
}
/** Throws if the job would deploy versions older than what is currently deployed in a production zone. */
private void rejectOldChange(Instance instance, Version platform, ApplicationVersion revision, JobId job, ZoneId zone) {
    if ( ! zone.environment().isProduction()) return; // Only production deployments are guarded.
    Deployment deployed = instance.deployments().get(zone);
    if (deployed == null) return;                     // Nothing deployed here yet: anything goes.
    boolean olderPlatform = platform.compareTo(deployed.version()) < 0 && ! instance.change().isPinned();
    boolean olderRevision = revision.compareTo(deployed.applicationVersion()) < 0
                            && ! (revision.isUnknown() && controller.system().isCd());
    if ( ! olderPlatform && ! olderRevision) return;
    throw new IllegalArgumentException(String.format("Rejecting deployment of application %s to %s, as the requested versions (platform: %s, application: %s)" +
                                                     " are older than the currently deployed (platform: %s, application: %s).",
                                                     job.application(), zone, platform, revision, deployed.version(), deployed.applicationVersion()));
}
/** Returns the ID formed by replacing dashes with underscores in the application name, for collision checks. */
private TenantAndApplicationId dashToUnderscore(TenantAndApplicationId id) {
    // replace(char, char) does a plain character swap; replaceAll would compile a regex on every call.
    return TenantAndApplicationId.from(id.tenant().value(), id.application().value().replace('-', '_'));
}
/** Returns the instance ID formed by replacing dashes with underscores in the application name, for collision checks. */
private ApplicationId dashToUnderscore(ApplicationId id) {
    return dashToUnderscore(TenantAndApplicationId.from(id)).instance(id.instance());
}
/** Computes the quota usage of the given application's deployment in the given zone. */
private QuotaUsage deploymentQuotaUsage(ZoneId zoneId, ApplicationId applicationId) {
    return DeploymentQuotaCalculator.calculateQuotaUsage(configServer.nodeRepository().getApplication(zoneId, applicationId));
}
/** Fetches the application package for the given revision: the dev package when the revision is unknown, otherwise the stored build. */
private ApplicationPackage getApplicationPackage(ApplicationId application, ZoneId zone, ApplicationVersion revision) {
    if (revision.isUnknown())
        return new ApplicationPackage(applicationStore.getDev(application, zone));
    return new ApplicationPackage(applicationStore.get(application.tenant(), application.application(), revision));
}
/** Returns the AthenzUser of this principal, or empty if the principal does not represent an Athenz user. */
private Optional<AthenzUser> getUser(Optional<Principal> deployer) {
    if (deployer.isEmpty() || ! (deployer.get() instanceof AthenzPrincipal))
        return Optional.empty();
    var identity = ((AthenzPrincipal) deployer.get()).getIdentity();
    if ( ! (identity instanceof AthenzUser))
        return Optional.empty();
    return Optional.of((AthenzUser) identity);
}
/** Verifies that every Athenz service referenced by the deployment spec can be launched by the config servers of all reachable zones. */
private void verifyAllowedLaunchAthenzService(DeploymentSpec deploymentSpec) {
    if (deploymentSpec.athenzDomain().isEmpty()) return;
    var domain = deploymentSpec.athenzDomain().get();
    for (ZoneId zone : controller.zoneRegistry().zones().reachable().ids()) {
        AthenzIdentity configServerAthenzIdentity = controller.zoneRegistry().getConfigServerHttpsIdentity(zone);
        // Deployment-level service, if declared.
        deploymentSpec.athenzService().ifPresent(
                service -> verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity,
                                                              new AthenzService(domain.value(), service.value())));
        // Instance-level services, resolved per environment and region.
        deploymentSpec.instances().forEach(
                spec -> spec.athenzService(zone.environment(), zone.region()).ifPresent(
                        service -> verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity,
                                                                      new AthenzService(domain.value(), service.value()))));
    }
}
/** Throws unless the given config server identity is allowed to launch the given Athenz service. */
// NOTE(review): assumes accessControl is an AthenzFacade — callers verify this before calling (see verifyApplicationIdentityConfiguration).
private void verifyAthenzServiceCanBeLaunchedBy(AthenzIdentity configServerAthenzIdentity, AthenzService athenzService) {
    if ( ! ((AthenzFacade) accessControl).canLaunch(configServerAthenzIdentity, athenzService))
        throw new IllegalArgumentException("Not allowed to launch Athenz service " + athenzService.getFullName());
}
/** Returns the latest known version within the given major, or empty if none exists. */
public Optional<Version> lastCompatibleVersion(int targetMajorVersion) {
    Version latest = null;
    for (VespaVersion candidate : controller.readVersionStatus().versions()) {
        Version version = candidate.versionNumber();
        if (version.getMajor() != targetMajorVersion) continue;
        if (latest == null || version.compareTo(latest) > 0) latest = version;
    }
    return Optional.ofNullable(latest);
}
/** Extracts the deployment-warning metric from a deployment result: the number of log entries at warning level. */
private static Map<DeploymentMetrics.Warning, Integer> warningsFrom(ActivateResult result) {
    var logs = result.prepareResponse().log;
    if (logs == null) return Map.of();
    int count = 0;
    for (Log entry : logs)
        if ("warn".equalsIgnoreCase(entry.level) || "warning".equalsIgnoreCase(entry.level))
            count++;
    // All warnings are aggregated under the single key Warning.all, as before.
    return count == 0 ? Map.of() : Map.of(DeploymentMetrics.Warning.all, count);
}
} |
Yes, I'm not sure what happens to those now and if it's safe to do it here? FYI: @mpolden | public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) {
applicationPackageValidator.validate(application.get(), applicationPackage, clock.instant());
application = application.with(applicationPackage.deploymentSpec());
application = application.with(applicationPackage.validationOverrides());
var existingInstances = application.get().instances().keySet();
var declaredInstances = applicationPackage.deploymentSpec().instanceNames();
for (var name : declaredInstances)
if ( ! existingInstances.contains(name))
application = withNewInstance(application, application.get().id().instance(name));
for (InstanceName name : existingInstances) {
application = withoutDeletedDeployments(application, name);
}
for (InstanceName instance : declaredInstances)
if (applicationPackage.deploymentSpec().requireInstance(instance).concerns(Environment.prod))
application = controller.routing().assignRotations(application, instance);
controller.jobController().deploymentStatus(application.get());
for (var name : existingInstances)
if ( ! declaredInstances.contains(name))
controller.notificationsDb().removeNotifications(NotificationSource.from(application.get().id().instance(name)));
store(application);
return application;
} | controller.notificationsDb().removeNotifications(NotificationSource.from(application.get().id().instance(name))); | public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) {
applicationPackageValidator.validate(application.get(), applicationPackage, clock.instant());
application = application.with(applicationPackage.deploymentSpec());
application = application.with(applicationPackage.validationOverrides());
var existingInstances = application.get().instances().keySet();
var declaredInstances = applicationPackage.deploymentSpec().instanceNames();
for (var name : declaredInstances)
if ( ! existingInstances.contains(name))
application = withNewInstance(application, application.get().id().instance(name));
for (InstanceName name : existingInstances) {
application = withoutDeletedDeployments(application, name);
}
for (InstanceName instance : declaredInstances)
if (applicationPackage.deploymentSpec().requireInstance(instance).concerns(Environment.prod))
application = controller.routing().assignRotations(application, instance);
controller.jobController().deploymentStatus(application.get());
for (var name : existingInstances)
if ( ! declaredInstances.contains(name))
controller.notificationsDb().removeNotifications(NotificationSource.from(application.get().id().instance(name)));
store(application);
return application;
} | class ApplicationController {
private static final Logger log = Logger.getLogger(ApplicationController.class.getName());
/** The controller owning this */
private final Controller controller;
/** For persistence */
private final CuratorDb curator;
/** Source of system application packages */
private final ArtifactRepository artifactRepository;
/** Stores and retrieves tenant application packages and their metadata */
private final ApplicationStore applicationStore;
/** Authorizes application creation/deletion and Athenz launch checks */
private final AccessControl accessControl;
/** Client for config server operations across zones */
private final ConfigServer configServer;
/** Time source, injected for testability */
private final Clock clock;
private final DeploymentTrigger deploymentTrigger;
private final ApplicationPackageValidator applicationPackageValidator;
private final EndpointCertificates endpointCertificates;
/** Feature flag overriding the docker image repository used for deployments */
private final StringFlag dockerImageRepoFlag;
/** Provides tenant quota used in deployment quota calculation */
private final BillingController billingController;
/**
 * Wires up the application controller with its persistence, access-control and service dependencies,
 * and schedules a one-time sweep which rewrites all stored applications shortly after construction.
 */
ApplicationController(Controller controller, CuratorDb curator, AccessControl accessControl, Clock clock,
                      FlagSource flagSource, BillingController billingController) {
    this.controller = controller;
    this.curator = curator;
    this.accessControl = accessControl;
    this.configServer = controller.serviceRegistry().configServer();
    this.clock = clock;
    this.artifactRepository = controller.serviceRegistry().artifactRepository();
    this.applicationStore = controller.serviceRegistry().applicationStore();
    this.dockerImageRepoFlag = PermanentFlags.DOCKER_IMAGE_REPO.bindTo(flagSource);
    this.billingController = billingController;
    deploymentTrigger = new DeploymentTrigger(controller, clock);
    applicationPackageValidator = new ApplicationPackageValidator(controller);
    endpointCertificates = new EndpointCertificates(controller,
                                                    controller.serviceRegistry().endpointCertificateProvider(),
                                                    controller.serviceRegistry().endpointCertificateValidator());
    // Migration-style sweep: one minute after startup, rewrite every application, adding any instance
    // declared in the deployment spec but missing from the stored application.
    // NOTE(review): assumes Once.after runs the task a single time after the delay — confirm its semantics.
    Once.after(Duration.ofMinutes(1), () -> {
        Instant start = clock.instant();
        int count = 0;
        for (TenantAndApplicationId id : curator.readApplicationIds()) {
            lockApplicationIfPresent(id, application -> {
                for (InstanceName instance : application.get().deploymentSpec().instanceNames())
                    if (!application.get().instances().containsKey(instance))
                        application = withNewInstance(application, id.instance(instance));
                store(application);
            });
            count++; // Counts every id, also those skipped because the application vanished under the lock.
        }
        log.log(Level.INFO, String.format("Wrote %d applications in %s", count,
                                          Duration.between(start, clock.instant())));
    });
}
/** Returns the application with the given id, or an empty Optional if it is not present */
public Optional<Application> getApplication(TenantAndApplicationId id) {
    return curator.readApplication(id);
}
/** Returns the instance with the given id, or an empty Optional if it is not present */
public Optional<Instance> getInstance(ApplicationId id) {
    return getApplication(TenantAndApplicationId.from(id)).flatMap(application -> application.get(id.instance()));
}
/**
 * Triggers reindexing for the given document types in the given clusters, for the given application.
 *
 * If no clusters are given, reindexing is triggered for the entire application; otherwise
 * if no document types are given, reindexing is triggered for all given clusters; otherwise
 * reindexing is triggered for the cartesian product of the given clusters and document types.
 *
 * @param id application whose deployment should be reindexed
 * @param zoneId zone of the deployment
 * @param clusterNames clusters to reindex; empty means all
 * @param documentTypes document types to reindex; empty means all in the given clusters
 * @param indexedOnly presumably restricts reindexing to indexed document types — confirm against ConfigServer.reindex
 */
public void reindex(ApplicationId id, ZoneId zoneId, List<String> clusterNames, List<String> documentTypes, boolean indexedOnly) {
    configServer.reindex(new DeploymentId(id, zoneId), clusterNames, documentTypes, indexedOnly);
}
/** Returns the reindexing status for the given application in the given zone, as reported by the config server. */
public ApplicationReindexing applicationReindexing(ApplicationId id, ZoneId zoneId) {
    return configServer.getReindexing(new DeploymentId(id, zoneId));
}
/** Enables reindexing for the given application in the given zone, via the config server. */
public void enableReindexing(ApplicationId id, ZoneId zoneId) {
    configServer.enableReindexing(new DeploymentId(id, zoneId));
}
/** Disables reindexing for the given application in the given zone, via the config server. */
public void disableReindexing(ApplicationId id, ZoneId zoneId) {
    configServer.disableReindexing(new DeploymentId(id, zoneId));
}
/**
 * Returns the application with the given id
 *
 * @return the application, never null
 * @throws IllegalArgumentException if it does not exist
 */
public Application requireApplication(TenantAndApplicationId id) {
    return getApplication(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
}
/**
 * Returns the instance with the given id
 *
 * @return the instance, never null
 * @throws IllegalArgumentException if it does not exist
 */
public Instance requireInstance(ApplicationId id) {
    return getInstance(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
}
/** Returns a snapshot of all applications */
// NOTE(review): the boolean presumably means "do not skip unreadable applications" — contrast with readable(); confirm.
public List<Application> asList() {
    return curator.readApplications(false);
}
/**
 * Returns a snapshot of all readable applications. Unlike {@link #asList()}, this silently skips
 * applications that cannot currently be read (e.g. due to serialization issues) and may return an incomplete
 * snapshot.
 *
 * This should only be used in cases where acting on a subset of applications is better than none.
 */
public List<Application> readable() {
    return curator.readApplications(true);
}
/** Returns the IDs of all known applications. */
public List<TenantAndApplicationId> idList() {
    return curator.readApplicationIds();
}
/** Returns a snapshot of all applications of the given tenant */
public List<Application> asList(TenantName tenant) {
    return curator.readApplications(tenant);
}
/** Returns the repository of system application artifacts. */
public ArtifactRepository artifacts() { return artifactRepository; }
/** Returns the store of tenant application packages. */
public ApplicationStore applicationStore() { return applicationStore; }
/** Returns all currently reachable content clusters among the given deployments, keyed and ordered by zone. */
public Map<ZoneId, List<String>> reachableContentClustersByZone(Collection<DeploymentId> ids) {
    Map<ZoneId, List<String>> clusters = new TreeMap<>(Comparator.comparing(ZoneId::value));
    for (DeploymentId deployment : ids) {
        if ( ! isHealthy(deployment)) continue; // Unreachable or suspended deployments are skipped.
        clusters.put(deployment.zoneId(), List.copyOf(configServer.getContentClusters(deployment)));
    }
    return Collections.unmodifiableMap(clusters);
}
/** Reads the oldest installed platform for the given application and zone from job history, or a node repo. */
private Optional<Version> oldestInstalledPlatform(JobStatus job) {
    Version oldest = null;
    // Walk runs from newest to oldest, tracking the minimum target platform seen; stop at the first
    // successful run, since everything older has been superseded by that deployment.
    for (Run run : job.runs().descendingMap().values()) {
        Version version = run.versions().targetPlatform();
        if (oldest == null || version.isBefore(oldest))
            oldest = version;
        if (run.status() == RunStatus.success)
            return Optional.of(oldest);
    }
    // No successful run in history: fall back to asking the zone's node repository.
    return oldestInstalledPlatform(job.id());
}
/** Reads the oldest installed platform for the given application and zone from the node repo of that zone. */
private Optional<Version> oldestInstalledPlatform(JobId job) {
    ZoneId zone = job.type().zone(controller.system());
    Version oldest = null;
    for (Node node : configServer.nodeRepository().list(zone, job.application(), EnumSet.of(active, reserved))) {
        Version version = node.currentVersion();
        if (version.isEmpty()) continue; // Nodes without a reported version are ignored.
        if (oldest == null || version.isBefore(oldest)) oldest = version;
    }
    return Optional.ofNullable(oldest);
}
/** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */
public Version oldestInstalledPlatform(TenantAndApplicationId id) {
    return controller.jobController().deploymentStatus(requireApplication(id)).jobs()
                     .production().asList().stream()
                     .map(this::oldestInstalledPlatform)
                     .flatMap(Optional::stream)
                     .min(naturalOrder())
                     // No production jobs with a known platform: fall back to the system version.
                     .orElse(controller.readSystemVersion());
}
/**
 * Creates a new application for an existing tenant.
 *
 * @throws IllegalArgumentException if the application already exists
 */
public Application createApplication(TenantAndApplicationId id, Credentials credentials) {
    try (Lock lock = lock(id)) {
        if (getApplication(id).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
        // Dashes and underscores collide in some contexts, so both spellings must be free.
        if (getApplication(dashToUnderscore(id)).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
        com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
        if (controller.tenants().get(id.tenant()).isEmpty())
            throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
        // Register with access control before persisting, so an unauthorized caller creates nothing.
        accessControl.createApplication(id, credentials);
        LockedApplication locked = new LockedApplication(new Application(id, clock.instant()), lock);
        store(locked);
        log.info("Created " + locked);
        return locked.get();
    }
}
/**
 * Creates a new instance for an existing application.
 *
 * @throws IllegalArgumentException if the instance already exists, or has an invalid instance name.
 */
public void createInstance(ApplicationId id) {
    lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        store(withNewInstance(application, id));
    });
}
/** Fetches the requested application package from the artifact store(s). */
public ApplicationPackage getApplicationPackage(ApplicationId id, ApplicationVersion version) {
    return new ApplicationPackage(applicationStore.get(id.tenant(), id.application(), version));
}
/** Returns the given application with a new instance added. Does not persist the result — callers must store it. */
public LockedApplication withNewInstance(LockedApplication application, ApplicationId instance) {
    if (instance.instance().isTester())
        throw new IllegalArgumentException("'" + instance + "' is a tester application!");
    InstanceId.validate(instance.instance().value());
    if (getInstance(instance).isPresent())
        throw new IllegalArgumentException("Could not create '" + instance + "': Instance already exists");
    // Dashes and underscores collide in some contexts, so both spellings must be free.
    if (getInstance(dashToUnderscore(instance)).isPresent())
        throw new IllegalArgumentException("Could not create '" + instance + "': Instance " + dashToUnderscore(instance) + " already exists");
    log.info("Created " + instance);
    return application.withNewInstance(instance.instance());
}
/** Deploys an application package for an existing application instance. */
public ActivateResult deploy(JobId job, boolean deploySourceVersions) {
    if (job.application().instance().isTester())
        throw new IllegalArgumentException("'" + job.application() + "' is a tester application!");
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(job.application());
    ZoneId zone = job.type().zone(controller.system());
    // The deployment lock serializes deployments of this instance to this zone.
    try (Lock deploymentLock = lockForDeployment(job.application(), zone)) {
        Set<ContainerEndpoint> containerEndpoints;
        Optional<EndpointCertificateMetadata> endpointCertificateMetadata;
        Optional<TenantRoles> tenantRoles = Optional.empty();
        Run run = controller.jobController().last(job)
                            .orElseThrow(() -> new IllegalStateException("No known run of '" + job + "'"));
        if (run.hasEnded())
            throw new IllegalStateException("No deployment expected for " + job + " now, as no job is running");
        // Source versions are used when the run is an upgrade test of the source combination.
        Version platform = run.versions().sourcePlatform().filter(__ -> deploySourceVersions).orElse(run.versions().targetPlatform());
        ApplicationVersion revision = run.versions().sourceApplication().filter(__ -> deploySourceVersions).orElse(run.versions().targetApplication());
        ApplicationPackage applicationPackage = getApplicationPackage(job.application(), zone, revision);
        // Hold the application lock only while reading application state — not across the config server call.
        try (Lock lock = lock(applicationId)) {
            LockedApplication application = new LockedApplication(requireApplication(applicationId), lock);
            Instance instance = application.get().require(job.application().instance());
            rejectOldChange(instance, platform, revision, job, zone);
            if ( ! applicationPackage.trustedCertificates().isEmpty()
                 && run.testerCertificate().isPresent())
                applicationPackage = applicationPackage.withTrustedCertificate(run.testerCertificate().get());
            endpointCertificateMetadata = endpointCertificates.getMetadata(instance, zone, applicationPackage.deploymentSpec().instance(instance.name()));
            containerEndpoints = controller.routing().containerEndpointsOf(application.get(), job.application().instance(), zone);
        }
        ActivateResult result = deploy(job.application(), applicationPackage, zone, platform, containerEndpoints, endpointCertificateMetadata, tenantRoles);
        var quotaUsage = deploymentQuotaUsage(zone, job.application());
        // Manual deployments get notifications per deployment; pipeline deployments per application.
        NotificationSource source = zone.environment().isManuallyDeployed() ?
                NotificationSource.from(new DeploymentId(job.application(), zone)) : NotificationSource.from(applicationId);
        // Collect distinct application-package warnings from the prepare log, for user notification.
        List<String> warnings = Optional.ofNullable(result.prepareResponse().log)
                                        .map(logs -> logs.stream()
                                                         .filter(log -> log.applicationPackage)
                                                         .filter(log -> LogLevel.parse(log.level).intValue() >= Level.WARNING.intValue())
                                                         .map(log -> log.message)
                                                         .sorted()
                                                         .distinct()
                                                         .collect(Collectors.toList()))
                                        .orElseGet(List::of);
        if (warnings.isEmpty()) controller.notificationsDb().removeNotification(source, Notification.Type.applicationPackage);
        else controller.notificationsDb().setNotification(source, Notification.Type.applicationPackage, Notification.Level.warning, warnings);
        // Record the new deployment on the instance under the application lock.
        lockApplicationOrThrow(applicationId, application ->
                store(application.with(job.application().instance(),
                                       instance -> instance.withNewDeployment(zone, revision, platform,
                                                                              clock.instant(), warningsFrom(result),
                                                                              quotaUsage))));
        return result;
    }
}
// NOTE(review): the next line is a dangling javadoc describing storeWithUpdatedConfig, which is defined
// elsewhere; it does not document the method below.
/** Stores the deployment spec and validation overrides from the application package, and runs cleanup. */
/** Deploy a system application to given zone */
public void deploy(SystemApplication application, ZoneId zone, Version version) {
    if (application.hasApplicationPackage()) {
        deploySystemApplicationPackage(application, zone, version);
    } else {
        // No application package: the upgrade is carried out by the node repository for this node type.
        configServer.nodeRepository().upgrade(zone, application.nodeType(), version);
    }
}
/** Deploys the package of a system application to the given zone, at the given version. */
public ActivateResult deploySystemApplicationPackage(SystemApplication application, ZoneId zone, Version version) {
    if ( ! application.hasApplicationPackage())
        throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString());
    ApplicationPackage applicationPackage =
            new ApplicationPackage(artifactRepository.getSystemApplicationPackage(application.id(), zone, version));
    // System applications carry no application certificate, and no tenant roles.
    return deploy(application.id(), applicationPackage, zone, version, Set.of(), Optional.empty(), Optional.empty());
}
/** Deploys the given tester application to the given zone. */
public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, Version platform) {
    return deploy(tester.id(), applicationPackage, zone, platform, Set.of(), /* No application cert for tester*/ Optional.empty(), Optional.empty());
}
/**
 * Prepares and activates the given package on the config server of the given zone, and always
 * refreshes routing policies afterwards — also when deployment fails.
 */
private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage,
                              ZoneId zone, Version platform, Set<ContainerEndpoint> endpoints,
                              Optional<EndpointCertificateMetadata> endpointCertificateMetadata,
                              Optional<TenantRoles> tenantRoles) {
    try {
        // Optional per-zone/per-application override of the docker image repository, from a feature flag.
        Optional<DockerImage> dockerImageRepo = Optional.ofNullable(
                dockerImageRepoFlag
                        .with(FetchVector.Dimension.ZONE_ID, zone.value())
                        .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm())
                        .value())
                .filter(s -> !s.isBlank())
                .map(DockerImage::fromString);
        Optional<AthenzDomain> domain = controller.tenants().get(application.tenant())
                .filter(tenant-> tenant instanceof AthenzTenant)
                .map(tenant -> ((AthenzTenant)tenant).domain());
        // Manual deployments archive their package metadata for later inspection.
        if (zone.environment().isManuallyDeployed())
            controller.applications().applicationStore().putMeta(new DeploymentId(application, zone),
                                                                 clock.instant(),
                                                                 applicationPackage.metaDataZip());
        Quota deploymentQuota = DeploymentQuotaCalculator.calculate(billingController.getQuota(application.tenant()),
                asList(application.tenant()), application, zone, applicationPackage.deploymentSpec());
        List<TenantSecretStore> tenantSecretStores = controller.tenants()
                .get(application.tenant())
                .filter(tenant-> tenant instanceof CloudTenant)
                .map(tenant -> ((CloudTenant) tenant).tenantSecretStores())
                .orElse(List.of());
        // Certificates from active support-access grants are passed along so operators can access the deployment.
        List<X509Certificate> operatorCertificates = controller.supportAccess().activeGrantsFor(new DeploymentId(application, zone)).stream()
                .map(SupportAccessGrant::certificate)
                .collect(toList());
        ConfigServer.PreparedApplication preparedApplication =
                configServer.deploy(new DeploymentData(application, zone, applicationPackage.zippedContent(), platform,
                                                       endpoints, endpointCertificateMetadata, dockerImageRepo, domain,
                                                       tenantRoles, deploymentQuota, tenantSecretStores, operatorCertificates));
        return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
                                  applicationPackage.zippedContent().length);
    } finally {
        // Refresh routing also on failure, so policies reflect whatever state the config server reached.
        controller.routing().policies().refresh(application, applicationPackage.deploymentSpec(), zone);
    }
}
/**
 * Removes production deployments of the given instance which are no longer declared in the deployment spec,
 * provided the deployment-removal validation override allows it. Also removes the instance itself when it is
 * no longer declared and has no remaining deployments. Does not persist the result.
 */
private LockedApplication withoutDeletedDeployments(LockedApplication application, InstanceName instance) {
    DeploymentSpec deploymentSpec = application.get().deploymentSpec();
    // Production deployments in zones no longer covered by the spec are candidates for removal.
    List<ZoneId> deploymentsToRemove = application.get().require(instance).productionDeployments().values().stream()
                                                  .map(Deployment::zone)
                                                  .filter(zone -> deploymentSpec.instance(instance).isEmpty()
                                                                  || ! deploymentSpec.requireInstance(instance).deploysTo(zone.environment(),
                                                                                                                          zone.region()))
                                                  .collect(toList());
    if (deploymentsToRemove.isEmpty())
        return application;
    // Removing production deployments is destructive, so it must be explicitly allowed by a validation override.
    if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
        throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get().require(instance) +
                                           " is deployed in " +
                                           deploymentsToRemove.stream()
                                                              .map(zone -> zone.region().value())
                                                              .collect(joining(", ")) +
                                           ", but does not include " +
                                           (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
                                           " in deployment.xml. " +
                                           ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval));
    // Drop the instance entirely when it is undeclared and all its deployments are being removed.
    boolean removeInstance = ! deploymentSpec.instanceNames().contains(instance)
                             && application.get().require(instance).deployments().size() == deploymentsToRemove.size();
    for (ZoneId zone : deploymentsToRemove)
        application = deactivate(application, instance, zone);
    if (removeInstance)
        application = application.without(instance);
    return application;
}
/**
 * Deletes the given application. All known instances of the application will be deleted.
 *
 * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
 */
public void deleteApplication(TenantAndApplicationId id, Credentials credentials) {
    lockApplicationOrThrow(id, application -> {
        // Refuse deletion while any instance still has active deployments; report them all.
        var deployments = application.get().instances().values().stream()
                                     .filter(instance -> ! instance.deployments().isEmpty())
                                     .collect(toMap(instance -> instance.name(),
                                                    instance -> instance.deployments().keySet().stream()
                                                                        .map(ZoneId::toString)
                                                                        .collect(joining(", "))));
        if ( ! deployments.isEmpty())
            throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments: " + deployments);
        for (Instance instance : application.get().instances().values()) {
            controller.routing().removeEndpointsInDns(application.get(), instance.name());
            application = application.without(instance.name());
        }
        applicationStore.removeAll(id.tenant(), id.application());
        applicationStore.removeAllTesters(id.tenant(), id.application());
        applicationStore.putMetaTombstone(id.tenant(), id.application(), clock.instant());
        accessControl.deleteApplication(id, credentials);
        curator.removeApplication(id);
        controller.jobController().collectGarbage();
        controller.notificationsDb().removeNotifications(NotificationSource.from(id));
        log.info("Deleted " + id);
    });
}
/**
 * Deletes the given application instance.
 *
 * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
 * @throws NotExistsException if the instance does not exist
 */
public void deleteInstance(ApplicationId instanceId) {
    if (getInstance(instanceId).isEmpty())
        throw new NotExistsException("Could not delete instance '" + instanceId + "': Instance not found");
    lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> {
        if ( ! application.get().require(instanceId.instance()).deployments().isEmpty())
            throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments in: " +
                                               application.get().require(instanceId.instance()).deployments().keySet().stream().map(ZoneId::toString)
                                                          .sorted().collect(joining(", ")));
        // An instance still declared in deployment.xml would just be recreated; demand its removal there first.
        if ( ! application.get().deploymentSpec().equals(DeploymentSpec.empty)
             && application.get().deploymentSpec().instanceNames().contains(instanceId.instance()))
            throw new IllegalArgumentException("Can not delete '" + instanceId + "', which is specified in 'deployment.xml'; remove it there first");
        controller.routing().removeEndpointsInDns(application.get(), instanceId.instance());
        curator.writeApplication(application.without(instanceId.instance()).get());
        controller.jobController().collectGarbage();
        controller.notificationsDb().removeNotifications(NotificationSource.from(instanceId));
        log.info("Deleted " + instanceId);
    });
}
/**
 * Replace any previous version of this application by this instance
 *
 * @param application a locked application to store
 */
public void store(LockedApplication application) {
    curator.writeApplication(application.get());
}
/**
 * Acquire a locked application to modify and store, if there is an application with the given id.
 * Does nothing when the application does not exist.
 *
 * @param applicationId ID of the application to lock and get.
 * @param action Function which acts on the locked application.
 */
public void lockApplicationIfPresent(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
    try (Lock lock = lock(applicationId)) {
        getApplication(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
    }
}
/**
 * Acquire a locked application to modify and store, or throw an exception if no application has the given id.
 *
 * @param applicationId ID of the application to lock and require.
 * @param action Function which acts on the locked application.
 * @throws IllegalArgumentException when application does not exist.
 */
public void lockApplicationOrThrow(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
    try (Lock lock = lock(applicationId)) {
        action.accept(new LockedApplication(requireApplication(applicationId), lock));
    }
}
/**
 * Tells config server to schedule a restart of all nodes in this deployment
 *
 * @param deploymentId deployment whose nodes should be restarted
 * @param restartFilter Variables to filter which nodes to restart.
 */
public void restart(DeploymentId deploymentId, RestartFilter restartFilter) {
    configServer.restart(deploymentId, restartFilter);
}
/**
 * Asks the config server whether this deployment is currently healthy, i.e., serving traffic as usual.
 * If this cannot be ascertained, we must assume it is not.
 */
public boolean isHealthy(DeploymentId deploymentId) {
    try {
        return ! isSuspended(deploymentId);
    }
    catch (RuntimeException e) {
        // Fail safe: treat an unreachable config server as unhealthy rather than propagating the error.
        log.log(Level.WARNING, "Failed getting suspension status of " + deploymentId + ": " + Exceptions.toMessageString(e));
        return false;
    }
}
/**
 * Asks the config server whether this deployment is currently <i>suspended</i>:
 * Not in a state where it should receive traffic.
 */
public boolean isSuspended(DeploymentId deploymentId) {
    return configServer.isSuspended(deploymentId);
}
/** Sets suspension status of the given deployment in its zone, via the config server. */
public void setSuspension(DeploymentId deploymentId, boolean suspend) {
    configServer.setSuspension(deploymentId, suspend);
}
/** Deactivates the given application instance in the given zone, and stores the updated application. */
public void deactivate(ApplicationId id, ZoneId zone) {
    lockApplicationOrThrow(TenantAndApplicationId.from(id),
                           application -> store(deactivate(application, id.instance(), zone)));
}
/**
 * Deactivates a locked application without storing it
 *
 * @return the application with the deployment in the given zone removed
 */
private LockedApplication deactivate(LockedApplication application, InstanceName instanceName, ZoneId zone) {
    DeploymentId id = new DeploymentId(application.get().id().instance(instanceName), zone);
    try {
        configServer.deactivate(id);
    } finally {
        // Clean up also when deactivation fails: refresh routing policies, tombstone metadata for manual
        // deployments, and remove notifications outside test zones.
        controller.routing().policies().refresh(application.get().id().instance(instanceName), application.get().deploymentSpec(), zone);
        if (zone.environment().isManuallyDeployed())
            applicationStore.putMetaTombstone(id, clock.instant());
        if (!zone.environment().isTest())
            controller.notificationsDb().removeNotifications(NotificationSource.from(id));
    }
    return application.with(instanceName, instance -> instance.withoutDeploymentIn(zone));
}
/** Returns the deployment trigger owned by this. */
public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }
/**
 * Returns a lock which provides exclusive rights to changing this application.
 * Any operation which stores an application need to first acquire this lock, then read, modify
 * and store the application, and finally release (close) the lock.
 */
Lock lock(TenantAndApplicationId application) {
    return curator.lock(application);
}
/**
 * Returns a lock which provides exclusive rights to deploying this application to the given zone.
 */
private Lock lockForDeployment(ApplicationId application, ZoneId zone) {
    return curator.lockForDeployment(application, zone);
}
/**
 * Verifies that the application can be deployed to the tenant, following these rules:
 *
 * 1. Verify that the Athenz service can be launched by the config server
 * 2. If the principal is given, verify that the principal is tenant admin or admin of the tenant domain
 * 3. If the principal is not given, verify that the Athenz domain of the tenant equals Athenz domain given in deployment.xml
 *
 * @param tenantName tenant where application should be deployed
 * @param applicationPackage application package
 * @param deployer principal initiating the deployment, possibly empty
 */
public void verifyApplicationIdentityConfiguration(TenantName tenantName, Optional<InstanceName> instanceName, Optional<ZoneId> zoneId, ApplicationPackage applicationPackage, Optional<Principal> deployer) {
    Optional<AthenzDomain> identityDomain = applicationPackage.deploymentSpec().athenzDomain()
                                                              .map(domain -> new AthenzDomain(domain.value()));
    if (identityDomain.isEmpty()) return; // No Athenz identity configured — nothing to verify.
    if ( ! (accessControl instanceof AthenzFacade))
        throw new IllegalArgumentException("Athenz domain and service specified in deployment.xml, but not supported by system.");
    AthenzFacade athenz = (AthenzFacade) accessControl;
    verifyAllowedLaunchAthenzService(applicationPackage.deploymentSpec());
    Optional<AthenzUser> athenzUser = getUser(deployer);
    if (athenzUser.isEmpty()) {
        // No user principal: require the tenant's Athenz domain to match the one in deployment.xml.
        Tenant tenant = controller.tenants().require(tenantName);
        AthenzDomain tenantDomain = ((AthenzTenant) tenant).domain();
        if ( ! Objects.equals(tenantDomain, identityDomain.get()))
            throw new IllegalArgumentException("Athenz domain in deployment.xml: [" + identityDomain.get().getName() + "] " +
                                               "must match tenant domain: [" + tenantDomain.getName() + "]");
        return;
    }
    // User principal present: check launch permission on the most specific service configured for this zone.
    var zone = zoneId.orElseThrow(() -> new IllegalArgumentException("Unable to evaluate access, no zone provided in deployment"));
    var serviceToLaunch = instanceName
            .flatMap(instance -> applicationPackage.deploymentSpec().instance(instance))
            .flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region()))
            .or(() -> applicationPackage.deploymentSpec().athenzService())
            .map(service -> new AthenzService(identityDomain.get(), service.value()));
    if (serviceToLaunch.isEmpty())
        throw new IllegalArgumentException("Athenz domain configured, but no service defined for deployment to " + zone.value());
    if (   ! athenz.canLaunch(athenzUser.get(), serviceToLaunch.get())
        && ! athenz.hasTenantAdminAccess(athenzUser.get(), identityDomain.get()))
        throw new IllegalArgumentException("User " + athenzUser.get().getFullName() + " is not allowed to launch " +
                                           "service " + serviceToLaunch.get().getFullName() + ". " +
                                           "Please reach out to the domain admin.");
}
/** Rejects a deployment whose requested platform or application version is older than what is already deployed in production. */
private void rejectOldChange(Instance instance, Version platform, ApplicationVersion revision, JobId job, ZoneId zone) {
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null || ! zone.environment().isProduction()) return;
    // A pinned change may deliberately target an older platform.
    boolean olderPlatform = ! instance.change().isPinned() && platform.compareTo(deployment.version()) < 0;
    // Unknown revisions in CD systems are allowed to be "older".
    boolean olderRevision = ! (revision.isUnknown() && controller.system().isCd())
                            && revision.compareTo(deployment.applicationVersion()) < 0;
    if ( ! olderPlatform && ! olderRevision) return;
    throw new IllegalArgumentException(String.format("Rejecting deployment of application %s to %s, as the requested versions (platform: %s, application: %s)" +
                                                     " are older than the currently deployed (platform: %s, application: %s).",
                                                     job.application(), zone, platform, revision, deployment.version(), deployment.applicationVersion()));
}
/** Returns the given id with dashes in the application name replaced by underscores, used to detect name collisions. */
private TenantAndApplicationId dashToUnderscore(TenantAndApplicationId id) {
    // String.replace does a literal, whole-string replacement; no need for the regex machinery of replaceAll here.
    return TenantAndApplicationId.from(id.tenant().value(), id.application().value().replace('-', '_'));
}
/** Returns the given instance id with dashes in the application name replaced by underscores. */
private ApplicationId dashToUnderscore(ApplicationId id) {
    return dashToUnderscore(TenantAndApplicationId.from(id)).instance(id.instance());
}
/** Computes the current quota usage of the given application in the given zone, from the zone's node repository. */
private QuotaUsage deploymentQuotaUsage(ZoneId zoneId, ApplicationId applicationId) {
    return DeploymentQuotaCalculator.calculateQuotaUsage(configServer.nodeRepository().getApplication(zoneId, applicationId));
}
/** Fetches the package for the given revision, falling back to the dev store when the revision is unknown. */
private ApplicationPackage getApplicationPackage(ApplicationId application, ZoneId zone, ApplicationVersion revision) {
    var content = revision.isUnknown()
            ? applicationStore.getDev(application, zone)
            : applicationStore.get(application.tenant(), application.application(), revision);
    return new ApplicationPackage(content);
}
/*
 * Get the AthenzUser from this principal or Optional.empty if this does not represent a user.
 */
private Optional<AthenzUser> getUser(Optional<Principal> deployer) {
    if (deployer.isEmpty()) return Optional.empty();
    Principal principal = deployer.get();
    if ( ! (principal instanceof AthenzPrincipal)) return Optional.empty();
    AthenzIdentity identity = ((AthenzPrincipal) principal).getIdentity();
    if ( ! (identity instanceof AthenzUser)) return Optional.empty();
    return Optional.of((AthenzUser) identity);
}
/*
 * Verifies that the configured athenz service (if any) can be launched.
 */
private void verifyAllowedLaunchAthenzService(DeploymentSpec deploymentSpec) {
    if (deploymentSpec.athenzDomain().isEmpty()) return;
    var domain = deploymentSpec.athenzDomain().get();
    controller.zoneRegistry().zones().reachable().ids().forEach(zone -> {
        AthenzIdentity configServerAthenzIdentity = controller.zoneRegistry().getConfigServerHttpsIdentity(zone);
        // The application-wide service, if any, must be launchable in every reachable zone ...
        deploymentSpec.athenzService().ifPresent(service ->
                verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value())));
        // ... as must every instance-level service override.
        deploymentSpec.instances().forEach(spec ->
                spec.athenzService(zone.environment(), zone.region()).ifPresent(service ->
                        verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()))));
    });
}
/** Throws unless the config server identity is allowed to launch the given Athenz service. */
private void verifyAthenzServiceCanBeLaunchedBy(AthenzIdentity configServerAthenzIdentity, AthenzService athenzService) {
    boolean allowed = ((AthenzFacade) accessControl).canLaunch(configServerAthenzIdentity, athenzService);
    if ( ! allowed)
        throw new IllegalArgumentException("Not allowed to launch Athenz service " + athenzService.getFullName());
}
/** Returns the latest known version within the given major. */
public Optional<Version> lastCompatibleVersion(int targetMajorVersion) {
    Version latest = null;
    for (VespaVersion candidate : controller.readVersionStatus().versions()) {
        Version version = candidate.versionNumber();
        if (version.getMajor() != targetMajorVersion) continue;
        if (latest == null || latest.compareTo(version) < 0) latest = version;
    }
    return Optional.ofNullable(latest);
}
/** Extract deployment warnings metric from deployment result */
private static Map<DeploymentMetrics.Warning, Integer> warningsFrom(ActivateResult result) {
    if (result.prepareResponse().log == null) return Map.of();
    // Only the aggregate count is tracked; individual warning categories are not distinguished here.
    int warningCount = 0;
    for (Log entry : result.prepareResponse().log) {
        if ("warn".equalsIgnoreCase(entry.level) || "warning".equalsIgnoreCase(entry.level))
            warningCount++;
    }
    return warningCount == 0 ? Map.of() : Map.of(DeploymentMetrics.Warning.all, warningCount);
}
} | class ApplicationController {
private static final Logger log = Logger.getLogger(ApplicationController.class.getName());
/** The controller owning this */
private final Controller controller;
/** For persistence */
private final CuratorDb curator;
private final ArtifactRepository artifactRepository;
private final ApplicationStore applicationStore;
private final AccessControl accessControl;
private final ConfigServer configServer;
private final Clock clock;
private final DeploymentTrigger deploymentTrigger;
private final ApplicationPackageValidator applicationPackageValidator;
private final EndpointCertificates endpointCertificates;
private final StringFlag dockerImageRepoFlag;
private final BillingController billingController;
ApplicationController(Controller controller, CuratorDb curator, AccessControl accessControl, Clock clock,
                      FlagSource flagSource, BillingController billingController) {
    this.controller = controller;
    this.curator = curator;
    this.accessControl = accessControl;
    this.configServer = controller.serviceRegistry().configServer();
    this.clock = clock;
    this.artifactRepository = controller.serviceRegistry().artifactRepository();
    this.applicationStore = controller.serviceRegistry().applicationStore();
    this.dockerImageRepoFlag = PermanentFlags.DOCKER_IMAGE_REPO.bindTo(flagSource);
    this.billingController = billingController;
    deploymentTrigger = new DeploymentTrigger(controller, clock);
    applicationPackageValidator = new ApplicationPackageValidator(controller);
    endpointCertificates = new EndpointCertificates(controller,
                                                    controller.serviceRegistry().endpointCertificateProvider(),
                                                    controller.serviceRegistry().endpointCertificateValidator());
    // Delayed one-off pass over all stored applications: creates any instances declared in
    // deployment.xml that are missing from the stored application, then rewrites each application.
    Once.after(Duration.ofMinutes(1), () -> {
        Instant start = clock.instant();
        int count = 0;
        for (TenantAndApplicationId id : curator.readApplicationIds()) {
            lockApplicationIfPresent(id, application -> {
                for (InstanceName instance : application.get().deploymentSpec().instanceNames())
                    if (!application.get().instances().containsKey(instance))
                        application = withNewInstance(application, id.instance(instance));
                store(application);
            });
            count++; // counts ids visited, whether or not the application was still present
        }
        log.log(Level.INFO, String.format("Wrote %d applications in %s", count,
                                          Duration.between(start, clock.instant())));
    });
}
/** Returns the application with the given id, or {@code Optional.empty()} if it is not present */
public Optional<Application> getApplication(TenantAndApplicationId id) {
    return curator.readApplication(id);
}
/** Returns the instance with the given id, or {@code Optional.empty()} if it is not present */
public Optional<Instance> getInstance(ApplicationId id) {
    return getApplication(TenantAndApplicationId.from(id)).flatMap(application -> application.get(id.instance()));
}
/**
 * Triggers reindexing for the given document types in the given clusters, for the given application.
 *
 * If no clusters are given, reindexing is triggered for the entire application; otherwise
 * if no document types are given, reindexing is triggered for all given clusters; otherwise
 * reindexing is triggered for the cartesian product of the given clusters and document types.
 */
public void reindex(ApplicationId id, ZoneId zoneId, List<String> clusterNames, List<String> documentTypes, boolean indexedOnly) {
    configServer.reindex(new DeploymentId(id, zoneId), clusterNames, documentTypes, indexedOnly);
}
/** Returns the reindexing status for the given application in the given zone. */
public ApplicationReindexing applicationReindexing(ApplicationId id, ZoneId zoneId) {
    return configServer.getReindexing(new DeploymentId(id, zoneId));
}
/** Enables reindexing for the given application in the given zone. */
public void enableReindexing(ApplicationId id, ZoneId zoneId) {
    configServer.enableReindexing(new DeploymentId(id, zoneId));
}
/** Disables reindexing for the given application in the given zone. */
public void disableReindexing(ApplicationId id, ZoneId zoneId) {
    configServer.disableReindexing(new DeploymentId(id, zoneId));
}
/**
 * Returns the application with the given id
 *
 * @throws IllegalArgumentException if it does not exist
 */
public Application requireApplication(TenantAndApplicationId id) {
    return getApplication(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
}
/**
 * Returns the instance with the given id
 *
 * @throws IllegalArgumentException if it does not exist
 */
public Instance requireInstance(ApplicationId id) {
    return getInstance(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
}
/** Returns a snapshot of all applications */
public List<Application> asList() {
    return curator.readApplications(false);
}
/**
 * Returns a snapshot of all readable applications. Unlike {@link #asList()}, this skips
 * applications that cannot currently be read (e.g. due to serialization issues), and so may return an
 * incomplete snapshot.
 *
 * This should only be used in cases where acting on a subset of applications is better than none.
 */
public List<Application> readable() {
    return curator.readApplications(true);
}
/** Returns the ID of all known applications. */
public List<TenantAndApplicationId> idList() {
    return curator.readApplicationIds();
}
/** Returns a snapshot of all applications of a tenant */
public List<Application> asList(TenantName tenant) {
    return curator.readApplications(tenant);
}
/** Returns the artifact repository used by this. */
public ArtifactRepository artifacts() { return artifactRepository; }
/** Returns the application store used by this. */
public ApplicationStore applicationStore() { return applicationStore; }
/** Returns all currently reachable content clusters among the given deployments. */
public Map<ZoneId, List<String>> reachableContentClustersByZone(Collection<DeploymentId> ids) {
    // Sorted by zone id for stable iteration order.
    Map<ZoneId, List<String>> clusters = new TreeMap<>(Comparator.comparing(ZoneId::value));
    for (DeploymentId id : ids) {
        if ( ! isHealthy(id)) continue; // unreachable or suspended deployments are skipped
        clusters.put(id.zoneId(), List.copyOf(configServer.getContentClusters(id)));
    }
    return Collections.unmodifiableMap(clusters);
}
/**
 * Reads the oldest installed platform for the given application and zone from job history, or a node repo.
 * Walks runs from newest to oldest, tracking the minimum target platform seen; stops at the first
 * successful run. Falls back to the zone's node repository when no successful run is found.
 */
private Optional<Version> oldestInstalledPlatform(JobStatus job) {
    Version oldest = null;
    for (Run run : job.runs().descendingMap().values()) {
        Version version = run.versions().targetPlatform();
        if (oldest == null || version.isBefore(oldest))
            oldest = version;
        if (run.status() == RunStatus.success)
            return Optional.of(oldest);
    }
    // No successful run in history; ask the node repository instead.
    return oldestInstalledPlatform(job.id());
}
/** Reads the oldest installed platform for the given application and zone from the node repo of that zone. */
private Optional<Version> oldestInstalledPlatform(JobId job) {
    return configServer.nodeRepository().list(job.type().zone(controller.system()),
                                              job.application(),
                                              EnumSet.of(active, reserved))
                       .stream()
                       .map(Node::currentVersion)
                       .filter(version -> ! version.isEmpty())
                       .min(naturalOrder());
}
/** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */
public Version oldestInstalledPlatform(TenantAndApplicationId id) {
    return controller.jobController().deploymentStatus(requireApplication(id)).jobs()
                     .production().asList().stream()
                     .map(this::oldestInstalledPlatform)
                     .flatMap(Optional::stream)
                     .min(naturalOrder())
                     .orElse(controller.readSystemVersion()); // no production nodes: fall back to the system version
}
/**
 * Creates a new application for an existing tenant.
 *
 * @throws IllegalArgumentException if the application already exists
 */
public Application createApplication(TenantAndApplicationId id, Credentials credentials) {
    try (Lock lock = lock(id)) {
        if (getApplication(id).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
        // Dashes and underscores are interchangeable in some contexts, so reject near-duplicates too.
        if (getApplication(dashToUnderscore(id)).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
        com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
        if (controller.tenants().get(id.tenant()).isEmpty())
            throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
        accessControl.createApplication(id, credentials);
        LockedApplication locked = new LockedApplication(new Application(id, clock.instant()), lock);
        store(locked);
        log.info("Created " + locked);
        return locked.get();
    }
}
/**
 * Creates a new instance for an existing application.
 *
 * @throws IllegalArgumentException if the instance already exists, or has an invalid instance name.
 */
public void createInstance(ApplicationId id) {
    lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        store(withNewInstance(application, id));
    });
}
/** Fetches the requested application package from the artifact store(s). */
public ApplicationPackage getApplicationPackage(ApplicationId id, ApplicationVersion version) {
    return new ApplicationPackage(applicationStore.get(id.tenant(), id.application(), version));
}
/** Returns given application with a new instance */
public LockedApplication withNewInstance(LockedApplication application, ApplicationId instance) {
    if (instance.instance().isTester())
        throw new IllegalArgumentException("'" + instance + "' is a tester application!");
    InstanceId.validate(instance.instance().value());
    if (getInstance(instance).isPresent())
        throw new IllegalArgumentException("Could not create '" + instance + "': Instance already exists");
    if (getInstance(dashToUnderscore(instance)).isPresent())
        throw new IllegalArgumentException("Could not create '" + instance + "': Instance " + dashToUnderscore(instance) + " already exists");
    log.info("Created " + instance);
    return application.withNewInstance(instance.instance());
}
/**
 * Deploys an application package for an existing application instance.
 * Holds the per-zone deployment lock for the whole operation, but the application lock only
 * while reading/preparing deployment data and again while storing the result.
 */
public ActivateResult deploy(JobId job, boolean deploySourceVersions) {
    if (job.application().instance().isTester())
        throw new IllegalArgumentException("'" + job.application() + "' is a tester application!");
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(job.application());
    ZoneId zone = job.type().zone(controller.system());
    try (Lock deploymentLock = lockForDeployment(job.application(), zone)) {
        Set<ContainerEndpoint> containerEndpoints;
        Optional<EndpointCertificateMetadata> endpointCertificateMetadata;
        Optional<TenantRoles> tenantRoles = Optional.empty();
        Run run = controller.jobController().last(job)
                            .orElseThrow(() -> new IllegalStateException("No known run of '" + job + "'"));
        if (run.hasEnded())
            throw new IllegalStateException("No deployment expected for " + job + " now, as no job is running");
        // Use the source versions of the job's change only when explicitly requested.
        Version platform = run.versions().sourcePlatform().filter(__ -> deploySourceVersions).orElse(run.versions().targetPlatform());
        ApplicationVersion revision = run.versions().sourceApplication().filter(__ -> deploySourceVersions).orElse(run.versions().targetApplication());
        ApplicationPackage applicationPackage = getApplicationPackage(job.application(), zone, revision);
        try (Lock lock = lock(applicationId)) {
            LockedApplication application = new LockedApplication(requireApplication(applicationId), lock);
            Instance instance = application.get().require(job.application().instance());
            rejectOldChange(instance, platform, revision, job, zone);
            if ( ! applicationPackage.trustedCertificates().isEmpty()
                 && run.testerCertificate().isPresent())
                applicationPackage = applicationPackage.withTrustedCertificate(run.testerCertificate().get());
            endpointCertificateMetadata = endpointCertificates.getMetadata(instance, zone, applicationPackage.deploymentSpec().instance(instance.name()));
            containerEndpoints = controller.routing().containerEndpointsOf(application.get(), job.application().instance(), zone);
        } // Application lock is released before calling the config server, below.
        ActivateResult result = deploy(job.application(), applicationPackage, zone, platform, containerEndpoints, endpointCertificateMetadata, tenantRoles);
        var quotaUsage = deploymentQuotaUsage(zone, job.application());
        // Application-package warnings from the prepare log become (or clear) a notification.
        NotificationSource source = zone.environment().isManuallyDeployed() ?
                NotificationSource.from(new DeploymentId(job.application(), zone)) : NotificationSource.from(applicationId);
        List<String> warnings = Optional.ofNullable(result.prepareResponse().log)
                .map(logs -> logs.stream()
                        .filter(log -> log.applicationPackage)
                        .filter(log -> LogLevel.parse(log.level).intValue() >= Level.WARNING.intValue())
                        .map(log -> log.message)
                        .sorted()
                        .distinct()
                        .collect(Collectors.toList()))
                .orElseGet(List::of);
        if (warnings.isEmpty()) controller.notificationsDb().removeNotification(source, Notification.Type.applicationPackage);
        else controller.notificationsDb().setNotification(source, Notification.Type.applicationPackage, Notification.Level.warning, warnings);
        // Re-acquire the application lock to record the new deployment.
        lockApplicationOrThrow(applicationId, application ->
                store(application.with(job.application().instance(),
                                       instance -> instance.withNewDeployment(zone, revision, platform,
                                                                              clock.instant(), warningsFrom(result),
                                                                              quotaUsage))));
        return result;
    }
}
/** Deploy a system application to given zone */
public void deploy(SystemApplication application, ZoneId zone, Version version) {
    if (application.hasApplicationPackage()) {
        deploySystemApplicationPackage(application, zone, version);
    } else {
        // System applications without a package are upgraded directly through the node repository.
        configServer.nodeRepository().upgrade(zone, application.nodeType(), version);
    }
}
/**
 * Deploy a system application to given zone
 *
 * @throws RuntimeException if the given system application has no application package
 */
public ActivateResult deploySystemApplicationPackage(SystemApplication application, ZoneId zone, Version version) {
    if (application.hasApplicationPackage()) {
        ApplicationPackage applicationPackage = new ApplicationPackage(
                artifactRepository.getSystemApplicationPackage(application.id(), zone, version)
        );
        return deploy(application.id(), applicationPackage, zone, version, Set.of(), /* No application cert */ Optional.empty(), Optional.empty());
    } else {
        throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString());
    }
}
/** Deploys the given tester application to the given zone. */
public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, Version platform) {
    return deploy(tester.id(), applicationPackage, zone, platform, Set.of(), /* No application cert for tester*/ Optional.empty(), Optional.empty());
}
/**
 * Prepares and activates the given application package in the given zone, via the config server.
 * Routing policies are refreshed whether or not deployment succeeds.
 */
private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage,
                              ZoneId zone, Version platform, Set<ContainerEndpoint> endpoints,
                              Optional<EndpointCertificateMetadata> endpointCertificateMetadata,
                              Optional<TenantRoles> tenantRoles) {
    try {
        // Feature-flag override of the docker image repository, per zone/application; blank means unset.
        Optional<DockerImage> dockerImageRepo = Optional.ofNullable(
                dockerImageRepoFlag
                        .with(FetchVector.Dimension.ZONE_ID, zone.value())
                        .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm())
                        .value())
                .filter(s -> !s.isBlank())
                .map(DockerImage::fromString);
        Optional<AthenzDomain> domain = controller.tenants().get(application.tenant())
                .filter(tenant-> tenant instanceof AthenzTenant)
                .map(tenant -> ((AthenzTenant)tenant).domain());
        if (zone.environment().isManuallyDeployed())
            controller.applications().applicationStore().putMeta(new DeploymentId(application, zone),
                                                                 clock.instant(),
                                                                 applicationPackage.metaDataZip());
        Quota deploymentQuota = DeploymentQuotaCalculator.calculate(billingController.getQuota(application.tenant()),
                asList(application.tenant()), application, zone, applicationPackage.deploymentSpec());
        List<TenantSecretStore> tenantSecretStores = controller.tenants()
                .get(application.tenant())
                .filter(tenant-> tenant instanceof CloudTenant)
                .map(tenant -> ((CloudTenant) tenant).tenantSecretStores())
                .orElse(List.of());
        List<X509Certificate> operatorCertificates = controller.supportAccess().activeGrantsFor(new DeploymentId(application, zone)).stream()
                .map(SupportAccessGrant::certificate)
                .collect(toList());
        ConfigServer.PreparedApplication preparedApplication =
                configServer.deploy(new DeploymentData(application, zone, applicationPackage.zippedContent(), platform,
                                                       endpoints, endpointCertificateMetadata, dockerImageRepo, domain,
                                                       tenantRoles, deploymentQuota, tenantSecretStores, operatorCertificates));
        return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
                                  applicationPackage.zippedContent().length);
    } finally {
        // Refresh routing policies on success and failure alike, so they reflect the latest attempt.
        controller.routing().policies().refresh(application, applicationPackage.deploymentSpec(), zone);
    }
}
/**
 * Removes production deployments no longer present in the deployment spec, and possibly the whole instance.
 * Requires a deployment-removal validation override unless nothing is to be removed.
 *
 * @return the application with the obsolete deployments (and possibly the instance) removed
 */
private LockedApplication withoutDeletedDeployments(LockedApplication application, InstanceName instance) {
    DeploymentSpec deploymentSpec = application.get().deploymentSpec();
    List<ZoneId> deploymentsToRemove = application.get().require(instance).productionDeployments().values().stream()
                                                  .map(Deployment::zone)
                                                  .filter(zone -> deploymentSpec.instance(instance).isEmpty()
                                                                  || ! deploymentSpec.requireInstance(instance).deploysTo(zone.environment(),
                                                                                                                          zone.region()))
                                                  .collect(toList());
    if (deploymentsToRemove.isEmpty())
        return application;
    // Removing deployments is destructive, so it must be explicitly allowed by a validation override.
    if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
        throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get().require(instance) +
                                           " is deployed in " +
                                           deploymentsToRemove.stream()
                                                              .map(zone -> zone.region().value())
                                                              .collect(joining(", ")) +
                                           ", but does not include " +
                                           (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
                                           " in deployment.xml. " +
                                           ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval));
    // Also drop the instance when it is gone from the spec and all its deployments are being removed.
    boolean removeInstance = ! deploymentSpec.instanceNames().contains(instance)
                             && application.get().require(instance).deployments().size() == deploymentsToRemove.size();
    for (ZoneId zone : deploymentsToRemove)
        application = deactivate(application, instance, zone);
    if (removeInstance)
        application = application.without(instance);
    return application;
}
/**
 * Deletes the given application. All known instances of the applications will be deleted.
 *
 * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
 */
public void deleteApplication(TenantAndApplicationId id, Credentials credentials) {
    lockApplicationOrThrow(id, application -> {
        // Collect any instances that still have active deployments; deletion is refused while any exist.
        var deployments = application.get().instances().values().stream()
                                     .filter(instance -> ! instance.deployments().isEmpty())
                                     .collect(toMap(instance -> instance.name(),
                                                    instance -> instance.deployments().keySet().stream()
                                                                        .map(ZoneId::toString)
                                                                        .collect(joining(", "))));
        if ( ! deployments.isEmpty())
            throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments: " + deployments);
        for (Instance instance : application.get().instances().values()) {
            controller.routing().removeEndpointsInDns(application.get(), instance.name());
            application = application.without(instance.name());
        }
        applicationStore.removeAll(id.tenant(), id.application());
        applicationStore.removeAllTesters(id.tenant(), id.application());
        applicationStore.putMetaTombstone(id.tenant(), id.application(), clock.instant());
        accessControl.deleteApplication(id, credentials);
        curator.removeApplication(id);
        controller.jobController().collectGarbage();
        controller.notificationsDb().removeNotifications(NotificationSource.from(id));
        log.info("Deleted " + id);
    });
}
/**
 * Deletes the given application instance.
 *
 * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
 * @throws NotExistsException if the instance does not exist
 */
public void deleteInstance(ApplicationId instanceId) {
    if (getInstance(instanceId).isEmpty())
        throw new NotExistsException("Could not delete instance '" + instanceId + "': Instance not found");
    lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> {
        if ( ! application.get().require(instanceId.instance()).deployments().isEmpty())
            throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments in: " +
                                               application.get().require(instanceId.instance()).deployments().keySet().stream().map(ZoneId::toString)
                                                          .sorted().collect(joining(", ")));
        // Instances still declared in deployment.xml would just be re-created; require removal there first.
        if ( ! application.get().deploymentSpec().equals(DeploymentSpec.empty)
             && application.get().deploymentSpec().instanceNames().contains(instanceId.instance()))
            throw new IllegalArgumentException("Can not delete '" + instanceId + "', which is specified in 'deployment.xml'; remove it there first");
        controller.routing().removeEndpointsInDns(application.get(), instanceId.instance());
        curator.writeApplication(application.without(instanceId.instance()).get());
        controller.jobController().collectGarbage();
        controller.notificationsDb().removeNotifications(NotificationSource.from(instanceId));
        log.info("Deleted " + instanceId);
    });
}
/**
 * Replace any previous version of this application by this instance
 *
 * @param application a locked application to store
 */
public void store(LockedApplication application) {
    curator.writeApplication(application.get());
}
/**
 * Acquire a locked application to modify and store, if there is an application with the given id.
 *
 * @param applicationId ID of the application to lock and get.
 * @param action Function which acts on the locked application.
 */
public void lockApplicationIfPresent(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
    try (Lock lock = lock(applicationId)) {
        getApplication(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
    }
}
/**
 * Acquire a locked application to modify and store, or throw an exception if no application has the given id.
 *
 * @param applicationId ID of the application to lock and require.
 * @param action Function which acts on the locked application.
 * @throws IllegalArgumentException when application does not exist.
 */
public void lockApplicationOrThrow(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
    try (Lock lock = lock(applicationId)) {
        action.accept(new LockedApplication(requireApplication(applicationId), lock));
    }
}
/**
 * Tells config server to schedule a restart of all nodes in this deployment
 *
 * @param restartFilter Variables to filter which nodes to restart.
 */
public void restart(DeploymentId deploymentId, RestartFilter restartFilter) {
    configServer.restart(deploymentId, restartFilter);
}
/**
 * Asks the config server whether this deployment is currently healthy, i.e., serving traffic as usual.
 * If this cannot be ascertained, we must assume it is not.
 */
public boolean isHealthy(DeploymentId deploymentId) {
    try {
        return ! isSuspended(deploymentId);
    }
    catch (RuntimeException e) {
        // Treat "unknown" as unhealthy, but log why we could not find out.
        log.log(Level.WARNING, "Failed getting suspension status of " + deploymentId + ": " + Exceptions.toMessageString(e));
        return false;
    }
}
/**
 * Asks the config server whether this deployment is currently <i>suspended</i>:
 * Not in a state where it should receive traffic.
 */
public boolean isSuspended(DeploymentId deploymentId) {
    return configServer.isSuspended(deploymentId);
}
/** Sets suspension status of the given deployment in its zone. */
public void setSuspension(DeploymentId deploymentId, boolean suspend) {
    configServer.setSuspension(deploymentId, suspend);
}
/** Deactivate application in the given zone */
public void deactivate(ApplicationId id, ZoneId zone) {
    lockApplicationOrThrow(TenantAndApplicationId.from(id),
                           application -> store(deactivate(application, id.instance(), zone)));
}
/**
 * Deactivates a locked application without storing it
 *
 * @return the application with the deployment in the given zone removed
 */
private LockedApplication deactivate(LockedApplication application, InstanceName instanceName, ZoneId zone) {
    DeploymentId id = new DeploymentId(application.get().id().instance(instanceName), zone);
    try {
        configServer.deactivate(id);
    } finally {
        // Even if deactivation fails, refresh routing policies and clean up metadata and notifications.
        controller.routing().policies().refresh(application.get().id().instance(instanceName), application.get().deploymentSpec(), zone);
        if (zone.environment().isManuallyDeployed())
            applicationStore.putMetaTombstone(id, clock.instant());
        if (!zone.environment().isTest())
            controller.notificationsDb().removeNotifications(NotificationSource.from(id));
    }
    return application.with(instanceName, instance -> instance.withoutDeploymentIn(zone));
}
/** Returns the deployment trigger owned by this. */
public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }
/**
 * Returns a lock which provides exclusive rights to changing this application.
 * Any operation which stores an application need to first acquire this lock, then read, modify
 * and store the application, and finally release (close) the lock.
 */
Lock lock(TenantAndApplicationId application) {
    return curator.lock(application);
}
/**
 * Returns a lock which provides exclusive rights to deploying this application to the given zone.
 */
private Lock lockForDeployment(ApplicationId application, ZoneId zone) {
    return curator.lockForDeployment(application, zone);
}
/**
* Verifies that the application can be deployed to the tenant, following these rules:
*
* 1. Verify that the Athenz service can be launched by the config server
* 2. If the principal is given, verify that the principal is tenant admin or admin of the tenant domain
* 3. If the principal is not given, verify that the Athenz domain of the tenant equals Athenz domain given in deployment.xml
*
* @param tenantName tenant where application should be deployed
* @param applicationPackage application package
* @param deployer principal initiating the deployment, possibly empty
*/
public void verifyApplicationIdentityConfiguration(TenantName tenantName, Optional<InstanceName> instanceName, Optional<ZoneId> zoneId, ApplicationPackage applicationPackage, Optional<Principal> deployer) {
Optional<AthenzDomain> identityDomain = applicationPackage.deploymentSpec().athenzDomain()
.map(domain -> new AthenzDomain(domain.value()));
if(identityDomain.isEmpty()) {
return;
}
if(! (accessControl instanceof AthenzFacade)) {
throw new IllegalArgumentException("Athenz domain and service specified in deployment.xml, but not supported by system.");
}
verifyAllowedLaunchAthenzService(applicationPackage.deploymentSpec());
Optional<AthenzUser> athenzUser = getUser(deployer);
if (athenzUser.isPresent()) {
var zone = zoneId.orElseThrow(() -> new IllegalArgumentException("Unable to evaluate access, no zone provided in deployment"));
var serviceToLaunch = instanceName
.flatMap(instance -> applicationPackage.deploymentSpec().instance(instance))
.flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region()))
.or(() -> applicationPackage.deploymentSpec().athenzService())
.map(service -> new AthenzService(identityDomain.get(), service.value()));
if(serviceToLaunch.isPresent()) {
if (
! ((AthenzFacade) accessControl).canLaunch(athenzUser.get(), serviceToLaunch.get()) &&
! ((AthenzFacade) accessControl).hasTenantAdminAccess(athenzUser.get(), identityDomain.get())
) {
throw new IllegalArgumentException("User " + athenzUser.get().getFullName() + " is not allowed to launch " +
"service " + serviceToLaunch.get().getFullName() + ". " +
"Please reach out to the domain admin.");
}
} else {
throw new IllegalArgumentException("Athenz domain configured, but no service defined for deployment to " + zone.value());
}
} else {
Tenant tenant = controller.tenants().require(tenantName);
AthenzDomain tenantDomain = ((AthenzTenant) tenant).domain();
if ( ! Objects.equals(tenantDomain, identityDomain.get()))
throw new IllegalArgumentException("Athenz domain in deployment.xml: [" + identityDomain.get().getName() + "] " +
"must match tenant domain: [" + tenantDomain.getName() + "]");
}
}
private void rejectOldChange(Instance instance, Version platform, ApplicationVersion revision, JobId job, ZoneId zone) {
Deployment deployment = instance.deployments().get(zone);
if (deployment == null) return;
if (!zone.environment().isProduction()) return;
boolean platformIsOlder = platform.compareTo(deployment.version()) < 0 && !instance.change().isPinned();
boolean revisionIsOlder = revision.compareTo(deployment.applicationVersion()) < 0 &&
!(revision.isUnknown() && controller.system().isCd());
if (platformIsOlder || revisionIsOlder)
throw new IllegalArgumentException(String.format("Rejecting deployment of application %s to %s, as the requested versions (platform: %s, application: %s)" +
" are older than the currently deployed (platform: %s, application: %s).",
job.application(), zone, platform, revision, deployment.version(), deployment.applicationVersion()));
}
private TenantAndApplicationId dashToUnderscore(TenantAndApplicationId id) {
return TenantAndApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_"));
}
private ApplicationId dashToUnderscore(ApplicationId id) {
return dashToUnderscore(TenantAndApplicationId.from(id)).instance(id.instance());
}
private QuotaUsage deploymentQuotaUsage(ZoneId zoneId, ApplicationId applicationId) {
var application = configServer.nodeRepository().getApplication(zoneId, applicationId);
return DeploymentQuotaCalculator.calculateQuotaUsage(application);
}
private ApplicationPackage getApplicationPackage(ApplicationId application, ZoneId zone, ApplicationVersion revision) {
return new ApplicationPackage(revision.isUnknown() ? applicationStore.getDev(application, zone)
: applicationStore.get(application.tenant(), application.application(), revision));
}
/*
* Get the AthenzUser from this principal or Optional.empty if this does not represent a user.
*/
private Optional<AthenzUser> getUser(Optional<Principal> deployer) {
return deployer
.filter(AthenzPrincipal.class::isInstance)
.map(AthenzPrincipal.class::cast)
.map(AthenzPrincipal::getIdentity)
.filter(AthenzUser.class::isInstance)
.map(AthenzUser.class::cast);
}
/*
* Verifies that the configured athenz service (if any) can be launched.
*/
private void verifyAllowedLaunchAthenzService(DeploymentSpec deploymentSpec) {
deploymentSpec.athenzDomain().ifPresent(domain -> {
controller.zoneRegistry().zones().reachable().ids().forEach(zone -> {
AthenzIdentity configServerAthenzIdentity = controller.zoneRegistry().getConfigServerHttpsIdentity(zone);
deploymentSpec.athenzService().ifPresent(service -> {
verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()));
});
deploymentSpec.instances().forEach(spec -> {
spec.athenzService(zone.environment(), zone.region()).ifPresent(service -> {
verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()));
});
});
});
});
}
private void verifyAthenzServiceCanBeLaunchedBy(AthenzIdentity configServerAthenzIdentity, AthenzService athenzService) {
if ( ! ((AthenzFacade) accessControl).canLaunch(configServerAthenzIdentity, athenzService))
throw new IllegalArgumentException("Not allowed to launch Athenz service " + athenzService.getFullName());
}
/** Returns the latest known version within the given major. */
public Optional<Version> lastCompatibleVersion(int targetMajorVersion) {
return controller.readVersionStatus().versions().stream()
.map(VespaVersion::versionNumber)
.filter(version -> version.getMajor() == targetMajorVersion)
.max(naturalOrder());
}
/** Extract deployment warnings metric from deployment result */
private static Map<DeploymentMetrics.Warning, Integer> warningsFrom(ActivateResult result) {
if (result.prepareResponse().log == null) return Map.of();
Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>();
for (Log log : result.prepareResponse().log) {
if (!"warn".equalsIgnoreCase(log.level) && !"warning".equalsIgnoreCase(log.level)) continue;
warnings.merge(DeploymentMetrics.Warning.all, 1, Integer::sum);
}
return Map.copyOf(warnings);
}
} |
`deleteInstance(ApplicationId)` already removes DNS records. | public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) {
applicationPackageValidator.validate(application.get(), applicationPackage, clock.instant());
application = application.with(applicationPackage.deploymentSpec());
application = application.with(applicationPackage.validationOverrides());
var existingInstances = application.get().instances().keySet();
var declaredInstances = applicationPackage.deploymentSpec().instanceNames();
for (var name : declaredInstances)
if ( ! existingInstances.contains(name))
application = withNewInstance(application, application.get().id().instance(name));
for (InstanceName name : existingInstances) {
application = withoutDeletedDeployments(application, name);
}
for (InstanceName instance : declaredInstances)
if (applicationPackage.deploymentSpec().requireInstance(instance).concerns(Environment.prod))
application = controller.routing().assignRotations(application, instance);
controller.jobController().deploymentStatus(application.get());
for (var name : existingInstances)
if ( ! declaredInstances.contains(name))
controller.notificationsDb().removeNotifications(NotificationSource.from(application.get().id().instance(name)));
store(application);
return application;
} | controller.notificationsDb().removeNotifications(NotificationSource.from(application.get().id().instance(name))); | public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) {
applicationPackageValidator.validate(application.get(), applicationPackage, clock.instant());
application = application.with(applicationPackage.deploymentSpec());
application = application.with(applicationPackage.validationOverrides());
var existingInstances = application.get().instances().keySet();
var declaredInstances = applicationPackage.deploymentSpec().instanceNames();
for (var name : declaredInstances)
if ( ! existingInstances.contains(name))
application = withNewInstance(application, application.get().id().instance(name));
for (InstanceName name : existingInstances) {
application = withoutDeletedDeployments(application, name);
}
for (InstanceName instance : declaredInstances)
if (applicationPackage.deploymentSpec().requireInstance(instance).concerns(Environment.prod))
application = controller.routing().assignRotations(application, instance);
controller.jobController().deploymentStatus(application.get());
for (var name : existingInstances)
if ( ! declaredInstances.contains(name))
controller.notificationsDb().removeNotifications(NotificationSource.from(application.get().id().instance(name)));
store(application);
return application;
} | class ApplicationController {
private static final Logger log = Logger.getLogger(ApplicationController.class.getName());
/** The controller owning this */
private final Controller controller;
/** For persistence */
private final CuratorDb curator;
private final ArtifactRepository artifactRepository;
private final ApplicationStore applicationStore;
private final AccessControl accessControl;
private final ConfigServer configServer;
private final Clock clock;
private final DeploymentTrigger deploymentTrigger;
private final ApplicationPackageValidator applicationPackageValidator;
private final EndpointCertificates endpointCertificates;
private final StringFlag dockerImageRepoFlag;
private final BillingController billingController;
ApplicationController(Controller controller, CuratorDb curator, AccessControl accessControl, Clock clock,
FlagSource flagSource, BillingController billingController) {
this.controller = controller;
this.curator = curator;
this.accessControl = accessControl;
this.configServer = controller.serviceRegistry().configServer();
this.clock = clock;
this.artifactRepository = controller.serviceRegistry().artifactRepository();
this.applicationStore = controller.serviceRegistry().applicationStore();
this.dockerImageRepoFlag = PermanentFlags.DOCKER_IMAGE_REPO.bindTo(flagSource);
this.billingController = billingController;
deploymentTrigger = new DeploymentTrigger(controller, clock);
applicationPackageValidator = new ApplicationPackageValidator(controller);
endpointCertificates = new EndpointCertificates(controller,
controller.serviceRegistry().endpointCertificateProvider(),
controller.serviceRegistry().endpointCertificateValidator());
Once.after(Duration.ofMinutes(1), () -> {
Instant start = clock.instant();
int count = 0;
for (TenantAndApplicationId id : curator.readApplicationIds()) {
lockApplicationIfPresent(id, application -> {
for (InstanceName instance : application.get().deploymentSpec().instanceNames())
if (!application.get().instances().containsKey(instance))
application = withNewInstance(application, id.instance(instance));
store(application);
});
count++;
}
log.log(Level.INFO, String.format("Wrote %d applications in %s", count,
Duration.between(start, clock.instant())));
});
}
/** Returns the application with the given id, or null if it is not present */
public Optional<Application> getApplication(TenantAndApplicationId id) {
return curator.readApplication(id);
}
/** Returns the instance with the given id, or null if it is not present */
public Optional<Instance> getInstance(ApplicationId id) {
return getApplication(TenantAndApplicationId.from(id)).flatMap(application -> application.get(id.instance()));
}
/**
* Triggers reindexing for the given document types in the given clusters, for the given application.
*
* If no clusters are given, reindexing is triggered for the entire application; otherwise
* if no documents types are given, reindexing is triggered for all given clusters; otherwise
* reindexing is triggered for the cartesian product of the given clusters and document types.
*/
public void reindex(ApplicationId id, ZoneId zoneId, List<String> clusterNames, List<String> documentTypes, boolean indexedOnly) {
configServer.reindex(new DeploymentId(id, zoneId), clusterNames, documentTypes, indexedOnly);
}
/** Returns the reindexing status for the given application in the given zone. */
public ApplicationReindexing applicationReindexing(ApplicationId id, ZoneId zoneId) {
return configServer.getReindexing(new DeploymentId(id, zoneId));
}
/** Enables reindexing for the given application in the given zone. */
public void enableReindexing(ApplicationId id, ZoneId zoneId) {
configServer.enableReindexing(new DeploymentId(id, zoneId));
}
/** Disables reindexing for the given application in the given zone. */
public void disableReindexing(ApplicationId id, ZoneId zoneId) {
configServer.disableReindexing(new DeploymentId(id, zoneId));
}
/**
* Returns the application with the given id
*
* @throws IllegalArgumentException if it does not exist
*/
public Application requireApplication(TenantAndApplicationId id) {
return getApplication(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
}
/**
* Returns the instance with the given id
*
* @throws IllegalArgumentException if it does not exist
*/
public Instance requireInstance(ApplicationId id) {
return getInstance(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
}
/** Returns a snapshot of all applications */
public List<Application> asList() {
return curator.readApplications(false);
}
/**
* Returns a snapshot of all readable applications. Unlike {@link ApplicationController
* applications that cannot currently be read (e.g. due to serialization issues) and may return an incomplete
* snapshot.
*
* This should only be used in cases where acting on a subset of applications is better than none.
*/
public List<Application> readable() {
return curator.readApplications(true);
}
/** Returns the ID of all known applications. */
public List<TenantAndApplicationId> idList() {
return curator.readApplicationIds();
}
/** Returns a snapshot of all applications of a tenant */
public List<Application> asList(TenantName tenant) {
return curator.readApplications(tenant);
}
public ArtifactRepository artifacts() { return artifactRepository; }
public ApplicationStore applicationStore() { return applicationStore; }
/** Returns all currently reachable content clusters among the given deployments. */
public Map<ZoneId, List<String>> reachableContentClustersByZone(Collection<DeploymentId> ids) {
Map<ZoneId, List<String>> clusters = new TreeMap<>(Comparator.comparing(ZoneId::value));
for (DeploymentId id : ids)
if (isHealthy(id))
clusters.put(id.zoneId(), List.copyOf(configServer.getContentClusters(id)));
return Collections.unmodifiableMap(clusters);
}
/** Reads the oldest installed platform for the given application and zone from job history, or a node repo. */
private Optional<Version> oldestInstalledPlatform(JobStatus job) {
Version oldest = null;
for (Run run : job.runs().descendingMap().values()) {
Version version = run.versions().targetPlatform();
if (oldest == null || version.isBefore(oldest))
oldest = version;
if (run.status() == RunStatus.success)
return Optional.of(oldest);
}
return oldestInstalledPlatform(job.id());
}
/** Reads the oldest installed platform for the given application and zone from the node repo of that zone. */
private Optional<Version> oldestInstalledPlatform(JobId job) {
return configServer.nodeRepository().list(job.type().zone(controller.system()),
job.application(),
EnumSet.of(active, reserved))
.stream()
.map(Node::currentVersion)
.filter(version -> ! version.isEmpty())
.min(naturalOrder());
}
/** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */
public Version oldestInstalledPlatform(TenantAndApplicationId id) {
return controller.jobController().deploymentStatus(requireApplication(id)).jobs()
.production().asList().stream()
.map(this::oldestInstalledPlatform)
.flatMap(Optional::stream)
.min(naturalOrder())
.orElse(controller.readSystemVersion());
}
/**
* Creates a new application for an existing tenant.
*
* @throws IllegalArgumentException if the application already exists
*/
public Application createApplication(TenantAndApplicationId id, Credentials credentials) {
try (Lock lock = lock(id)) {
if (getApplication(id).isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
if (getApplication(dashToUnderscore(id)).isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
if (controller.tenants().get(id.tenant()).isEmpty())
throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
accessControl.createApplication(id, credentials);
LockedApplication locked = new LockedApplication(new Application(id, clock.instant()), lock);
store(locked);
log.info("Created " + locked);
return locked.get();
}
}
/**
* Creates a new instance for an existing application.
*
* @throws IllegalArgumentException if the instance already exists, or has an invalid instance name.
*/
public void createInstance(ApplicationId id) {
lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
store(withNewInstance(application, id));
});
}
/** Fetches the requested application package from the artifact store(s). */
public ApplicationPackage getApplicationPackage(ApplicationId id, ApplicationVersion version) {
return new ApplicationPackage(applicationStore.get(id.tenant(), id.application(), version));
}
/** Returns given application with a new instance */
public LockedApplication withNewInstance(LockedApplication application, ApplicationId instance) {
if (instance.instance().isTester())
throw new IllegalArgumentException("'" + instance + "' is a tester application!");
InstanceId.validate(instance.instance().value());
if (getInstance(instance).isPresent())
throw new IllegalArgumentException("Could not create '" + instance + "': Instance already exists");
if (getInstance(dashToUnderscore(instance)).isPresent())
throw new IllegalArgumentException("Could not create '" + instance + "': Instance " + dashToUnderscore(instance) + " already exists");
log.info("Created " + instance);
return application.withNewInstance(instance.instance());
}
/** Deploys an application package for an existing application instance. */
public ActivateResult deploy(JobId job, boolean deploySourceVersions) {
if (job.application().instance().isTester())
throw new IllegalArgumentException("'" + job.application() + "' is a tester application!");
TenantAndApplicationId applicationId = TenantAndApplicationId.from(job.application());
ZoneId zone = job.type().zone(controller.system());
try (Lock deploymentLock = lockForDeployment(job.application(), zone)) {
Set<ContainerEndpoint> containerEndpoints;
Optional<EndpointCertificateMetadata> endpointCertificateMetadata;
Optional<TenantRoles> tenantRoles = Optional.empty();
Run run = controller.jobController().last(job)
.orElseThrow(() -> new IllegalStateException("No known run of '" + job + "'"));
if (run.hasEnded())
throw new IllegalStateException("No deployment expected for " + job + " now, as no job is running");
Version platform = run.versions().sourcePlatform().filter(__ -> deploySourceVersions).orElse(run.versions().targetPlatform());
ApplicationVersion revision = run.versions().sourceApplication().filter(__ -> deploySourceVersions).orElse(run.versions().targetApplication());
ApplicationPackage applicationPackage = getApplicationPackage(job.application(), zone, revision);
try (Lock lock = lock(applicationId)) {
LockedApplication application = new LockedApplication(requireApplication(applicationId), lock);
Instance instance = application.get().require(job.application().instance());
rejectOldChange(instance, platform, revision, job, zone);
if ( ! applicationPackage.trustedCertificates().isEmpty()
&& run.testerCertificate().isPresent())
applicationPackage = applicationPackage.withTrustedCertificate(run.testerCertificate().get());
endpointCertificateMetadata = endpointCertificates.getMetadata(instance, zone, applicationPackage.deploymentSpec().instance(instance.name()));
containerEndpoints = controller.routing().containerEndpointsOf(application.get(), job.application().instance(), zone);
}
ActivateResult result = deploy(job.application(), applicationPackage, zone, platform, containerEndpoints, endpointCertificateMetadata, tenantRoles);
var quotaUsage = deploymentQuotaUsage(zone, job.application());
NotificationSource source = zone.environment().isManuallyDeployed() ?
NotificationSource.from(new DeploymentId(job.application(), zone)) : NotificationSource.from(applicationId);
List<String> warnings = Optional.ofNullable(result.prepareResponse().log)
.map(logs -> logs.stream()
.filter(log -> log.applicationPackage)
.filter(log -> LogLevel.parse(log.level).intValue() >= Level.WARNING.intValue())
.map(log -> log.message)
.sorted()
.distinct()
.collect(Collectors.toList()))
.orElseGet(List::of);
if (warnings.isEmpty()) controller.notificationsDb().removeNotification(source, Notification.Type.applicationPackage);
else controller.notificationsDb().setNotification(source, Notification.Type.applicationPackage, Notification.Level.warning, warnings);
lockApplicationOrThrow(applicationId, application ->
store(application.with(job.application().instance(),
instance -> instance.withNewDeployment(zone, revision, platform,
clock.instant(), warningsFrom(result),
quotaUsage))));
return result;
}
}
/** Stores the deployment spec and validation overrides from the application package, and runs cleanup. */
/** Deploy a system application to given zone */
public void deploy(SystemApplication application, ZoneId zone, Version version) {
if (application.hasApplicationPackage()) {
deploySystemApplicationPackage(application, zone, version);
} else {
configServer.nodeRepository().upgrade(zone, application.nodeType(), version);
}
}
/** Deploy a system application to given zone */
public ActivateResult deploySystemApplicationPackage(SystemApplication application, ZoneId zone, Version version) {
if (application.hasApplicationPackage()) {
ApplicationPackage applicationPackage = new ApplicationPackage(
artifactRepository.getSystemApplicationPackage(application.id(), zone, version)
);
return deploy(application.id(), applicationPackage, zone, version, Set.of(), /* No application cert */ Optional.empty(), Optional.empty());
} else {
throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString());
}
}
/** Deploys the given tester application to the given zone. */
public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, Version platform) {
return deploy(tester.id(), applicationPackage, zone, platform, Set.of(), /* No application cert for tester*/ Optional.empty(), Optional.empty());
}
private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage,
ZoneId zone, Version platform, Set<ContainerEndpoint> endpoints,
Optional<EndpointCertificateMetadata> endpointCertificateMetadata,
Optional<TenantRoles> tenantRoles) {
try {
Optional<DockerImage> dockerImageRepo = Optional.ofNullable(
dockerImageRepoFlag
.with(FetchVector.Dimension.ZONE_ID, zone.value())
.with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm())
.value())
.filter(s -> !s.isBlank())
.map(DockerImage::fromString);
Optional<AthenzDomain> domain = controller.tenants().get(application.tenant())
.filter(tenant-> tenant instanceof AthenzTenant)
.map(tenant -> ((AthenzTenant)tenant).domain());
if (zone.environment().isManuallyDeployed())
controller.applications().applicationStore().putMeta(new DeploymentId(application, zone),
clock.instant(),
applicationPackage.metaDataZip());
Quota deploymentQuota = DeploymentQuotaCalculator.calculate(billingController.getQuota(application.tenant()),
asList(application.tenant()), application, zone, applicationPackage.deploymentSpec());
List<TenantSecretStore> tenantSecretStores = controller.tenants()
.get(application.tenant())
.filter(tenant-> tenant instanceof CloudTenant)
.map(tenant -> ((CloudTenant) tenant).tenantSecretStores())
.orElse(List.of());
List<X509Certificate> operatorCertificates = controller.supportAccess().activeGrantsFor(new DeploymentId(application, zone)).stream()
.map(SupportAccessGrant::certificate)
.collect(toList());
ConfigServer.PreparedApplication preparedApplication =
configServer.deploy(new DeploymentData(application, zone, applicationPackage.zippedContent(), platform,
endpoints, endpointCertificateMetadata, dockerImageRepo, domain,
tenantRoles, deploymentQuota, tenantSecretStores, operatorCertificates));
return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
applicationPackage.zippedContent().length);
} finally {
controller.routing().policies().refresh(application, applicationPackage.deploymentSpec(), zone);
}
}
private LockedApplication withoutDeletedDeployments(LockedApplication application, InstanceName instance) {
DeploymentSpec deploymentSpec = application.get().deploymentSpec();
List<ZoneId> deploymentsToRemove = application.get().require(instance).productionDeployments().values().stream()
.map(Deployment::zone)
.filter(zone -> deploymentSpec.instance(instance).isEmpty()
|| ! deploymentSpec.requireInstance(instance).deploysTo(zone.environment(),
zone.region()))
.collect(toList());
if (deploymentsToRemove.isEmpty())
return application;
if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get().require(instance) +
" is deployed in " +
deploymentsToRemove.stream()
.map(zone -> zone.region().value())
.collect(joining(", ")) +
", but does not include " +
(deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
" in deployment.xml. " +
ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval));
boolean removeInstance = ! deploymentSpec.instanceNames().contains(instance)
&& application.get().require(instance).deployments().size() == deploymentsToRemove.size();
for (ZoneId zone : deploymentsToRemove)
application = deactivate(application, instance, zone);
if (removeInstance)
application = application.without(instance);
return application;
}
/**
* Deletes the the given application. All known instances of the applications will be deleted.
*
* @throws IllegalArgumentException if the application has deployments or the caller is not authorized
*/
public void deleteApplication(TenantAndApplicationId id, Credentials credentials) {
lockApplicationOrThrow(id, application -> {
var deployments = application.get().instances().values().stream()
.filter(instance -> ! instance.deployments().isEmpty())
.collect(toMap(instance -> instance.name(),
instance -> instance.deployments().keySet().stream()
.map(ZoneId::toString)
.collect(joining(", "))));
if ( ! deployments.isEmpty())
throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments: " + deployments);
for (Instance instance : application.get().instances().values()) {
controller.routing().removeEndpointsInDns(application.get(), instance.name());
application = application.without(instance.name());
}
applicationStore.removeAll(id.tenant(), id.application());
applicationStore.removeAllTesters(id.tenant(), id.application());
applicationStore.putMetaTombstone(id.tenant(), id.application(), clock.instant());
accessControl.deleteApplication(id, credentials);
curator.removeApplication(id);
controller.jobController().collectGarbage();
controller.notificationsDb().removeNotifications(NotificationSource.from(id));
log.info("Deleted " + id);
});
}
/**
* Deletes the the given application instance.
*
* @throws IllegalArgumentException if the application has deployments or the caller is not authorized
* @throws NotExistsException if the instance does not exist
*/
public void deleteInstance(ApplicationId instanceId) {
if (getInstance(instanceId).isEmpty())
throw new NotExistsException("Could not delete instance '" + instanceId + "': Instance not found");
lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> {
if ( ! application.get().require(instanceId.instance()).deployments().isEmpty())
throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments in: " +
application.get().require(instanceId.instance()).deployments().keySet().stream().map(ZoneId::toString)
.sorted().collect(joining(", ")));
if ( ! application.get().deploymentSpec().equals(DeploymentSpec.empty)
&& application.get().deploymentSpec().instanceNames().contains(instanceId.instance()))
throw new IllegalArgumentException("Can not delete '" + instanceId + "', which is specified in 'deployment.xml'; remove it there first");
controller.routing().removeEndpointsInDns(application.get(), instanceId.instance());
curator.writeApplication(application.without(instanceId.instance()).get());
controller.jobController().collectGarbage();
controller.notificationsDb().removeNotifications(NotificationSource.from(instanceId));
log.info("Deleted " + instanceId);
});
}
/**
* Replace any previous version of this application by this instance
*
* @param application a locked application to store
*/
public void store(LockedApplication application) {
curator.writeApplication(application.get());
}
/**
* Acquire a locked application to modify and store, if there is an application with the given id.
*
* @param applicationId ID of the application to lock and get.
* @param action Function which acts on the locked application.
*/
public void lockApplicationIfPresent(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
try (Lock lock = lock(applicationId)) {
getApplication(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
}
}
/**
* Acquire a locked application to modify and store, or throw an exception if no application has the given id.
*
* @param applicationId ID of the application to lock and require.
* @param action Function which acts on the locked application.
* @throws IllegalArgumentException when application does not exist.
*/
public void lockApplicationOrThrow(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
try (Lock lock = lock(applicationId)) {
action.accept(new LockedApplication(requireApplication(applicationId), lock));
}
}
/**
* Tells config server to schedule a restart of all nodes in this deployment
*
* @param restartFilter Variables to filter which nodes to restart.
*/
public void restart(DeploymentId deploymentId, RestartFilter restartFilter) {
configServer.restart(deploymentId, restartFilter);
}
    /**
     * Asks the config server whether this deployment is currently healthy, i.e., serving traffic as usual.
     * If this cannot be ascertained, we must assume it is not.
     */
    public boolean isHealthy(DeploymentId deploymentId) {
        try {
            return ! isSuspended(deploymentId);
        }
        catch (RuntimeException e) {
            // Err on the side of caution: an unreachable config server means we cannot claim health.
            log.log(Level.WARNING, "Failed getting suspension status of " + deploymentId + ": " + Exceptions.toMessageString(e));
            return false;
        }
    }
    /**
     * Asks the config server whether this deployment is currently <i>suspended</i>:
     * Not in a state where it should receive traffic.
     */
    public boolean isSuspended(DeploymentId deploymentId) {
        return configServer.isSuspended(deploymentId);
    }
    /** Sets suspension status of the given deployment in its zone. */
    public void setSuspension(DeploymentId deploymentId, boolean suspend) {
        configServer.setSuspension(deploymentId, suspend);
    }
    /** Deactivates the application instance in the given zone, and stores the updated application. */
    public void deactivate(ApplicationId id, ZoneId zone) {
        lockApplicationOrThrow(TenantAndApplicationId.from(id),
                               application -> store(deactivate(application, id.instance(), zone)));
    }
    /**
     * Deactivates a locked application without storing it
     *
     * @return the application with the deployment in the given zone removed
     */
    private LockedApplication deactivate(LockedApplication application, InstanceName instanceName, ZoneId zone) {
        DeploymentId id = new DeploymentId(application.get().id().instance(instanceName), zone);
        try {
            configServer.deactivate(id);
        } finally {
            // Even if deactivation fails, refresh routing and clean up metadata/notifications for this deployment.
            controller.routing().policies().refresh(application.get().id().instance(instanceName), application.get().deploymentSpec(), zone);
            if (zone.environment().isManuallyDeployed())
                applicationStore.putMetaTombstone(id, clock.instant());
            if (!zone.environment().isTest())
                controller.notificationsDb().removeNotifications(NotificationSource.from(id));
        }
        return application.with(instanceName, instance -> instance.withoutDeploymentIn(zone));
    }
public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }
    /**
     * Returns a lock which provides exclusive rights to changing this application.
     * Any operation which stores an application need to first acquire this lock, then read, modify
     * and store the application, and finally release (close) the lock.
     */
    Lock lock(TenantAndApplicationId application) {
        return curator.lock(application);
    }
    /**
     * Returns a lock which provides exclusive rights to deploying this application to the given zone.
     */
    private Lock lockForDeployment(ApplicationId application, ZoneId zone) {
        return curator.lockForDeployment(application, zone);
    }
    /**
     * Verifies that the application can be deployed to the tenant, following these rules:
     *
     * 1. Verify that the Athenz service can be launched by the config server
     * 2. If the principal is given, verify that the principal is tenant admin or admin of the tenant domain
     * 3. If the principal is not given, verify that the Athenz domain of the tenant equals Athenz domain given in deployment.xml
     *
     * @param tenantName tenant where application should be deployed
     * @param applicationPackage application package
     * @param deployer principal initiating the deployment, possibly empty
     * @throws IllegalArgumentException if any of the checks above fail
     */
    public void verifyApplicationIdentityConfiguration(TenantName tenantName, Optional<InstanceName> instanceName, Optional<ZoneId> zoneId, ApplicationPackage applicationPackage, Optional<Principal> deployer) {
        Optional<AthenzDomain> identityDomain = applicationPackage.deploymentSpec().athenzDomain()
                                                                  .map(domain -> new AthenzDomain(domain.value()));
        // No Athenz domain configured: nothing to verify.
        if(identityDomain.isEmpty()) {
            return;
        }
        if(! (accessControl instanceof AthenzFacade)) {
            throw new IllegalArgumentException("Athenz domain and service specified in deployment.xml, but not supported by system.");
        }
        verifyAllowedLaunchAthenzService(applicationPackage.deploymentSpec());
        Optional<AthenzUser> athenzUser = getUser(deployer);
        if (athenzUser.isPresent()) {
            // A user deploys directly: the user must be allowed to launch the service, or be a tenant admin.
            var zone = zoneId.orElseThrow(() -> new IllegalArgumentException("Unable to evaluate access, no zone provided in deployment"));
            // Instance-level service overrides the application-wide default, if present.
            var serviceToLaunch = instanceName
                    .flatMap(instance -> applicationPackage.deploymentSpec().instance(instance))
                    .flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region()))
                    .or(() -> applicationPackage.deploymentSpec().athenzService())
                    .map(service -> new AthenzService(identityDomain.get(), service.value()));
            if(serviceToLaunch.isPresent()) {
                if (
                        ! ((AthenzFacade) accessControl).canLaunch(athenzUser.get(), serviceToLaunch.get()) &&
                        ! ((AthenzFacade) accessControl).hasTenantAdminAccess(athenzUser.get(), identityDomain.get())
                ) {
                    throw new IllegalArgumentException("User " + athenzUser.get().getFullName() + " is not allowed to launch " +
                                                       "service " + serviceToLaunch.get().getFullName() + ". " +
                                                       "Please reach out to the domain admin.");
                }
            } else {
                throw new IllegalArgumentException("Athenz domain configured, but no service defined for deployment to " + zone.value());
            }
        } else {
            // Not a user: the tenant's Athenz domain must match the domain declared in deployment.xml.
            Tenant tenant = controller.tenants().require(tenantName);
            AthenzDomain tenantDomain = ((AthenzTenant) tenant).domain();
            if ( ! Objects.equals(tenantDomain, identityDomain.get()))
                throw new IllegalArgumentException("Athenz domain in deployment.xml: [" + identityDomain.get().getName() + "] " +
                                                   "must match tenant domain: [" + tenantDomain.getName() + "]");
        }
    }
    /**
     * Rejects a production deployment whose platform or application version is older than what is
     * currently deployed in the given zone, unless the change is pinned (platform) or the revision
     * is unknown in a CD system.
     *
     * @throws IllegalArgumentException if the requested versions are older than the deployed ones
     */
    private void rejectOldChange(Instance instance, Version platform, ApplicationVersion revision, JobId job, ZoneId zone) {
        Deployment deployment = instance.deployments().get(zone);
        if (deployment == null) return;
        if (!zone.environment().isProduction()) return;
        boolean platformIsOlder = platform.compareTo(deployment.version()) < 0 && !instance.change().isPinned();
        boolean revisionIsOlder = revision.compareTo(deployment.applicationVersion()) < 0 &&
                                  !(revision.isUnknown() && controller.system().isCd());
        if (platformIsOlder || revisionIsOlder)
            throw new IllegalArgumentException(String.format("Rejecting deployment of application %s to %s, as the requested versions (platform: %s, application: %s)" +
                                                             " are older than the currently deployed (platform: %s, application: %s).",
                                                             job.application(), zone, platform, revision, deployment.version(), deployment.applicationVersion()));
    }
private TenantAndApplicationId dashToUnderscore(TenantAndApplicationId id) {
return TenantAndApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_"));
}
    /** Returns the given instance id with '-' replaced by '_' in the application name; see the overload above. */
    private ApplicationId dashToUnderscore(ApplicationId id) {
        return dashToUnderscore(TenantAndApplicationId.from(id)).instance(id.instance());
    }
    /** Computes the quota usage of the given application's deployment in the given zone, from the node repository. */
    private QuotaUsage deploymentQuotaUsage(ZoneId zoneId, ApplicationId applicationId) {
        var application = configServer.nodeRepository().getApplication(zoneId, applicationId);
        return DeploymentQuotaCalculator.calculateQuotaUsage(application);
    }
    /** Fetches the application package for the given revision: from the dev store for unknown revisions, otherwise from the regular store. */
    private ApplicationPackage getApplicationPackage(ApplicationId application, ZoneId zone, ApplicationVersion revision) {
        return new ApplicationPackage(revision.isUnknown() ? applicationStore.getDev(application, zone)
                                                           : applicationStore.get(application.tenant(), application.application(), revision));
    }
/*
* Get the AthenzUser from this principal or Optional.empty if this does not represent a user.
*/
private Optional<AthenzUser> getUser(Optional<Principal> deployer) {
return deployer
.filter(AthenzPrincipal.class::isInstance)
.map(AthenzPrincipal.class::cast)
.map(AthenzPrincipal::getIdentity)
.filter(AthenzUser.class::isInstance)
.map(AthenzUser.class::cast);
}
    /*
     * Verifies that the configured athenz service (if any) can be launched by the config server
     * identity of every reachable zone. Both the application-wide service and any instance-level
     * overrides are checked.
     */
    private void verifyAllowedLaunchAthenzService(DeploymentSpec deploymentSpec) {
        deploymentSpec.athenzDomain().ifPresent(domain -> {
            controller.zoneRegistry().zones().reachable().ids().forEach(zone -> {
                AthenzIdentity configServerAthenzIdentity = controller.zoneRegistry().getConfigServerHttpsIdentity(zone);
                // Application-wide default service.
                deploymentSpec.athenzService().ifPresent(service -> {
                    verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()));
                });
                // Instance-level overrides, resolved per zone.
                deploymentSpec.instances().forEach(spec -> {
                    spec.athenzService(zone.environment(), zone.region()).ifPresent(service -> {
                        verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()));
                    });
                });
            });
        });
    }
    /** Throws IllegalArgumentException unless the given identity is allowed to launch the given Athenz service. */
    private void verifyAthenzServiceCanBeLaunchedBy(AthenzIdentity configServerAthenzIdentity, AthenzService athenzService) {
        if ( ! ((AthenzFacade) accessControl).canLaunch(configServerAthenzIdentity, athenzService))
            throw new IllegalArgumentException("Not allowed to launch Athenz service " + athenzService.getFullName());
    }
    /** Returns the latest known version within the given major, or empty if no such version is known. */
    public Optional<Version> lastCompatibleVersion(int targetMajorVersion) {
        return controller.readVersionStatus().versions().stream()
                         .map(VespaVersion::versionNumber)
                         .filter(version -> version.getMajor() == targetMajorVersion)
                         .max(naturalOrder());
    }
/** Extract deployment warnings metric from deployment result */
private static Map<DeploymentMetrics.Warning, Integer> warningsFrom(ActivateResult result) {
if (result.prepareResponse().log == null) return Map.of();
Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>();
for (Log log : result.prepareResponse().log) {
if (!"warn".equalsIgnoreCase(log.level) && !"warning".equalsIgnoreCase(log.level)) continue;
warnings.merge(DeploymentMetrics.Warning.all, 1, Integer::sum);
}
return Map.copyOf(warnings);
}
} | class ApplicationController {
    private static final Logger log = Logger.getLogger(ApplicationController.class.getName());
    /** The controller owning this */
    private final Controller controller;
    /** For persistence */
    private final CuratorDb curator;
    /** Source of system application packages */
    private final ArtifactRepository artifactRepository;
    /** Source of tenant application packages */
    private final ApplicationStore applicationStore;
    private final AccessControl accessControl;
    private final ConfigServer configServer;
    /** Injected clock, so time-dependent logic is testable */
    private final Clock clock;
    private final DeploymentTrigger deploymentTrigger;
    private final ApplicationPackageValidator applicationPackageValidator;
    private final EndpointCertificates endpointCertificates;
    private final StringFlag dockerImageRepoFlag;
    private final BillingController billingController;
    ApplicationController(Controller controller, CuratorDb curator, AccessControl accessControl, Clock clock,
                          FlagSource flagSource, BillingController billingController) {
        this.controller = controller;
        this.curator = curator;
        this.accessControl = accessControl;
        this.configServer = controller.serviceRegistry().configServer();
        this.clock = clock;
        this.artifactRepository = controller.serviceRegistry().artifactRepository();
        this.applicationStore = controller.serviceRegistry().applicationStore();
        this.dockerImageRepoFlag = PermanentFlags.DOCKER_IMAGE_REPO.bindTo(flagSource);
        this.billingController = billingController;
        deploymentTrigger = new DeploymentTrigger(controller, clock);
        applicationPackageValidator = new ApplicationPackageValidator(controller);
        endpointCertificates = new EndpointCertificates(controller,
                                                        controller.serviceRegistry().endpointCertificateProvider(),
                                                        controller.serviceRegistry().endpointCertificateValidator());
        // One-time background migration: rewrite every stored application, adding any instances
        // declared in the deployment spec which are missing from the stored state.
        Once.after(Duration.ofMinutes(1), () -> {
            Instant start = clock.instant();
            int count = 0;
            for (TenantAndApplicationId id : curator.readApplicationIds()) {
                lockApplicationIfPresent(id, application -> {
                    for (InstanceName instance : application.get().deploymentSpec().instanceNames())
                        if (!application.get().instances().containsKey(instance))
                            application = withNewInstance(application, id.instance(instance));
                    store(application);
                });
                count++;
            }
            log.log(Level.INFO, String.format("Wrote %d applications in %s", count,
                                              Duration.between(start, clock.instant())));
        });
    }
    /** Returns the application with the given id, or empty if it is not present */
    public Optional<Application> getApplication(TenantAndApplicationId id) {
        return curator.readApplication(id);
    }
    /** Returns the instance with the given id, or empty if it is not present */
    public Optional<Instance> getInstance(ApplicationId id) {
        return getApplication(TenantAndApplicationId.from(id)).flatMap(application -> application.get(id.instance()));
    }
    /**
     * Triggers reindexing for the given document types in the given clusters, for the given application.
     *
     * If no clusters are given, reindexing is triggered for the entire application; otherwise
     * if no documents types are given, reindexing is triggered for all given clusters; otherwise
     * reindexing is triggered for the cartesian product of the given clusters and document types.
     */
    public void reindex(ApplicationId id, ZoneId zoneId, List<String> clusterNames, List<String> documentTypes, boolean indexedOnly) {
        configServer.reindex(new DeploymentId(id, zoneId), clusterNames, documentTypes, indexedOnly);
    }
    /** Returns the reindexing status for the given application in the given zone. */
    public ApplicationReindexing applicationReindexing(ApplicationId id, ZoneId zoneId) {
        return configServer.getReindexing(new DeploymentId(id, zoneId));
    }
    /** Enables reindexing for the given application in the given zone. */
    public void enableReindexing(ApplicationId id, ZoneId zoneId) {
        configServer.enableReindexing(new DeploymentId(id, zoneId));
    }
    /** Disables reindexing for the given application in the given zone. */
    public void disableReindexing(ApplicationId id, ZoneId zoneId) {
        configServer.disableReindexing(new DeploymentId(id, zoneId));
    }
    /**
     * Returns the application with the given id
     *
     * @throws IllegalArgumentException if it does not exist
     */
    public Application requireApplication(TenantAndApplicationId id) {
        return getApplication(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
    }
    /**
     * Returns the instance with the given id
     *
     * @throws IllegalArgumentException if it does not exist
     */
    public Instance requireInstance(ApplicationId id) {
        return getInstance(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
    }
    /** Returns a snapshot of all applications; fails if any application cannot be read. */
    public List<Application> asList() {
        return curator.readApplications(false);
    }
    /**
     * Returns a snapshot of all readable applications. Unlike {@link #asList()}, this skips
     * applications that cannot currently be read (e.g., due to serialization issues) and may
     * return an incomplete snapshot.
     *
     * This should only be used in cases where acting on a subset of applications is better than none.
     */
    public List<Application> readable() {
        return curator.readApplications(true);
    }
    /** Returns the ID of all known applications. */
    public List<TenantAndApplicationId> idList() {
        return curator.readApplicationIds();
    }
    /** Returns a snapshot of all applications of a tenant */
    public List<Application> asList(TenantName tenant) {
        return curator.readApplications(tenant);
    }
public ArtifactRepository artifacts() { return artifactRepository; }
public ApplicationStore applicationStore() { return applicationStore; }
/** Returns all currently reachable content clusters among the given deployments. */
public Map<ZoneId, List<String>> reachableContentClustersByZone(Collection<DeploymentId> ids) {
Map<ZoneId, List<String>> clusters = new TreeMap<>(Comparator.comparing(ZoneId::value));
for (DeploymentId id : ids)
if (isHealthy(id))
clusters.put(id.zoneId(), List.copyOf(configServer.getContentClusters(id)));
return Collections.unmodifiableMap(clusters);
}
    /** Reads the oldest installed platform for the given application and zone from job history, or a node repo. */
    private Optional<Version> oldestInstalledPlatform(JobStatus job) {
        Version oldest = null;
        // Walk runs from newest to oldest, tracking the oldest target platform seen,
        // and stop at the most recent successful run.
        for (Run run : job.runs().descendingMap().values()) {
            Version version = run.versions().targetPlatform();
            if (oldest == null || version.isBefore(oldest))
                oldest = version;
            if (run.status() == RunStatus.success)
                return Optional.of(oldest);
        }
        // No successful run in history: fall back to asking the zone's node repository.
        return oldestInstalledPlatform(job.id());
    }
    /** Reads the oldest installed platform for the given application and zone from the node repo of that zone. */
    private Optional<Version> oldestInstalledPlatform(JobId job) {
        return configServer.nodeRepository().list(job.type().zone(controller.system()),
                                                  job.application(),
                                                  EnumSet.of(active, reserved))
                           .stream()
                           .map(Node::currentVersion)
                           .filter(version -> ! version.isEmpty())
                           .min(naturalOrder());
    }
    /** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */
    public Version oldestInstalledPlatform(TenantAndApplicationId id) {
        return controller.jobController().deploymentStatus(requireApplication(id)).jobs()
                         .production().asList().stream()
                         .map(this::oldestInstalledPlatform)
                         .flatMap(Optional::stream)
                         .min(naturalOrder())
                         .orElse(controller.readSystemVersion()); // No production jobs: fall back to the system version.
    }
    /**
     * Creates a new application for an existing tenant.
     *
     * @throws IllegalArgumentException if the application already exists
     */
    public Application createApplication(TenantAndApplicationId id, Credentials credentials) {
        try (Lock lock = lock(id)) {
            if (getApplication(id).isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
            // Names which differ only by '-' vs '_' collide elsewhere, so reject those too.
            if (getApplication(dashToUnderscore(id)).isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
            com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
            if (controller.tenants().get(id.tenant()).isEmpty())
                throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
            accessControl.createApplication(id, credentials);
            LockedApplication locked = new LockedApplication(new Application(id, clock.instant()), lock);
            store(locked);
            log.info("Created " + locked);
            return locked.get();
        }
    }
    /**
     * Creates a new instance for an existing application.
     *
     * @throws IllegalArgumentException if the instance already exists, or has an invalid instance name.
     */
    public void createInstance(ApplicationId id) {
        lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            store(withNewInstance(application, id));
        });
    }
    /** Fetches the requested application package from the artifact store(s). */
    public ApplicationPackage getApplicationPackage(ApplicationId id, ApplicationVersion version) {
        return new ApplicationPackage(applicationStore.get(id.tenant(), id.application(), version));
    }
    /**
     * Returns the given application with a new instance added.
     *
     * @throws IllegalArgumentException if the instance is a tester, has an invalid name, or already exists
     *                                  (also when names differ only by '-' vs '_')
     */
    public LockedApplication withNewInstance(LockedApplication application, ApplicationId instance) {
        if (instance.instance().isTester())
            throw new IllegalArgumentException("'" + instance + "' is a tester application!");
        InstanceId.validate(instance.instance().value());
        if (getInstance(instance).isPresent())
            throw new IllegalArgumentException("Could not create '" + instance + "': Instance already exists");
        if (getInstance(dashToUnderscore(instance)).isPresent())
            throw new IllegalArgumentException("Could not create '" + instance + "': Instance " + dashToUnderscore(instance) + " already exists");
        log.info("Created " + instance);
        return application.withNewInstance(instance.instance());
    }
    /**
     * Deploys an application package for an existing application instance, as part of the given job's
     * current run. Acquires the per-zone deployment lock for the whole operation, and the application
     * lock only for the read-and-validate phase, releasing it before the (slow) config server deploy call.
     *
     * @throws IllegalArgumentException if the instance is a tester, or the requested versions are too old
     * @throws IllegalStateException if the job has no run, or its last run has already ended
     */
    public ActivateResult deploy(JobId job, boolean deploySourceVersions) {
        if (job.application().instance().isTester())
            throw new IllegalArgumentException("'" + job.application() + "' is a tester application!");
        TenantAndApplicationId applicationId = TenantAndApplicationId.from(job.application());
        ZoneId zone = job.type().zone(controller.system());
        try (Lock deploymentLock = lockForDeployment(job.application(), zone)) {
            Set<ContainerEndpoint> containerEndpoints;
            Optional<EndpointCertificateMetadata> endpointCertificateMetadata;
            Optional<TenantRoles> tenantRoles = Optional.empty();
            Run run = controller.jobController().last(job)
                                .orElseThrow(() -> new IllegalStateException("No known run of '" + job + "'"));
            if (run.hasEnded())
                throw new IllegalStateException("No deployment expected for " + job + " now, as no job is running");
            // Use the source versions only when requested; otherwise the run's target versions.
            Version platform = run.versions().sourcePlatform().filter(__ -> deploySourceVersions).orElse(run.versions().targetPlatform());
            ApplicationVersion revision = run.versions().sourceApplication().filter(__ -> deploySourceVersions).orElse(run.versions().targetApplication());
            ApplicationPackage applicationPackage = getApplicationPackage(job.application(), zone, revision);
            try (Lock lock = lock(applicationId)) {
                LockedApplication application = new LockedApplication(requireApplication(applicationId), lock);
                Instance instance = application.get().require(job.application().instance());
                rejectOldChange(instance, platform, revision, job, zone);
                if ( ! applicationPackage.trustedCertificates().isEmpty()
                     && run.testerCertificate().isPresent())
                    applicationPackage = applicationPackage.withTrustedCertificate(run.testerCertificate().get());
                endpointCertificateMetadata = endpointCertificates.getMetadata(instance, zone, applicationPackage.deploymentSpec().instance(instance.name()));
                containerEndpoints = controller.routing().containerEndpointsOf(application.get(), job.application().instance(), zone);
            }
            // The application lock is released here; the config server call below can be slow.
            ActivateResult result = deploy(job.application(), applicationPackage, zone, platform, containerEndpoints, endpointCertificateMetadata, tenantRoles);
            var quotaUsage = deploymentQuotaUsage(zone, job.application());
            // Manual deployments are notified per deployment, others per application.
            NotificationSource source = zone.environment().isManuallyDeployed() ?
                    NotificationSource.from(new DeploymentId(job.application(), zone)) : NotificationSource.from(applicationId);
            List<String> warnings = Optional.ofNullable(result.prepareResponse().log)
                                            .map(logs -> logs.stream()
                                                             .filter(log -> log.applicationPackage)
                                                             .filter(log -> LogLevel.parse(log.level).intValue() >= Level.WARNING.intValue())
                                                             .map(log -> log.message)
                                                             .sorted()
                                                             .distinct()
                                                             .collect(Collectors.toList()))
                                            .orElseGet(List::of);
            if (warnings.isEmpty()) controller.notificationsDb().removeNotification(source, Notification.Type.applicationPackage);
            else controller.notificationsDb().setNotification(source, Notification.Type.applicationPackage, Notification.Level.warning, warnings);
            lockApplicationOrThrow(applicationId, application ->
                    store(application.with(job.application().instance(),
                                           instance -> instance.withNewDeployment(zone, revision, platform,
                                                                                  clock.instant(), warningsFrom(result),
                                                                                  quotaUsage))));
            return result;
        }
    }
    /** Deploys a system application to the given zone: via its application package if it has one, otherwise via a node repository upgrade. */
    public void deploy(SystemApplication application, ZoneId zone, Version version) {
        if (application.hasApplicationPackage()) {
            deploySystemApplicationPackage(application, zone, version);
        } else {
            configServer.nodeRepository().upgrade(zone, application.nodeType(), version);
        }
    }
/** Deploy a system application to given zone */
public ActivateResult deploySystemApplicationPackage(SystemApplication application, ZoneId zone, Version version) {
if (application.hasApplicationPackage()) {
ApplicationPackage applicationPackage = new ApplicationPackage(
artifactRepository.getSystemApplicationPackage(application.id(), zone, version)
);
return deploy(application.id(), applicationPackage, zone, version, Set.of(), /* No application cert */ Optional.empty(), Optional.empty());
} else {
throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString());
}
}
    /** Deploys the given tester application to the given zone. */
    public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, Version platform) {
        return deploy(tester.id(), applicationPackage, zone, platform, Set.of(), /* No application cert for tester*/ Optional.empty(), Optional.empty());
    }
    /**
     * Prepares and activates the given application package in the given zone through the config server.
     * Routing policies are refreshed in a finally block, so they are updated even when deployment fails.
     *
     * @return the result of the config server prepare call, with the package hash and size
     */
    private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage,
                                  ZoneId zone, Version platform, Set<ContainerEndpoint> endpoints,
                                  Optional<EndpointCertificateMetadata> endpointCertificateMetadata,
                                  Optional<TenantRoles> tenantRoles) {
        try {
            // Optional per-zone/per-application docker image repo override, from a feature flag.
            Optional<DockerImage> dockerImageRepo = Optional.ofNullable(
                    dockerImageRepoFlag
                            .with(FetchVector.Dimension.ZONE_ID, zone.value())
                            .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm())
                            .value())
                    .filter(s -> !s.isBlank())
                    .map(DockerImage::fromString);
            Optional<AthenzDomain> domain = controller.tenants().get(application.tenant())
                    .filter(tenant-> tenant instanceof AthenzTenant)
                    .map(tenant -> ((AthenzTenant)tenant).domain());
            if (zone.environment().isManuallyDeployed())
                controller.applications().applicationStore().putMeta(new DeploymentId(application, zone),
                                                                     clock.instant(),
                                                                     applicationPackage.metaDataZip());
            Quota deploymentQuota = DeploymentQuotaCalculator.calculate(billingController.getQuota(application.tenant()),
                    asList(application.tenant()), application, zone, applicationPackage.deploymentSpec());
            List<TenantSecretStore> tenantSecretStores = controller.tenants()
                    .get(application.tenant())
                    .filter(tenant-> tenant instanceof CloudTenant)
                    .map(tenant -> ((CloudTenant) tenant).tenantSecretStores())
                    .orElse(List.of());
            List<X509Certificate> operatorCertificates = controller.supportAccess().activeGrantsFor(new DeploymentId(application, zone)).stream()
                    .map(SupportAccessGrant::certificate)
                    .collect(toList());
            ConfigServer.PreparedApplication preparedApplication =
                    configServer.deploy(new DeploymentData(application, zone, applicationPackage.zippedContent(), platform,
                                                           endpoints, endpointCertificateMetadata, dockerImageRepo, domain,
                                                           tenantRoles, deploymentQuota, tenantSecretStores, operatorCertificates));
            return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
                                      applicationPackage.zippedContent().length);
        } finally {
            // Refresh routing even on failure, so policies stay consistent with actual deployments.
            controller.routing().policies().refresh(application, applicationPackage.deploymentSpec(), zone);
        }
    }
    /**
     * Deactivates all production deployments of the given instance which are no longer declared
     * in the deployment spec, provided the 'deployment-removal' validation override allows it.
     * The instance itself is removed when it is no longer declared and has no remaining deployments.
     *
     * @throws IllegalArgumentException if deployments would be removed without a validation override
     */
    private LockedApplication withoutDeletedDeployments(LockedApplication application, InstanceName instance) {
        DeploymentSpec deploymentSpec = application.get().deploymentSpec();
        List<ZoneId> deploymentsToRemove = application.get().require(instance).productionDeployments().values().stream()
                                                      .map(Deployment::zone)
                                                      .filter(zone ->      deploymentSpec.instance(instance).isEmpty()
                                                                      || ! deploymentSpec.requireInstance(instance).deploysTo(zone.environment(),
                                                                                                                              zone.region()))
                                                      .collect(toList());
        if (deploymentsToRemove.isEmpty())
            return application;
        if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
            throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get().require(instance) +
                                               " is deployed in " +
                                               deploymentsToRemove.stream()
                                                                  .map(zone -> zone.region().value())
                                                                  .collect(joining(", ")) +
                                               ", but does not include " +
                                               (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
                                               " in deployment.xml. " +
                                               ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval));
        // Remove the instance as well if it is no longer declared and all its deployments are going away.
        boolean removeInstance =    ! deploymentSpec.instanceNames().contains(instance)
                                 &&   application.get().require(instance).deployments().size() == deploymentsToRemove.size();
        for (ZoneId zone : deploymentsToRemove)
            application = deactivate(application, instance, zone);
        if (removeInstance)
            application = application.without(instance);
        return application;
    }
    /**
     * Deletes the given application. All known instances of the applications will be deleted.
     *
     * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
     */
    public void deleteApplication(TenantAndApplicationId id, Credentials credentials) {
        lockApplicationOrThrow(id, application -> {
            var deployments = application.get().instances().values().stream()
                                         .filter(instance -> ! instance.deployments().isEmpty())
                                         .collect(toMap(instance -> instance.name(),
                                                        instance -> instance.deployments().keySet().stream()
                                                                            .map(ZoneId::toString)
                                                                            .collect(joining(", "))));
            if ( ! deployments.isEmpty())
                throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments: " + deployments);
            // Remove DNS endpoints for each instance before removing the instances themselves.
            for (Instance instance : application.get().instances().values()) {
                controller.routing().removeEndpointsInDns(application.get(), instance.name());
                application = application.without(instance.name());
            }
            applicationStore.removeAll(id.tenant(), id.application());
            applicationStore.removeAllTesters(id.tenant(), id.application());
            applicationStore.putMetaTombstone(id.tenant(), id.application(), clock.instant());
            accessControl.deleteApplication(id, credentials);
            curator.removeApplication(id);
            controller.jobController().collectGarbage();
            controller.notificationsDb().removeNotifications(NotificationSource.from(id));
            log.info("Deleted " + id);
        });
    }
    /**
     * Deletes the given application instance.
     *
     * @throws IllegalArgumentException if the instance has deployments, is still declared in
     *                                  deployment.xml, or the caller is not authorized
     * @throws NotExistsException if the instance does not exist
     */
    public void deleteInstance(ApplicationId instanceId) {
        if (getInstance(instanceId).isEmpty())
            throw new NotExistsException("Could not delete instance '" + instanceId + "': Instance not found");
        lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> {
            if ( ! application.get().require(instanceId.instance()).deployments().isEmpty())
                throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments in: " +
                                                   application.get().require(instanceId.instance()).deployments().keySet().stream().map(ZoneId::toString)
                                                              .sorted().collect(joining(", ")));
            if ( ! application.get().deploymentSpec().equals(DeploymentSpec.empty)
                 && application.get().deploymentSpec().instanceNames().contains(instanceId.instance()))
                throw new IllegalArgumentException("Can not delete '" + instanceId + "', which is specified in 'deployment.xml'; remove it there first");
            controller.routing().removeEndpointsInDns(application.get(), instanceId.instance());
            curator.writeApplication(application.without(instanceId.instance()).get());
            controller.jobController().collectGarbage();
            controller.notificationsDb().removeNotifications(NotificationSource.from(instanceId));
            log.info("Deleted " + instanceId);
        });
    }
    /**
     * Replaces any previous version of this application by this instance.
     * Callers must hold the application lock, which is enforced by requiring a {@link LockedApplication}.
     *
     * @param application a locked application to store
     */
    public void store(LockedApplication application) {
        curator.writeApplication(application.get());
    }
    /**
     * Acquires a locked application to modify and store, if there is an application with the given id.
     * The action runs while the lock is held; the lock is released when this method returns.
     *
     * @param applicationId ID of the application to lock and get.
     * @param action Function which acts on the locked application.
     */
    public void lockApplicationIfPresent(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
        try (Lock lock = lock(applicationId)) {
            getApplication(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
        }
    }
    /**
     * Acquires a locked application to modify and store, or throws an exception if no application has the given id.
     * The action runs while the lock is held; the lock is released when this method returns.
     *
     * @param applicationId ID of the application to lock and require.
     * @param action Function which acts on the locked application.
     * @throws IllegalArgumentException when application does not exist.
     */
    public void lockApplicationOrThrow(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
        try (Lock lock = lock(applicationId)) {
            action.accept(new LockedApplication(requireApplication(applicationId), lock));
        }
    }
    /**
     * Tells the config server to schedule a restart of all nodes in this deployment that match the filter.
     *
     * @param restartFilter Variables to filter which nodes to restart.
     */
    public void restart(DeploymentId deploymentId, RestartFilter restartFilter) {
        configServer.restart(deploymentId, restartFilter);
    }
    /**
     * Asks the config server whether this deployment is currently healthy, i.e., serving traffic as usual.
     * If this cannot be ascertained, we must assume it is not.
     */
    public boolean isHealthy(DeploymentId deploymentId) {
        try {
            return ! isSuspended(deploymentId);
        }
        catch (RuntimeException e) {
            // Err on the side of caution: an unreachable config server means we cannot claim health.
            log.log(Level.WARNING, "Failed getting suspension status of " + deploymentId + ": " + Exceptions.toMessageString(e));
            return false;
        }
    }
    /**
     * Asks the config server whether this deployment is currently <i>suspended</i>:
     * Not in a state where it should receive traffic.
     */
    public boolean isSuspended(DeploymentId deploymentId) {
        return configServer.isSuspended(deploymentId);
    }
    /** Sets suspension status of the given deployment in its zone. */
    public void setSuspension(DeploymentId deploymentId, boolean suspend) {
        configServer.setSuspension(deploymentId, suspend);
    }
/** Deactivates (removes) the deployment of the given instance in the given zone, and stores the updated application. */
public void deactivate(ApplicationId id, ZoneId zone) {
    lockApplicationOrThrow(TenantAndApplicationId.from(id),
                           application -> store(deactivate(application, id.instance(), zone)));
}
/**
 * Deactivates a locked application without storing it.
 *
 * @return the application with the deployment in the given zone removed
 */
private LockedApplication deactivate(LockedApplication application, InstanceName instanceName, ZoneId zone) {
    DeploymentId id = new DeploymentId(application.get().id().instance(instanceName), zone);
    try {
        configServer.deactivate(id);
    } finally {
        // Clean up even if deactivation in the config server fails: routing policies
        // are refreshed, manual deployments get a meta tombstone, and notifications
        // are removed for all non-test zones.
        controller.routing().policies().refresh(application.get().id().instance(instanceName), application.get().deploymentSpec(), zone);
        if (zone.environment().isManuallyDeployed())
            applicationStore.putMetaTombstone(id, clock.instant());
        if (!zone.environment().isTest())
            controller.notificationsDb().removeNotifications(NotificationSource.from(id));
    }
    return application.with(instanceName, instance -> instance.withoutDeploymentIn(zone));
}
/** Returns the deployment trigger owned by this controller. */
public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }
/**
 * Returns a lock which provides exclusive rights to changing this application.
 * Any operation which stores an application needs to first acquire this lock, then read, modify
 * and store the application, and finally release (close) the lock.
 */
Lock lock(TenantAndApplicationId application) {
    return curator.lock(application);
}
/** Returns a lock which provides exclusive rights to deploying this application to the given zone. */
private Lock lockForDeployment(ApplicationId application, ZoneId zone) {
    return curator.lockForDeployment(application, zone);
}
/**
 * Verifies that the application can be deployed to the tenant, following these rules:
 *
 * 1. Verify that the Athenz service can be launched by the config server
 * 2. If the principal is given, verify that the principal is tenant admin or admin of the tenant domain
 * 3. If the principal is not given, verify that the Athenz domain of the tenant equals Athenz domain given in deployment.xml
 *
 * @param tenantName tenant where application should be deployed
 * @param applicationPackage application package
 * @param deployer principal initiating the deployment, possibly empty
 * @throws IllegalArgumentException if any of the above rules are violated
 */
public void verifyApplicationIdentityConfiguration(TenantName tenantName, Optional<InstanceName> instanceName, Optional<ZoneId> zoneId, ApplicationPackage applicationPackage, Optional<Principal> deployer) {
    Optional<AthenzDomain> identityDomain = applicationPackage.deploymentSpec().athenzDomain()
                                                              .map(domain -> new AthenzDomain(domain.value()));
    // No Athenz domain declared: nothing to verify.
    if(identityDomain.isEmpty()) {
        return;
    }
    if(! (accessControl instanceof AthenzFacade)) {
        throw new IllegalArgumentException("Athenz domain and service specified in deployment.xml, but not supported by system.");
    }
    verifyAllowedLaunchAthenzService(applicationPackage.deploymentSpec());
    Optional<AthenzUser> athenzUser = getUser(deployer);
    if (athenzUser.isPresent()) {
        // A user principal: check that the user can launch the instance's (or the default) service in this zone.
        var zone = zoneId.orElseThrow(() -> new IllegalArgumentException("Unable to evaluate access, no zone provided in deployment"));
        var serviceToLaunch = instanceName
                .flatMap(instance -> applicationPackage.deploymentSpec().instance(instance))
                .flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region()))
                .or(() -> applicationPackage.deploymentSpec().athenzService())
                .map(service -> new AthenzService(identityDomain.get(), service.value()));
        if(serviceToLaunch.isPresent()) {
            // Either direct launch access to the service, or tenant-admin access to the domain, suffices.
            if (
                    ! ((AthenzFacade) accessControl).canLaunch(athenzUser.get(), serviceToLaunch.get()) &&
                    ! ((AthenzFacade) accessControl).hasTenantAdminAccess(athenzUser.get(), identityDomain.get())
            ) {
                throw new IllegalArgumentException("User " + athenzUser.get().getFullName() + " is not allowed to launch " +
                                                   "service " + serviceToLaunch.get().getFullName() + ". " +
                                                   "Please reach out to the domain admin.");
            }
        } else {
            throw new IllegalArgumentException("Athenz domain configured, but no service defined for deployment to " + zone.value());
        }
    } else {
        // Not a user principal: require the tenant's Athenz domain to match the one in deployment.xml.
        Tenant tenant = controller.tenants().require(tenantName);
        AthenzDomain tenantDomain = ((AthenzTenant) tenant).domain();
        if ( ! Objects.equals(tenantDomain, identityDomain.get()))
            throw new IllegalArgumentException("Athenz domain in deployment.xml: [" + identityDomain.get().getName() + "] " +
                                               "must match tenant domain: [" + tenantDomain.getName() + "]");
    }
}
/**
 * Rejects the requested deployment if it would downgrade an existing production deployment,
 * i.e., if either the platform or the application version is older than what is currently deployed.
 * A pinned change exempts the platform check; an unknown revision in a CD system exempts the revision check.
 *
 * @throws IllegalArgumentException if the requested versions are older than the deployed ones
 */
private void rejectOldChange(Instance instance, Version platform, ApplicationVersion revision, JobId job, ZoneId zone) {
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null) return;
    if (!zone.environment().isProduction()) return;
    boolean platformIsOlder = platform.compareTo(deployment.version()) < 0 && !instance.change().isPinned();
    boolean revisionIsOlder = revision.compareTo(deployment.applicationVersion()) < 0 &&
                              !(revision.isUnknown() && controller.system().isCd());
    if (platformIsOlder || revisionIsOlder)
        throw new IllegalArgumentException(String.format("Rejecting deployment of application %s to %s, as the requested versions (platform: %s, application: %s)" +
                                                         " are older than the currently deployed (platform: %s, application: %s).",
                                                         job.application(), zone, platform, revision, deployment.version(), deployment.applicationVersion()));
}
/** Returns the given id with dashes in the application name replaced by underscores, for duplicate detection. */
private TenantAndApplicationId dashToUnderscore(TenantAndApplicationId id) {
    // String.replace does a plain character substitution; replaceAll would needlessly
    // compile "-" as a regular expression on every call.
    return TenantAndApplicationId.from(id.tenant().value(), id.application().value().replace('-', '_'));
}
/** Returns the given instance id with dashes in the application name replaced by underscores, for duplicate detection. */
private ApplicationId dashToUnderscore(ApplicationId id) {
    return dashToUnderscore(TenantAndApplicationId.from(id)).instance(id.instance());
}
/** Returns the quota usage of the given application's deployment in the given zone, as reported by that zone's node repository. */
private QuotaUsage deploymentQuotaUsage(ZoneId zoneId, ApplicationId applicationId) {
    var application = configServer.nodeRepository().getApplication(zoneId, applicationId);
    return DeploymentQuotaCalculator.calculateQuotaUsage(application);
}
/**
 * Returns the application package for the given revision: the dev package for this zone when the
 * revision is unknown, otherwise the stored package for that revision.
 */
private ApplicationPackage getApplicationPackage(ApplicationId application, ZoneId zone, ApplicationVersion revision) {
    return new ApplicationPackage(revision.isUnknown() ? applicationStore.getDev(application, zone)
                                                       : applicationStore.get(application.tenant(), application.application(), revision));
}
/*
 * Returns the AthenzUser behind this principal, or empty if the principal does not represent an Athenz user.
 */
private Optional<AthenzUser> getUser(Optional<Principal> deployer) {
    return deployer.filter(principal -> principal instanceof AthenzPrincipal)
                   .map(principal -> ((AthenzPrincipal) principal).getIdentity())
                   .filter(identity -> identity instanceof AthenzUser)
                   .map(identity -> (AthenzUser) identity);
}
/*
 * Verifies that the configured Athenz service (if any) can be launched by the config server
 * identity of every reachable zone, for both the deployment-level default service and any
 * per-instance service overrides.
 */
private void verifyAllowedLaunchAthenzService(DeploymentSpec deploymentSpec) {
    deploymentSpec.athenzDomain().ifPresent(domain -> {
        controller.zoneRegistry().zones().reachable().ids().forEach(zone -> {
            AthenzIdentity configServerAthenzIdentity = controller.zoneRegistry().getConfigServerHttpsIdentity(zone);
            // Default service declared at the deployment level.
            deploymentSpec.athenzService().ifPresent(service -> {
                verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()));
            });
            // Per-instance services, resolved for this zone's environment and region.
            deploymentSpec.instances().forEach(spec -> {
                spec.athenzService(zone.environment(), zone.region()).ifPresent(service -> {
                    verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()));
                });
            });
        });
    });
}
/** Verifies that the given config server identity is allowed to launch the given Athenz service. */
private void verifyAthenzServiceCanBeLaunchedBy(AthenzIdentity configServerAthenzIdentity, AthenzService athenzService) {
    if ( ! ((AthenzFacade) accessControl).canLaunch(configServerAthenzIdentity, athenzService))
        throw new IllegalArgumentException("Not allowed to launch Athenz service " + athenzService.getFullName());
}
/** Returns the latest known version within the given major, or empty if no such version is known. */
public Optional<Version> lastCompatibleVersion(int targetMajorVersion) {
    Optional<Version> latest = Optional.empty();
    for (VespaVersion vespaVersion : controller.readVersionStatus().versions()) {
        Version version = vespaVersion.versionNumber();
        if (version.getMajor() != targetMajorVersion) continue;
        if (latest.isEmpty() || version.compareTo(latest.get()) > 0)
            latest = Optional.of(version);
    }
    return latest;
}
/** Extracts the number of "warn"/"warning" level log entries from the given deployment result. */
private static Map<DeploymentMetrics.Warning, Integer> warningsFrom(ActivateResult result) {
    var logEntries = result.prepareResponse().log;
    if (logEntries == null) return Map.of();
    int count = 0;
    for (Log entry : logEntries)
        if ("warn".equalsIgnoreCase(entry.level) || "warning".equalsIgnoreCase(entry.level))
            count++;
    return count == 0 ? Map.of() : Map.of(DeploymentMetrics.Warning.all, count);
}
} |
Yes... The problem here is that when an instance is removed from `deployment.xml`, `deleteInstance(ApplicationId)` is never called. | public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) {
applicationPackageValidator.validate(application.get(), applicationPackage, clock.instant());
application = application.with(applicationPackage.deploymentSpec());
application = application.with(applicationPackage.validationOverrides());
var existingInstances = application.get().instances().keySet();
var declaredInstances = applicationPackage.deploymentSpec().instanceNames();
for (var name : declaredInstances)
if ( ! existingInstances.contains(name))
application = withNewInstance(application, application.get().id().instance(name));
for (InstanceName name : existingInstances) {
application = withoutDeletedDeployments(application, name);
}
for (InstanceName instance : declaredInstances)
if (applicationPackage.deploymentSpec().requireInstance(instance).concerns(Environment.prod))
application = controller.routing().assignRotations(application, instance);
controller.jobController().deploymentStatus(application.get());
for (var name : existingInstances)
if ( ! declaredInstances.contains(name))
controller.notificationsDb().removeNotifications(NotificationSource.from(application.get().id().instance(name)));
store(application);
return application;
} | controller.notificationsDb().removeNotifications(NotificationSource.from(application.get().id().instance(name))); | public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) {
applicationPackageValidator.validate(application.get(), applicationPackage, clock.instant());
application = application.with(applicationPackage.deploymentSpec());
application = application.with(applicationPackage.validationOverrides());
var existingInstances = application.get().instances().keySet();
var declaredInstances = applicationPackage.deploymentSpec().instanceNames();
for (var name : declaredInstances)
if ( ! existingInstances.contains(name))
application = withNewInstance(application, application.get().id().instance(name));
for (InstanceName name : existingInstances) {
application = withoutDeletedDeployments(application, name);
}
for (InstanceName instance : declaredInstances)
if (applicationPackage.deploymentSpec().requireInstance(instance).concerns(Environment.prod))
application = controller.routing().assignRotations(application, instance);
controller.jobController().deploymentStatus(application.get());
for (var name : existingInstances)
if ( ! declaredInstances.contains(name))
controller.notificationsDb().removeNotifications(NotificationSource.from(application.get().id().instance(name)));
store(application);
return application;
} | class ApplicationController {
private static final Logger log = Logger.getLogger(ApplicationController.class.getName());
/** The controller owning this */
private final Controller controller;
/** For persistence */
private final CuratorDb curator;
private final ArtifactRepository artifactRepository;
private final ApplicationStore applicationStore;
private final AccessControl accessControl;
private final ConfigServer configServer;
private final Clock clock;
private final DeploymentTrigger deploymentTrigger;
private final ApplicationPackageValidator applicationPackageValidator;
private final EndpointCertificates endpointCertificates;
private final StringFlag dockerImageRepoFlag;
private final BillingController billingController;
/** Wires up this controller's collaborators and schedules a delayed rewrite of all stored applications. */
ApplicationController(Controller controller, CuratorDb curator, AccessControl accessControl, Clock clock,
                      FlagSource flagSource, BillingController billingController) {
    this.controller = controller;
    this.curator = curator;
    this.accessControl = accessControl;
    this.configServer = controller.serviceRegistry().configServer();
    this.clock = clock;
    this.artifactRepository = controller.serviceRegistry().artifactRepository();
    this.applicationStore = controller.serviceRegistry().applicationStore();
    this.dockerImageRepoFlag = PermanentFlags.DOCKER_IMAGE_REPO.bindTo(flagSource);
    this.billingController = billingController;
    deploymentTrigger = new DeploymentTrigger(controller, clock);
    applicationPackageValidator = new ApplicationPackageValidator(controller);
    endpointCertificates = new EndpointCertificates(controller,
                                                    controller.serviceRegistry().endpointCertificateProvider(),
                                                    controller.serviceRegistry().endpointCertificateValidator());
    // One minute after construction, rewrite every stored application, creating any instances
    // declared in its deployment spec which are missing from the stored state.
    // NOTE(review): this looks like a one-off data migration — confirm whether it can be removed.
    Once.after(Duration.ofMinutes(1), () -> {
        Instant start = clock.instant();
        int count = 0;
        for (TenantAndApplicationId id : curator.readApplicationIds()) {
            lockApplicationIfPresent(id, application -> {
                for (InstanceName instance : application.get().deploymentSpec().instanceNames())
                    if (!application.get().instances().containsKey(instance))
                        application = withNewInstance(application, id.instance(instance));
                store(application);
            });
            count++;
        }
        log.log(Level.INFO, String.format("Wrote %d applications in %s", count,
                                          Duration.between(start, clock.instant())));
    });
}
/** Returns the application with the given id, or empty if it is not present */
public Optional<Application> getApplication(TenantAndApplicationId id) {
    return curator.readApplication(id);
}
/** Returns the instance with the given id, or empty if it is not present */
public Optional<Instance> getInstance(ApplicationId id) {
    return getApplication(TenantAndApplicationId.from(id)).flatMap(application -> application.get(id.instance()));
}
/**
 * Triggers reindexing for the given document types in the given clusters, for the given application.
 *
 * If no clusters are given, reindexing is triggered for the entire application; otherwise,
 * if no document types are given, reindexing is triggered for all given clusters; otherwise,
 * reindexing is triggered for the cartesian product of the given clusters and document types.
 */
public void reindex(ApplicationId id, ZoneId zoneId, List<String> clusterNames, List<String> documentTypes, boolean indexedOnly) {
    configServer.reindex(new DeploymentId(id, zoneId), clusterNames, documentTypes, indexedOnly);
}
/** Returns the reindexing status for the given application in the given zone. */
public ApplicationReindexing applicationReindexing(ApplicationId id, ZoneId zoneId) {
    return configServer.getReindexing(new DeploymentId(id, zoneId));
}
/** Enables reindexing for the given application in the given zone. */
public void enableReindexing(ApplicationId id, ZoneId zoneId) {
    configServer.enableReindexing(new DeploymentId(id, zoneId));
}
/** Disables reindexing for the given application in the given zone. */
public void disableReindexing(ApplicationId id, ZoneId zoneId) {
    configServer.disableReindexing(new DeploymentId(id, zoneId));
}
/**
 * Returns the application with the given id.
 *
 * @throws IllegalArgumentException if it does not exist
 */
public Application requireApplication(TenantAndApplicationId id) {
    Optional<Application> application = getApplication(id);
    if (application.isEmpty())
        throw new IllegalArgumentException(id + " not found");
    return application.get();
}
/**
 * Returns the instance with the given id.
 *
 * @throws IllegalArgumentException if it does not exist
 */
public Instance requireInstance(ApplicationId id) {
    Optional<Instance> instance = getInstance(id);
    if (instance.isEmpty())
        throw new IllegalArgumentException(id + " not found");
    return instance.get();
}
/** Returns a snapshot of all applications; fails if any application cannot be read. */
public List<Application> asList() {
    return curator.readApplications(false);
}
/**
 * Returns a snapshot of all readable applications. Unlike {@link #asList()}, this skips
 * applications that cannot currently be read (e.g. due to serialization issues) and may return an incomplete
 * snapshot.
 *
 * This should only be used in cases where acting on a subset of applications is better than none.
 */
public List<Application> readable() {
    return curator.readApplications(true);
}
/** Returns the IDs of all known applications. */
public List<TenantAndApplicationId> idList() {
    return curator.readApplicationIds();
}
/** Returns a snapshot of all applications of the given tenant */
public List<Application> asList(TenantName tenant) {
    return curator.readApplications(tenant);
}
/** Returns the artifact repository used by this controller. */
public ArtifactRepository artifacts() { return artifactRepository; }
/** Returns the application store used by this controller. */
public ApplicationStore applicationStore() { return applicationStore; }
/** Returns all currently reachable content clusters among the given deployments, keyed by zone (sorted by zone value). */
public Map<ZoneId, List<String>> reachableContentClustersByZone(Collection<DeploymentId> ids) {
    Map<ZoneId, List<String>> clusters = new TreeMap<>(Comparator.comparing(ZoneId::value));
    for (DeploymentId id : ids)
        if (isHealthy(id)) // unhealthy (or unascertainable) deployments are skipped
            clusters.put(id.zoneId(), List.copyOf(configServer.getContentClusters(id)));
    return Collections.unmodifiableMap(clusters);
}
/** Reads the oldest installed platform for the given application and zone from job history, or a node repo. */
private Optional<Version> oldestInstalledPlatform(JobStatus job) {
    Version oldest = null;
    // Walk runs newest-first: the oldest target platform seen up to and including the most
    // recent successful run bounds what may still be installed.
    for (Run run : job.runs().descendingMap().values()) {
        Version version = run.versions().targetPlatform();
        if (oldest == null || version.isBefore(oldest))
            oldest = version;
        if (run.status() == RunStatus.success)
            return Optional.of(oldest);
    }
    // No successful run on record: fall back to asking the zone's node repository.
    return oldestInstalledPlatform(job.id());
}
/** Reads the oldest installed platform for the given application and zone from the node repo of that zone. */
private Optional<Version> oldestInstalledPlatform(JobId job) {
    return configServer.nodeRepository().list(job.type().zone(controller.system()),
                                              job.application(),
                                              EnumSet.of(active, reserved))
                       .stream()
                       .map(Node::currentVersion)
                       .filter(version -> ! version.isEmpty())
                       .min(naturalOrder());
}
/** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */
public Version oldestInstalledPlatform(TenantAndApplicationId id) {
    return controller.jobController().deploymentStatus(requireApplication(id)).jobs()
                     .production().asList().stream()
                     .map(this::oldestInstalledPlatform)
                     .flatMap(Optional::stream)
                     .min(naturalOrder())
                     .orElse(controller.readSystemVersion()); // no production nodes: fall back to the system version
}
/**
 * Creates a new application for an existing tenant.
 *
 * @throws IllegalArgumentException if the application already exists
 */
public Application createApplication(TenantAndApplicationId id, Credentials credentials) {
    try (Lock lock = lock(id)) {
        if (getApplication(id).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
        // Names differing only in dash vs. underscore are also rejected
        // (presumably because they collide elsewhere — TODO confirm where).
        if (getApplication(dashToUnderscore(id)).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
        com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
        if (controller.tenants().get(id.tenant()).isEmpty())
            throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
        accessControl.createApplication(id, credentials);
        LockedApplication locked = new LockedApplication(new Application(id, clock.instant()), lock);
        store(locked);
        log.info("Created " + locked);
        return locked.get();
    }
}
/**
 * Creates a new instance for an existing application.
 *
 * @throws IllegalArgumentException if the instance already exists, or has an invalid instance name.
 */
public void createInstance(ApplicationId id) {
    lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        store(withNewInstance(application, id));
    });
}
/** Fetches the requested application package from the artifact store(s). */
public ApplicationPackage getApplicationPackage(ApplicationId id, ApplicationVersion version) {
    return new ApplicationPackage(applicationStore.get(id.tenant(), id.application(), version));
}
/**
 * Returns the given application with a new instance added.
 *
 * @throws IllegalArgumentException if the instance is a tester, has an invalid name, or already exists
 *                                  (also as a dash/underscore variant)
 */
public LockedApplication withNewInstance(LockedApplication application, ApplicationId instance) {
    if (instance.instance().isTester())
        throw new IllegalArgumentException("'" + instance + "' is a tester application!");
    InstanceId.validate(instance.instance().value());
    if (getInstance(instance).isPresent())
        throw new IllegalArgumentException("Could not create '" + instance + "': Instance already exists");
    if (getInstance(dashToUnderscore(instance)).isPresent())
        throw new IllegalArgumentException("Could not create '" + instance + "': Instance " + dashToUnderscore(instance) + " already exists");
    log.info("Created " + instance);
    return application.withNewInstance(instance.instance());
}
/**
 * Deploys an application package for an existing application instance, as part of the currently
 * running job for it, and records the new deployment on the stored instance.
 *
 * @throws IllegalArgumentException if the instance is a tester, or the versions would downgrade production
 * @throws IllegalStateException if no run of the job is known, or its last run has already ended
 */
public ActivateResult deploy(JobId job, boolean deploySourceVersions) {
    if (job.application().instance().isTester())
        throw new IllegalArgumentException("'" + job.application() + "' is a tester application!");
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(job.application());
    ZoneId zone = job.type().zone(controller.system());
    try (Lock deploymentLock = lockForDeployment(job.application(), zone)) {
        Set<ContainerEndpoint> containerEndpoints;
        Optional<EndpointCertificateMetadata> endpointCertificateMetadata;
        Optional<TenantRoles> tenantRoles = Optional.empty();
        Run run = controller.jobController().last(job)
                            .orElseThrow(() -> new IllegalStateException("No known run of '" + job + "'"));
        if (run.hasEnded())
            throw new IllegalStateException("No deployment expected for " + job + " now, as no job is running");
        // Use the run's source versions instead of its targets when requested and available.
        Version platform = run.versions().sourcePlatform().filter(__ -> deploySourceVersions).orElse(run.versions().targetPlatform());
        ApplicationVersion revision = run.versions().sourceApplication().filter(__ -> deploySourceVersions).orElse(run.versions().targetApplication());
        ApplicationPackage applicationPackage = getApplicationPackage(job.application(), zone, revision);
        // Read endpoint data under the application lock, but release it before the (slow) deployment below.
        try (Lock lock = lock(applicationId)) {
            LockedApplication application = new LockedApplication(requireApplication(applicationId), lock);
            Instance instance = application.get().require(job.application().instance());
            rejectOldChange(instance, platform, revision, job, zone);
            if ( ! applicationPackage.trustedCertificates().isEmpty()
                 && run.testerCertificate().isPresent())
                applicationPackage = applicationPackage.withTrustedCertificate(run.testerCertificate().get());
            endpointCertificateMetadata = endpointCertificates.getMetadata(instance, zone, applicationPackage.deploymentSpec().instance(instance.name()));
            containerEndpoints = controller.routing().containerEndpointsOf(application.get(), job.application().instance(), zone);
        }
        ActivateResult result = deploy(job.application(), applicationPackage, zone, platform, containerEndpoints, endpointCertificateMetadata, tenantRoles);
        var quotaUsage = deploymentQuotaUsage(zone, job.application());
        // Set or clear application-package warnings on the deployment (or application) notification source.
        NotificationSource source = zone.environment().isManuallyDeployed() ?
                NotificationSource.from(new DeploymentId(job.application(), zone)) : NotificationSource.from(applicationId);
        List<String> warnings = Optional.ofNullable(result.prepareResponse().log)
                .map(logs -> logs.stream()
                        .filter(log -> log.applicationPackage)
                        .filter(log -> LogLevel.parse(log.level).intValue() >= Level.WARNING.intValue())
                        .map(log -> log.message)
                        .sorted()
                        .distinct()
                        .collect(Collectors.toList()))
                .orElseGet(List::of);
        if (warnings.isEmpty()) controller.notificationsDb().removeNotification(source, Notification.Type.applicationPackage);
        else controller.notificationsDb().setNotification(source, Notification.Type.applicationPackage, Notification.Level.warning, warnings);
        // Record the new deployment on the stored instance.
        lockApplicationOrThrow(applicationId, application ->
                store(application.with(job.application().instance(),
                                       instance -> instance.withNewDeployment(zone, revision, platform,
                                                                              clock.instant(), warningsFrom(result),
                                                                              quotaUsage))));
        return result;
    }
}
/** Stores the deployment spec and validation overrides from the application package, and runs cleanup.
 * NOTE(review): this javadoc is orphaned — no such method follows; it appears to describe
 * storeWithUpdatedConfig and should be moved next to that method. */
/** Deploys a system application to the given zone: by application package if it has one, otherwise via a node-repo upgrade. */
public void deploy(SystemApplication application, ZoneId zone, Version version) {
    if ( ! application.hasApplicationPackage()) {
        configServer.nodeRepository().upgrade(zone, application.nodeType(), version);
        return;
    }
    deploySystemApplicationPackage(application, zone, version);
}
/**
 * Deploys a system application, which must have an application package, to the given zone.
 *
 * @throws RuntimeException if the system application has no application package
 */
public ActivateResult deploySystemApplicationPackage(SystemApplication application, ZoneId zone, Version version) {
    if (application.hasApplicationPackage()) {
        ApplicationPackage applicationPackage = new ApplicationPackage(
                artifactRepository.getSystemApplicationPackage(application.id(), zone, version)
        );
        return deploy(application.id(), applicationPackage, zone, version, Set.of(), /* No application cert */ Optional.empty(), Optional.empty());
    } else {
        throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString());
    }
}
/** Deploys the given tester application to the given zone. */
public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, Version platform) {
    return deploy(tester.id(), applicationPackage, zone, platform, Set.of(), /* No application cert for tester*/ Optional.empty(), Optional.empty());
}
/**
 * Prepares and activates the given application package in the given zone through the config server,
 * after assembling the deployment data (docker image repo, Athenz domain, quota, secret stores,
 * operator certificates) this requires.
 */
private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage,
                              ZoneId zone, Version platform, Set<ContainerEndpoint> endpoints,
                              Optional<EndpointCertificateMetadata> endpointCertificateMetadata,
                              Optional<TenantRoles> tenantRoles) {
    try {
        // Optional docker image repo override from the (zone- and application-dimensioned) feature flag.
        Optional<DockerImage> dockerImageRepo = Optional.ofNullable(
                dockerImageRepoFlag
                        .with(FetchVector.Dimension.ZONE_ID, zone.value())
                        .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm())
                        .value())
                .filter(s -> !s.isBlank())
                .map(DockerImage::fromString);
        Optional<AthenzDomain> domain = controller.tenants().get(application.tenant())
                .filter(tenant-> tenant instanceof AthenzTenant)
                .map(tenant -> ((AthenzTenant)tenant).domain());
        if (zone.environment().isManuallyDeployed())
            controller.applications().applicationStore().putMeta(new DeploymentId(application, zone),
                                                                 clock.instant(),
                                                                 applicationPackage.metaDataZip());
        Quota deploymentQuota = DeploymentQuotaCalculator.calculate(billingController.getQuota(application.tenant()),
                asList(application.tenant()), application, zone, applicationPackage.deploymentSpec());
        List<TenantSecretStore> tenantSecretStores = controller.tenants()
                .get(application.tenant())
                .filter(tenant-> tenant instanceof CloudTenant)
                .map(tenant -> ((CloudTenant) tenant).tenantSecretStores())
                .orElse(List.of());
        List<X509Certificate> operatorCertificates = controller.supportAccess().activeGrantsFor(new DeploymentId(application, zone)).stream()
                .map(SupportAccessGrant::certificate)
                .collect(toList());
        ConfigServer.PreparedApplication preparedApplication =
                configServer.deploy(new DeploymentData(application, zone, applicationPackage.zippedContent(), platform,
                                                       endpoints, endpointCertificateMetadata, dockerImageRepo, domain,
                                                       tenantRoles, deploymentQuota, tenantSecretStores, operatorCertificates));
        return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
                                  applicationPackage.zippedContent().length);
    } finally {
        // Refresh routing policies regardless of deployment outcome.
        controller.routing().policies().refresh(application, applicationPackage.deploymentSpec(), zone);
    }
}
/**
 * Returns the given application with all production deployments of the given instance which are
 * no longer present in the deployment spec deactivated; the instance itself is removed when it is
 * no longer declared and has no remaining deployments. Requires a deployment-removal validation
 * override when any deployment would be removed.
 */
private LockedApplication withoutDeletedDeployments(LockedApplication application, InstanceName instance) {
    DeploymentSpec deploymentSpec = application.get().deploymentSpec();
    // Production deployments not (or no longer) covered by the deployment spec.
    List<ZoneId> deploymentsToRemove = application.get().require(instance).productionDeployments().values().stream()
                                                  .map(Deployment::zone)
                                                  .filter(zone -> deploymentSpec.instance(instance).isEmpty()
                                                                  || ! deploymentSpec.requireInstance(instance).deploysTo(zone.environment(),
                                                                                                                          zone.region()))
                                                  .collect(toList());
    if (deploymentsToRemove.isEmpty())
        return application;
    if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
        throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get().require(instance) +
                                           " is deployed in " +
                                           deploymentsToRemove.stream()
                                                              .map(zone -> zone.region().value())
                                                              .collect(joining(", ")) +
                                           ", but does not include " +
                                           (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
                                           " in deployment.xml. " +
                                           ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval));
    // Decide on instance removal before deactivating, while the deployment count is still intact.
    boolean removeInstance = ! deploymentSpec.instanceNames().contains(instance)
                             && application.get().require(instance).deployments().size() == deploymentsToRemove.size();
    for (ZoneId zone : deploymentsToRemove)
        application = deactivate(application, instance, zone);
    if (removeInstance)
        application = application.without(instance);
    return application;
}
/**
 * Deletes the given application. All known instances of the application will be deleted.
 *
 * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
 */
public void deleteApplication(TenantAndApplicationId id, Credentials credentials) {
    lockApplicationOrThrow(id, application -> {
        // Refuse to delete while any instance still has active deployments.
        var deployments = application.get().instances().values().stream()
                                     .filter(instance -> ! instance.deployments().isEmpty())
                                     .collect(toMap(instance -> instance.name(),
                                                    instance -> instance.deployments().keySet().stream()
                                                                        .map(ZoneId::toString)
                                                                        .collect(joining(", "))));
        if ( ! deployments.isEmpty())
            throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments: " + deployments);
        for (Instance instance : application.get().instances().values()) {
            controller.routing().removeEndpointsInDns(application.get(), instance.name());
            application = application.without(instance.name());
        }
        applicationStore.removeAll(id.tenant(), id.application());
        applicationStore.removeAllTesters(id.tenant(), id.application());
        applicationStore.putMetaTombstone(id.tenant(), id.application(), clock.instant());
        accessControl.deleteApplication(id, credentials);
        curator.removeApplication(id);
        controller.jobController().collectGarbage();
        controller.notificationsDb().removeNotifications(NotificationSource.from(id));
        log.info("Deleted " + id);
    });
}
/**
 * Deletes the given application instance.
 *
 * @throws IllegalArgumentException if the instance has deployments, is declared in deployment.xml,
 *                                  or the caller is not authorized
 * @throws NotExistsException if the instance does not exist
 */
public void deleteInstance(ApplicationId instanceId) {
    if (getInstance(instanceId).isEmpty())
        throw new NotExistsException("Could not delete instance '" + instanceId + "': Instance not found");
    lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> {
        if ( ! application.get().require(instanceId.instance()).deployments().isEmpty())
            throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments in: " +
                                               application.get().require(instanceId.instance()).deployments().keySet().stream().map(ZoneId::toString)
                                                          .sorted().collect(joining(", ")));
        // Instances declared in deployment.xml must be removed there first.
        if ( ! application.get().deploymentSpec().equals(DeploymentSpec.empty)
             && application.get().deploymentSpec().instanceNames().contains(instanceId.instance()))
            throw new IllegalArgumentException("Can not delete '" + instanceId + "', which is specified in 'deployment.xml'; remove it there first");
        controller.routing().removeEndpointsInDns(application.get(), instanceId.instance());
        curator.writeApplication(application.without(instanceId.instance()).get());
        controller.jobController().collectGarbage();
        controller.notificationsDb().removeNotifications(NotificationSource.from(instanceId));
        log.info("Deleted " + instanceId);
    });
}
/**
 * Replaces any previous version of this application with this instance.
 *
 * @param application a locked application to store
 */
public void store(LockedApplication application) {
    curator.writeApplication(application.get());
}
/**
* Acquire a locked application to modify and store, if there is an application with the given id.
*
* @param applicationId ID of the application to lock and get.
* @param action Function which acts on the locked application.
*/
public void lockApplicationIfPresent(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
try (Lock lock = lock(applicationId)) {
getApplication(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
}
}
/**
* Acquire a locked application to modify and store, or throw an exception if no application has the given id.
*
* @param applicationId ID of the application to lock and require.
* @param action Function which acts on the locked application.
* @throws IllegalArgumentException when application does not exist.
*/
public void lockApplicationOrThrow(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
try (Lock lock = lock(applicationId)) {
action.accept(new LockedApplication(requireApplication(applicationId), lock));
}
}
/**
 * Tells config server to schedule a restart of all nodes in this deployment
 *
 * @param restartFilter Variables to filter which nodes to restart.
 */
public void restart(DeploymentId deploymentId, RestartFilter restartFilter) {
configServer.restart(deploymentId, restartFilter);
}
/**
 * Asks the config server whether this deployment is currently healthy, i.e., serving traffic as usual.
 * If this cannot be ascertained, we must assume it is not.
 */
public boolean isHealthy(DeploymentId deploymentId) {
try {
return ! isSuspended(deploymentId);
}
catch (RuntimeException e) {
// Failure to reach the config server counts as unhealthy, but is logged rather than propagated.
log.log(Level.WARNING, "Failed getting suspension status of " + deploymentId + ": " + Exceptions.toMessageString(e));
return false;
}
}
/**
 * Asks the config server whether this deployment is currently <i>suspended</i>:
 * Not in a state where it should receive traffic.
 */
public boolean isSuspended(DeploymentId deploymentId) {
return configServer.isSuspended(deploymentId);
}
/** Sets suspension status of the given deployment in its zone. */
public void setSuspension(DeploymentId deploymentId, boolean suspend) {
configServer.setSuspension(deploymentId, suspend);
}
/** Deactivates the given application instance in the given zone, and stores the updated application. */
public void deactivate(ApplicationId id, ZoneId zone) {
lockApplicationOrThrow(TenantAndApplicationId.from(id),
application -> store(deactivate(application, id.instance(), zone)));
}
/**
 * Deactivates a locked application without storing it
 *
 * @return the application with the deployment in the given zone removed
 */
private LockedApplication deactivate(LockedApplication application, InstanceName instanceName, ZoneId zone) {
DeploymentId id = new DeploymentId(application.get().id().instance(instanceName), zone);
try {
configServer.deactivate(id);
} finally {
// Routing, metadata and notifications are cleaned up even if deactivation in the config server throws.
controller.routing().policies().refresh(application.get().id().instance(instanceName), application.get().deploymentSpec(), zone);
if (zone.environment().isManuallyDeployed())
applicationStore.putMetaTombstone(id, clock.instant());
if (!zone.environment().isTest())
controller.notificationsDb().removeNotifications(NotificationSource.from(id));
}
return application.with(instanceName, instance -> instance.withoutDeploymentIn(zone));
}
/** Returns the deployment trigger owned by this. */
public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }
/**
 * Returns a lock which provides exclusive rights to changing this application.
 * Any operation which stores an application need to first acquire this lock, then read, modify
 * and store the application, and finally release (close) the lock.
 */
Lock lock(TenantAndApplicationId application) {
return curator.lock(application);
}
/**
 * Returns a lock which provides exclusive rights to deploying this application to the given zone.
 */
private Lock lockForDeployment(ApplicationId application, ZoneId zone) {
return curator.lockForDeployment(application, zone);
}
/**
 * Verifies that the application can be deployed to the tenant, following these rules:
 *
 * 1. Verify that the Athenz service can be launched by the config server
 * 2. If the principal is given, verify that the principal is tenant admin or admin of the tenant domain
 * 3. If the principal is not given, verify that the Athenz domain of the tenant equals Athenz domain given in deployment.xml
 *
 * @param tenantName tenant where application should be deployed
 * @param applicationPackage application package
 * @param deployer principal initiating the deployment, possibly empty
 */
public void verifyApplicationIdentityConfiguration(TenantName tenantName, Optional<InstanceName> instanceName, Optional<ZoneId> zoneId, ApplicationPackage applicationPackage, Optional<Principal> deployer) {
Optional<AthenzDomain> identityDomain = applicationPackage.deploymentSpec().athenzDomain()
.map(domain -> new AthenzDomain(domain.value()));
// No Athenz identity configured in deployment.xml: nothing to verify.
if(identityDomain.isEmpty()) {
return;
}
if(! (accessControl instanceof AthenzFacade)) {
throw new IllegalArgumentException("Athenz domain and service specified in deployment.xml, but not supported by system.");
}
// Rule 1: the config server must be able to launch the configured service(s).
verifyAllowedLaunchAthenzService(applicationPackage.deploymentSpec());
Optional<AthenzUser> athenzUser = getUser(deployer);
if (athenzUser.isPresent()) {
// Rule 2: a user principal must be able to launch the service, or be tenant admin.
var zone = zoneId.orElseThrow(() -> new IllegalArgumentException("Unable to evaluate access, no zone provided in deployment"));
// Instance-level service overrides the application-level default.
var serviceToLaunch = instanceName
.flatMap(instance -> applicationPackage.deploymentSpec().instance(instance))
.flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region()))
.or(() -> applicationPackage.deploymentSpec().athenzService())
.map(service -> new AthenzService(identityDomain.get(), service.value()));
if(serviceToLaunch.isPresent()) {
if (
! ((AthenzFacade) accessControl).canLaunch(athenzUser.get(), serviceToLaunch.get()) &&
! ((AthenzFacade) accessControl).hasTenantAdminAccess(athenzUser.get(), identityDomain.get())
) {
throw new IllegalArgumentException("User " + athenzUser.get().getFullName() + " is not allowed to launch " +
"service " + serviceToLaunch.get().getFullName() + ". " +
"Please reach out to the domain admin.");
}
} else {
throw new IllegalArgumentException("Athenz domain configured, but no service defined for deployment to " + zone.value());
}
} else {
// Rule 3: without a user principal, the tenant's Athenz domain must match deployment.xml.
Tenant tenant = controller.tenants().require(tenantName);
AthenzDomain tenantDomain = ((AthenzTenant) tenant).domain();
if ( ! Objects.equals(tenantDomain, identityDomain.get()))
throw new IllegalArgumentException("Athenz domain in deployment.xml: [" + identityDomain.get().getName() + "] " +
"must match tenant domain: [" + tenantDomain.getName() + "]");
}
}
/** Throws if this job would deploy versions older than those already in the given production zone. */
private void rejectOldChange(Instance instance, Version platform, ApplicationVersion revision, JobId job, ZoneId zone) {
Deployment deployment = instance.deployments().get(zone);
if (deployment == null) return;
// Downgrades are only rejected for production zones.
if (!zone.environment().isProduction()) return;
// A pinned change is allowed to target an older platform.
boolean platformIsOlder = platform.compareTo(deployment.version()) < 0 && !instance.change().isPinned();
// Unknown revisions are tolerated in CD systems.
boolean revisionIsOlder = revision.compareTo(deployment.applicationVersion()) < 0 &&
!(revision.isUnknown() && controller.system().isCd());
if (platformIsOlder || revisionIsOlder)
throw new IllegalArgumentException(String.format("Rejecting deployment of application %s to %s, as the requested versions (platform: %s, application: %s)" +
" are older than the currently deployed (platform: %s, application: %s).",
job.application(), zone, platform, revision, deployment.version(), deployment.applicationVersion()));
}
/** Returns the given id with dashes in the application name replaced by underscores, for name-collision checks. */
private TenantAndApplicationId dashToUnderscore(TenantAndApplicationId id) {
    // replace(char, char) instead of replaceAll: the argument is a literal character, not a regex.
    return TenantAndApplicationId.from(id.tenant().value(), id.application().value().replace('-', '_'));
}

/** Returns the given instance id with dashes in the application name replaced by underscores. */
private ApplicationId dashToUnderscore(ApplicationId id) {
    return dashToUnderscore(TenantAndApplicationId.from(id)).instance(id.instance());
}
/** Fetches current node usage for the given deployment and computes its quota usage from it. */
private QuotaUsage deploymentQuotaUsage(ZoneId zoneId, ApplicationId applicationId) {
    return DeploymentQuotaCalculator.calculateQuotaUsage(configServer.nodeRepository().getApplication(zoneId, applicationId));
}

/** Fetches the package for the given revision: the dev store for unknown revisions, the regular store otherwise. */
private ApplicationPackage getApplicationPackage(ApplicationId application, ZoneId zone, ApplicationVersion revision) {
    if (revision.isUnknown())
        return new ApplicationPackage(applicationStore.getDev(application, zone));
    return new ApplicationPackage(applicationStore.get(application.tenant(), application.application(), revision));
}
/*
 * Returns the AthenzUser behind this principal, or empty if the principal does not represent an Athenz user.
 */
private Optional<AthenzUser> getUser(Optional<Principal> deployer) {
    if (deployer.isEmpty() || ! (deployer.get() instanceof AthenzPrincipal))
        return Optional.empty();
    AthenzIdentity identity = ((AthenzPrincipal) deployer.get()).getIdentity();
    if ( ! (identity instanceof AthenzUser))
        return Optional.empty();
    return Optional.of((AthenzUser) identity);
}
/*
 * Verifies that the configured athenz service (if any) can be launched by the config servers
 * of every reachable zone, for both the application-level and instance-level service settings.
 */
private void verifyAllowedLaunchAthenzService(DeploymentSpec deploymentSpec) {
deploymentSpec.athenzDomain().ifPresent(domain -> {
controller.zoneRegistry().zones().reachable().ids().forEach(zone -> {
AthenzIdentity configServerAthenzIdentity = controller.zoneRegistry().getConfigServerHttpsIdentity(zone);
// Application-level default service.
deploymentSpec.athenzService().ifPresent(service -> {
verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()));
});
// Instance-level overrides, per environment and region.
deploymentSpec.instances().forEach(spec -> {
spec.athenzService(zone.environment(), zone.region()).ifPresent(service -> {
verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()));
});
});
});
});
}
/** Throws unless the given identity is allowed to launch the given Athenz service. */
private void verifyAthenzServiceCanBeLaunchedBy(AthenzIdentity configServerAthenzIdentity, AthenzService athenzService) {
if ( ! ((AthenzFacade) accessControl).canLaunch(configServerAthenzIdentity, athenzService))
throw new IllegalArgumentException("Not allowed to launch Athenz service " + athenzService.getFullName());
}
/** Returns the latest known version within the given major. */
public Optional<Version> lastCompatibleVersion(int targetMajorVersion) {
    Optional<Version> latest = Optional.empty();
    for (VespaVersion vespaVersion : controller.readVersionStatus().versions()) {
        Version candidate = vespaVersion.versionNumber();
        if (candidate.getMajor() != targetMajorVersion) continue;
        if (latest.isEmpty() || candidate.compareTo(latest.get()) > 0)
            latest = Optional.of(candidate);
    }
    return latest;
}
/**
 * Extracts the deployment warning counts from the log entries of a deployment result.
 * Entries at "warn"/"warning" level are all counted under {@code Warning.all}.
 */
private static Map<DeploymentMetrics.Warning, Integer> warningsFrom(ActivateResult result) {
    if (result.prepareResponse().log == null) return Map.of();
    Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>();
    // Named 'entry', not 'log', to avoid shadowing the class logger field.
    for (Log entry : result.prepareResponse().log) {
        if (!"warn".equalsIgnoreCase(entry.level) && !"warning".equalsIgnoreCase(entry.level)) continue;
        warnings.merge(DeploymentMetrics.Warning.all, 1, Integer::sum);
    }
    return Map.copyOf(warnings);
}
} | class ApplicationController {
private static final Logger log = Logger.getLogger(ApplicationController.class.getName());
/** The controller owning this */
private final Controller controller;
/** For persistence */
private final CuratorDb curator;
/** Source of build artifacts, e.g. system application packages. */
private final ArtifactRepository artifactRepository;
/** Store for application packages and deployment metadata. */
private final ApplicationStore applicationStore;
/** Authorization backend (Athenz in public-facing systems). */
private final AccessControl accessControl;
private final ConfigServer configServer;
/** Injected clock, for testable time handling. */
private final Clock clock;
private final DeploymentTrigger deploymentTrigger;
private final ApplicationPackageValidator applicationPackageValidator;
/** Provisions and validates endpoint TLS certificates for deployments. */
private final EndpointCertificates endpointCertificates;
/** Feature flag overriding the docker image repository per zone/application. */
private final StringFlag dockerImageRepoFlag;
private final BillingController billingController;
/** Creates this, wiring collaborators from the controller's service registry. */
ApplicationController(Controller controller, CuratorDb curator, AccessControl accessControl, Clock clock,
FlagSource flagSource, BillingController billingController) {
this.controller = controller;
this.curator = curator;
this.accessControl = accessControl;
this.configServer = controller.serviceRegistry().configServer();
this.clock = clock;
this.artifactRepository = controller.serviceRegistry().artifactRepository();
this.applicationStore = controller.serviceRegistry().applicationStore();
this.dockerImageRepoFlag = PermanentFlags.DOCKER_IMAGE_REPO.bindTo(flagSource);
this.billingController = billingController;
deploymentTrigger = new DeploymentTrigger(controller, clock);
applicationPackageValidator = new ApplicationPackageValidator(controller);
endpointCertificates = new EndpointCertificates(controller,
controller.serviceRegistry().endpointCertificateProvider(),
controller.serviceRegistry().endpointCertificateValidator());
// A minute after startup, rewrite all stored applications, creating any instances which are
// declared in deployment.xml but missing from the stored application.
Once.after(Duration.ofMinutes(1), () -> {
Instant start = clock.instant();
int count = 0;
for (TenantAndApplicationId id : curator.readApplicationIds()) {
lockApplicationIfPresent(id, application -> {
for (InstanceName instance : application.get().deploymentSpec().instanceNames())
if (!application.get().instances().containsKey(instance))
application = withNewInstance(application, id.instance(instance));
store(application);
});
count++;
}
log.log(Level.INFO, String.format("Wrote %d applications in %s", count,
Duration.between(start, clock.instant())));
});
}
/** Returns the application with the given id, or empty if it is not present */
public Optional<Application> getApplication(TenantAndApplicationId id) {
return curator.readApplication(id);
}
/** Returns the instance with the given id, or empty if it is not present */
public Optional<Instance> getInstance(ApplicationId id) {
return getApplication(TenantAndApplicationId.from(id)).flatMap(application -> application.get(id.instance()));
}
/**
 * Triggers reindexing for the given document types in the given clusters, for the given application.
 *
 * If no clusters are given, reindexing is triggered for the entire application; otherwise
 * if no documents types are given, reindexing is triggered for all given clusters; otherwise
 * reindexing is triggered for the cartesian product of the given clusters and document types.
 */
public void reindex(ApplicationId id, ZoneId zoneId, List<String> clusterNames, List<String> documentTypes, boolean indexedOnly) {
configServer.reindex(new DeploymentId(id, zoneId), clusterNames, documentTypes, indexedOnly);
}
/** Returns the reindexing status for the given application in the given zone. */
public ApplicationReindexing applicationReindexing(ApplicationId id, ZoneId zoneId) {
return configServer.getReindexing(new DeploymentId(id, zoneId));
}
/** Enables reindexing for the given application in the given zone. */
public void enableReindexing(ApplicationId id, ZoneId zoneId) {
configServer.enableReindexing(new DeploymentId(id, zoneId));
}
/** Disables reindexing for the given application in the given zone. */
public void disableReindexing(ApplicationId id, ZoneId zoneId) {
configServer.disableReindexing(new DeploymentId(id, zoneId));
}
/**
 * Returns the application with the given id
 *
 * @throws IllegalArgumentException if it does not exist
 */
public Application requireApplication(TenantAndApplicationId id) {
    Optional<Application> application = getApplication(id);
    if (application.isEmpty())
        throw new IllegalArgumentException(id + " not found");
    return application.get();
}
/**
 * Returns the instance with the given id
 *
 * @throws IllegalArgumentException if it does not exist
 */
public Instance requireInstance(ApplicationId id) {
    Optional<Instance> instance = getInstance(id);
    if (instance.isEmpty())
        throw new IllegalArgumentException(id + " not found");
    return instance.get();
}
/** Returns a snapshot of all applications */
public List<Application> asList() {
return curator.readApplications(false);
}
/**
 * Returns a snapshot of all readable applications. Unlike {@link ApplicationController#asList()}, this ignores
 * applications that cannot currently be read (e.g. due to serialization issues) and may return an incomplete
 * snapshot.
 *
 * This should only be used in cases where acting on a subset of applications is better than none.
 */
public List<Application> readable() {
return curator.readApplications(true);
}
/** Returns the ID of all known applications. */
public List<TenantAndApplicationId> idList() {
return curator.readApplicationIds();
}
/** Returns a snapshot of all applications of a tenant */
public List<Application> asList(TenantName tenant) {
return curator.readApplications(tenant);
}
/** Returns the artifact repository used by this. */
public ArtifactRepository artifacts() { return artifactRepository; }
/** Returns the application store used by this. */
public ApplicationStore applicationStore() { return applicationStore; }
/** Returns all currently reachable content clusters among the given deployments. */
public Map<ZoneId, List<String>> reachableContentClustersByZone(Collection<DeploymentId> ids) {
    Map<ZoneId, List<String>> clustersByZone = new TreeMap<>(Comparator.comparing(ZoneId::value));
    for (DeploymentId deployment : ids) {
        if ( ! isHealthy(deployment)) continue;
        clustersByZone.put(deployment.zoneId(), List.copyOf(configServer.getContentClusters(deployment)));
    }
    return Collections.unmodifiableMap(clustersByZone);
}
/** Reads the oldest installed platform for the given application and zone from job history, or a node repo. */
private Optional<Version> oldestInstalledPlatform(JobStatus job) {
Version oldest = null;
// Walk runs from newest to oldest: any platform targeted since the last success may still be installed.
for (Run run : job.runs().descendingMap().values()) {
Version version = run.versions().targetPlatform();
if (oldest == null || version.isBefore(oldest))
oldest = version;
if (run.status() == RunStatus.success)
return Optional.of(oldest);
}
// No successful run on record: fall back to the zone's node repository.
return oldestInstalledPlatform(job.id());
}
/** Reads the oldest installed platform for the given application and zone from the node repo of that zone. */
private Optional<Version> oldestInstalledPlatform(JobId job) {
return configServer.nodeRepository().list(job.type().zone(controller.system()),
job.application(),
EnumSet.of(active, reserved))
.stream()
.map(Node::currentVersion)
.filter(version -> ! version.isEmpty())
.min(naturalOrder());
}
/** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */
public Version oldestInstalledPlatform(TenantAndApplicationId id) {
return controller.jobController().deploymentStatus(requireApplication(id)).jobs()
.production().asList().stream()
.map(this::oldestInstalledPlatform)
.flatMap(Optional::stream)
.min(naturalOrder())
.orElse(controller.readSystemVersion());
}
/**
 * Creates a new application for an existing tenant.
 *
 * @throws IllegalArgumentException if the application already exists
 */
public Application createApplication(TenantAndApplicationId id, Credentials credentials) {
try (Lock lock = lock(id)) {
if (getApplication(id).isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
// Also reject names colliding after dash-to-underscore normalization (see dashToUnderscore).
if (getApplication(dashToUnderscore(id)).isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
if (controller.tenants().get(id.tenant()).isEmpty())
throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
accessControl.createApplication(id, credentials);
LockedApplication locked = new LockedApplication(new Application(id, clock.instant()), lock);
store(locked);
log.info("Created " + locked);
return locked.get();
}
}
/**
 * Creates a new instance for an existing application.
 *
 * @throws IllegalArgumentException if the instance already exists, or has an invalid instance name.
 */
public void createInstance(ApplicationId id) {
lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
store(withNewInstance(application, id));
});
}
/** Fetches the requested application package from the artifact store(s). */
public ApplicationPackage getApplicationPackage(ApplicationId id, ApplicationVersion version) {
return new ApplicationPackage(applicationStore.get(id.tenant(), id.application(), version));
}
/**
 * Returns the given application with a new instance added.
 *
 * @throws IllegalArgumentException if the instance is a tester, has an invalid name, or already exists
 */
public LockedApplication withNewInstance(LockedApplication application, ApplicationId instance) {
if (instance.instance().isTester())
throw new IllegalArgumentException("'" + instance + "' is a tester application!");
InstanceId.validate(instance.instance().value());
if (getInstance(instance).isPresent())
throw new IllegalArgumentException("Could not create '" + instance + "': Instance already exists");
// Also reject names colliding after dash-to-underscore normalization (see dashToUnderscore).
if (getInstance(dashToUnderscore(instance)).isPresent())
throw new IllegalArgumentException("Could not create '" + instance + "': Instance " + dashToUnderscore(instance) + " already exists");
log.info("Created " + instance);
return application.withNewInstance(instance.instance());
}
/** Deploys an application package for an existing application instance. */
public ActivateResult deploy(JobId job, boolean deploySourceVersions) {
if (job.application().instance().isTester())
throw new IllegalArgumentException("'" + job.application() + "' is a tester application!");
TenantAndApplicationId applicationId = TenantAndApplicationId.from(job.application());
ZoneId zone = job.type().zone(controller.system());
// The deployment lock serializes deployments of this instance to this zone.
try (Lock deploymentLock = lockForDeployment(job.application(), zone)) {
Set<ContainerEndpoint> containerEndpoints;
Optional<EndpointCertificateMetadata> endpointCertificateMetadata;
Optional<TenantRoles> tenantRoles = Optional.empty();
Run run = controller.jobController().last(job)
.orElseThrow(() -> new IllegalStateException("No known run of '" + job + "'"));
if (run.hasEnded())
throw new IllegalStateException("No deployment expected for " + job + " now, as no job is running");
// Source versions are deployed only when requested; otherwise the run's targets are used.
Version platform = run.versions().sourcePlatform().filter(__ -> deploySourceVersions).orElse(run.versions().targetPlatform());
ApplicationVersion revision = run.versions().sourceApplication().filter(__ -> deploySourceVersions).orElse(run.versions().targetApplication());
ApplicationPackage applicationPackage = getApplicationPackage(job.application(), zone, revision);
// The application lock is held only while reading what is needed to prepare the deployment.
try (Lock lock = lock(applicationId)) {
LockedApplication application = new LockedApplication(requireApplication(applicationId), lock);
Instance instance = application.get().require(job.application().instance());
rejectOldChange(instance, platform, revision, job, zone);
if ( ! applicationPackage.trustedCertificates().isEmpty()
&& run.testerCertificate().isPresent())
applicationPackage = applicationPackage.withTrustedCertificate(run.testerCertificate().get());
endpointCertificateMetadata = endpointCertificates.getMetadata(instance, zone, applicationPackage.deploymentSpec().instance(instance.name()));
containerEndpoints = controller.routing().containerEndpointsOf(application.get(), job.application().instance(), zone);
}
// The actual config server deployment happens outside the application lock.
ActivateResult result = deploy(job.application(), applicationPackage, zone, platform, containerEndpoints, endpointCertificateMetadata, tenantRoles);
var quotaUsage = deploymentQuotaUsage(zone, job.application());
// Application-package warnings from the prepare log become (or clear) a notification.
NotificationSource source = zone.environment().isManuallyDeployed() ?
NotificationSource.from(new DeploymentId(job.application(), zone)) : NotificationSource.from(applicationId);
List<String> warnings = Optional.ofNullable(result.prepareResponse().log)
.map(logs -> logs.stream()
.filter(log -> log.applicationPackage)
.filter(log -> LogLevel.parse(log.level).intValue() >= Level.WARNING.intValue())
.map(log -> log.message)
.sorted()
.distinct()
.collect(Collectors.toList()))
.orElseGet(List::of);
if (warnings.isEmpty()) controller.notificationsDb().removeNotification(source, Notification.Type.applicationPackage);
else controller.notificationsDb().setNotification(source, Notification.Type.applicationPackage, Notification.Level.warning, warnings);
// Re-acquire the application lock to record the new deployment.
lockApplicationOrThrow(applicationId, application ->
store(application.with(job.application().instance(),
instance -> instance.withNewDeployment(zone, revision, platform,
clock.instant(), warningsFrom(result),
quotaUsage))));
return result;
}
}
/** Deploy a system application to given zone */
public void deploy(SystemApplication application, ZoneId zone, Version version) {
if (application.hasApplicationPackage()) {
deploySystemApplicationPackage(application, zone, version);
} else {
// System applications without a package are upgraded directly through the node repository.
configServer.nodeRepository().upgrade(zone, application.nodeType(), version);
}
}
/**
 * Deploys the application package of a system application to the given zone.
 *
 * @throws RuntimeException if the system application has no application package
 */
public ActivateResult deploySystemApplicationPackage(SystemApplication application, ZoneId zone, Version version) {
if (application.hasApplicationPackage()) {
ApplicationPackage applicationPackage = new ApplicationPackage(
artifactRepository.getSystemApplicationPackage(application.id(), zone, version)
);
return deploy(application.id(), applicationPackage, zone, version, Set.of(), /* No application cert */ Optional.empty(), Optional.empty());
} else {
throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString());
}
}
/** Deploys the given tester application to the given zone. */
public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, Version platform) {
return deploy(tester.id(), applicationPackage, zone, platform, Set.of(), /* No application cert for tester*/ Optional.empty(), Optional.empty());
}
/** Prepares and activates the given package in the given zone, gathering all zone-specific deployment data. */
private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage,
ZoneId zone, Version platform, Set<ContainerEndpoint> endpoints,
Optional<EndpointCertificateMetadata> endpointCertificateMetadata,
Optional<TenantRoles> tenantRoles) {
try {
// Docker image repository may be overridden per zone/application by feature flag; blank means unset.
Optional<DockerImage> dockerImageRepo = Optional.ofNullable(
dockerImageRepoFlag
.with(FetchVector.Dimension.ZONE_ID, zone.value())
.with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm())
.value())
.filter(s -> !s.isBlank())
.map(DockerImage::fromString);
Optional<AthenzDomain> domain = controller.tenants().get(application.tenant())
.filter(tenant-> tenant instanceof AthenzTenant)
.map(tenant -> ((AthenzTenant)tenant).domain());
// Manual deployments have no job to store metadata, so store it here.
if (zone.environment().isManuallyDeployed())
controller.applications().applicationStore().putMeta(new DeploymentId(application, zone),
clock.instant(),
applicationPackage.metaDataZip());
Quota deploymentQuota = DeploymentQuotaCalculator.calculate(billingController.getQuota(application.tenant()),
asList(application.tenant()), application, zone, applicationPackage.deploymentSpec());
List<TenantSecretStore> tenantSecretStores = controller.tenants()
.get(application.tenant())
.filter(tenant-> tenant instanceof CloudTenant)
.map(tenant -> ((CloudTenant) tenant).tenantSecretStores())
.orElse(List.of());
List<X509Certificate> operatorCertificates = controller.supportAccess().activeGrantsFor(new DeploymentId(application, zone)).stream()
.map(SupportAccessGrant::certificate)
.collect(toList());
ConfigServer.PreparedApplication preparedApplication =
configServer.deploy(new DeploymentData(application, zone, applicationPackage.zippedContent(), platform,
endpoints, endpointCertificateMetadata, dockerImageRepo, domain,
tenantRoles, deploymentQuota, tenantSecretStores, operatorCertificates));
return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
applicationPackage.zippedContent().length);
} finally {
// Routing policies are refreshed whether or not the deployment succeeded.
controller.routing().policies().refresh(application, applicationPackage.deploymentSpec(), zone);
}
}
/**
 * Removes production deployments no longer declared in deployment.xml for the given instance,
 * requiring the deployment-removal validation override to be in effect.
 * The instance itself is removed when it is no longer declared and all its deployments are removed.
 */
private LockedApplication withoutDeletedDeployments(LockedApplication application, InstanceName instance) {
DeploymentSpec deploymentSpec = application.get().deploymentSpec();
List<ZoneId> deploymentsToRemove = application.get().require(instance).productionDeployments().values().stream()
.map(Deployment::zone)
.filter(zone -> deploymentSpec.instance(instance).isEmpty()
|| ! deploymentSpec.requireInstance(instance).deploysTo(zone.environment(),
zone.region()))
.collect(toList());
if (deploymentsToRemove.isEmpty())
return application;
// Removing deployments is destructive, so deployment.xml must explicitly allow it.
if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get().require(instance) +
" is deployed in " +
deploymentsToRemove.stream()
.map(zone -> zone.region().value())
.collect(joining(", ")) +
", but does not include " +
(deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
" in deployment.xml. " +
ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval));
boolean removeInstance = ! deploymentSpec.instanceNames().contains(instance)
&& application.get().require(instance).deployments().size() == deploymentsToRemove.size();
for (ZoneId zone : deploymentsToRemove)
application = deactivate(application, instance, zone);
if (removeInstance)
application = application.without(instance);
return application;
}
/**
 * Deletes the given application. All known instances of the applications will be deleted.
 *
 * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
 */
public void deleteApplication(TenantAndApplicationId id, Credentials credentials) {
lockApplicationOrThrow(id, application -> {
// Collect any live deployments per instance; deletion requires all of them gone.
var deployments = application.get().instances().values().stream()
.filter(instance -> ! instance.deployments().isEmpty())
.collect(toMap(instance -> instance.name(),
instance -> instance.deployments().keySet().stream()
.map(ZoneId::toString)
.collect(joining(", "))));
if ( ! deployments.isEmpty())
throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments: " + deployments);
for (Instance instance : application.get().instances().values()) {
controller.routing().removeEndpointsInDns(application.get(), instance.name());
application = application.without(instance.name());
}
applicationStore.removeAll(id.tenant(), id.application());
applicationStore.removeAllTesters(id.tenant(), id.application());
applicationStore.putMetaTombstone(id.tenant(), id.application(), clock.instant());
accessControl.deleteApplication(id, credentials);
curator.removeApplication(id);
controller.jobController().collectGarbage();
controller.notificationsDb().removeNotifications(NotificationSource.from(id));
log.info("Deleted " + id);
});
}
/**
 * Deletes the given application instance.
 *
 * @throws IllegalArgumentException if the instance has active deployments, or is declared in deployment.xml
 * @throws NotExistsException if the instance does not exist
 */
public void deleteInstance(ApplicationId instanceId) {
// Fail fast before taking the application lock.
if (getInstance(instanceId).isEmpty())
throw new NotExistsException("Could not delete instance '" + instanceId + "': Instance not found");
lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> {
// An instance with live deployments must be deactivated before it can be deleted.
if ( ! application.get().require(instanceId.instance()).deployments().isEmpty())
throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments in: " +
application.get().require(instanceId.instance()).deployments().keySet().stream().map(ZoneId::toString)
.sorted().collect(joining(", ")));
// Refuse deletion while deployment.xml still declares this instance, as it would otherwise be recreated.
if ( ! application.get().deploymentSpec().equals(DeploymentSpec.empty)
&& application.get().deploymentSpec().instanceNames().contains(instanceId.instance()))
throw new IllegalArgumentException("Can not delete '" + instanceId + "', which is specified in 'deployment.xml'; remove it there first");
controller.routing().removeEndpointsInDns(application.get(), instanceId.instance());
curator.writeApplication(application.without(instanceId.instance()).get());
controller.jobController().collectGarbage();
controller.notificationsDb().removeNotifications(NotificationSource.from(instanceId));
log.info("Deleted " + instanceId);
});
}
/**
 * Replace any previous version of this application by this instance.
 * The caller must hold the application lock (see {@code lock(TenantAndApplicationId)}).
 *
 * @param application a locked application to store
 */
public void store(LockedApplication application) {
curator.writeApplication(application.get());
}
/**
 * Acquire a locked application to modify and store, if there is an application with the given id.
 * The lock is released when this method returns, so {@code action} must complete all stores within it.
 *
 * @param applicationId ID of the application to lock and get.
 * @param action Function which acts on the locked application.
 */
public void lockApplicationIfPresent(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
try (Lock lock = lock(applicationId)) {
getApplication(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
}
}
/**
 * Acquire a locked application to modify and store, or throw an exception if no application has the given id.
 *
 * @param applicationId ID of the application to lock and require.
 * @param action Function which acts on the locked application.
 * @throws IllegalArgumentException when application does not exist.
 */
public void lockApplicationOrThrow(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
try (Lock lock = lock(applicationId)) {
action.accept(new LockedApplication(requireApplication(applicationId), lock));
}
}
/**
* Tells config server to schedule a restart of all nodes in this deployment
*
* @param restartFilter Variables to filter which nodes to restart.
*/
public void restart(DeploymentId deploymentId, RestartFilter restartFilter) {
configServer.restart(deploymentId, restartFilter);
}
/**
* Asks the config server whether this deployment is currently healthy, i.e., serving traffic as usual.
* If this cannot be ascertained, we must assumed it is not.
*/
public boolean isHealthy(DeploymentId deploymentId) {
try {
return ! isSuspended(deploymentId);
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Failed getting suspension status of " + deploymentId + ": " + Exceptions.toMessageString(e));
return false;
}
}
/**
* Asks the config server whether this deployment is currently <i>suspended</i>:
* Not in a state where it should receive traffic.
*/
public boolean isSuspended(DeploymentId deploymentId) {
return configServer.isSuspended(deploymentId);
}
/** Sets suspension status of the given deployment in its zone. */
public void setSuspension(DeploymentId deploymentId, boolean suspend) {
configServer.setSuspension(deploymentId, suspend);
}
/** Deactivate application in the given zone */
public void deactivate(ApplicationId id, ZoneId zone) {
lockApplicationOrThrow(TenantAndApplicationId.from(id),
application -> store(deactivate(application, id.instance(), zone)));
}
/**
* Deactivates a locked application without storing it
*
* @return the application with the deployment in the given zone removed
*/
private LockedApplication deactivate(LockedApplication application, InstanceName instanceName, ZoneId zone) {
DeploymentId id = new DeploymentId(application.get().id().instance(instanceName), zone);
try {
configServer.deactivate(id);
} finally {
controller.routing().policies().refresh(application.get().id().instance(instanceName), application.get().deploymentSpec(), zone);
if (zone.environment().isManuallyDeployed())
applicationStore.putMetaTombstone(id, clock.instant());
if (!zone.environment().isTest())
controller.notificationsDb().removeNotifications(NotificationSource.from(id));
}
return application.with(instanceName, instance -> instance.withoutDeploymentIn(zone));
}
public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }
/**
* Returns a lock which provides exclusive rights to changing this application.
* Any operation which stores an application need to first acquire this lock, then read, modify
* and store the application, and finally release (close) the lock.
*/
Lock lock(TenantAndApplicationId application) {
return curator.lock(application);
}
/**
* Returns a lock which provides exclusive rights to deploying this application to the given zone.
*/
private Lock lockForDeployment(ApplicationId application, ZoneId zone) {
return curator.lockForDeployment(application, zone);
}
/**
* Verifies that the application can be deployed to the tenant, following these rules:
*
* 1. Verify that the Athenz service can be launched by the config server
* 2. If the principal is given, verify that the principal is tenant admin or admin of the tenant domain
* 3. If the principal is not given, verify that the Athenz domain of the tenant equals Athenz domain given in deployment.xml
*
* @param tenantName tenant where application should be deployed
* @param applicationPackage application package
* @param deployer principal initiating the deployment, possibly empty
*/
public void verifyApplicationIdentityConfiguration(TenantName tenantName, Optional<InstanceName> instanceName, Optional<ZoneId> zoneId, ApplicationPackage applicationPackage, Optional<Principal> deployer) {
Optional<AthenzDomain> identityDomain = applicationPackage.deploymentSpec().athenzDomain()
.map(domain -> new AthenzDomain(domain.value()));
if(identityDomain.isEmpty()) {
return;
}
if(! (accessControl instanceof AthenzFacade)) {
throw new IllegalArgumentException("Athenz domain and service specified in deployment.xml, but not supported by system.");
}
verifyAllowedLaunchAthenzService(applicationPackage.deploymentSpec());
Optional<AthenzUser> athenzUser = getUser(deployer);
if (athenzUser.isPresent()) {
var zone = zoneId.orElseThrow(() -> new IllegalArgumentException("Unable to evaluate access, no zone provided in deployment"));
var serviceToLaunch = instanceName
.flatMap(instance -> applicationPackage.deploymentSpec().instance(instance))
.flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region()))
.or(() -> applicationPackage.deploymentSpec().athenzService())
.map(service -> new AthenzService(identityDomain.get(), service.value()));
if(serviceToLaunch.isPresent()) {
if (
! ((AthenzFacade) accessControl).canLaunch(athenzUser.get(), serviceToLaunch.get()) &&
! ((AthenzFacade) accessControl).hasTenantAdminAccess(athenzUser.get(), identityDomain.get())
) {
throw new IllegalArgumentException("User " + athenzUser.get().getFullName() + " is not allowed to launch " +
"service " + serviceToLaunch.get().getFullName() + ". " +
"Please reach out to the domain admin.");
}
} else {
throw new IllegalArgumentException("Athenz domain configured, but no service defined for deployment to " + zone.value());
}
} else {
Tenant tenant = controller.tenants().require(tenantName);
AthenzDomain tenantDomain = ((AthenzTenant) tenant).domain();
if ( ! Objects.equals(tenantDomain, identityDomain.get()))
throw new IllegalArgumentException("Athenz domain in deployment.xml: [" + identityDomain.get().getName() + "] " +
"must match tenant domain: [" + tenantDomain.getName() + "]");
}
}
private void rejectOldChange(Instance instance, Version platform, ApplicationVersion revision, JobId job, ZoneId zone) {
Deployment deployment = instance.deployments().get(zone);
if (deployment == null) return;
if (!zone.environment().isProduction()) return;
boolean platformIsOlder = platform.compareTo(deployment.version()) < 0 && !instance.change().isPinned();
boolean revisionIsOlder = revision.compareTo(deployment.applicationVersion()) < 0 &&
!(revision.isUnknown() && controller.system().isCd());
if (platformIsOlder || revisionIsOlder)
throw new IllegalArgumentException(String.format("Rejecting deployment of application %s to %s, as the requested versions (platform: %s, application: %s)" +
" are older than the currently deployed (platform: %s, application: %s).",
job.application(), zone, platform, revision, deployment.version(), deployment.applicationVersion()));
}
private TenantAndApplicationId dashToUnderscore(TenantAndApplicationId id) {
return TenantAndApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_"));
}
private ApplicationId dashToUnderscore(ApplicationId id) {
return dashToUnderscore(TenantAndApplicationId.from(id)).instance(id.instance());
}
private QuotaUsage deploymentQuotaUsage(ZoneId zoneId, ApplicationId applicationId) {
var application = configServer.nodeRepository().getApplication(zoneId, applicationId);
return DeploymentQuotaCalculator.calculateQuotaUsage(application);
}
private ApplicationPackage getApplicationPackage(ApplicationId application, ZoneId zone, ApplicationVersion revision) {
return new ApplicationPackage(revision.isUnknown() ? applicationStore.getDev(application, zone)
: applicationStore.get(application.tenant(), application.application(), revision));
}
/*
* Get the AthenzUser from this principal or Optional.empty if this does not represent a user.
*/
private Optional<AthenzUser> getUser(Optional<Principal> deployer) {
return deployer
.filter(AthenzPrincipal.class::isInstance)
.map(AthenzPrincipal.class::cast)
.map(AthenzPrincipal::getIdentity)
.filter(AthenzUser.class::isInstance)
.map(AthenzUser.class::cast);
}
/*
* Verifies that the configured athenz service (if any) can be launched.
*/
private void verifyAllowedLaunchAthenzService(DeploymentSpec deploymentSpec) {
deploymentSpec.athenzDomain().ifPresent(domain -> {
controller.zoneRegistry().zones().reachable().ids().forEach(zone -> {
AthenzIdentity configServerAthenzIdentity = controller.zoneRegistry().getConfigServerHttpsIdentity(zone);
deploymentSpec.athenzService().ifPresent(service -> {
verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()));
});
deploymentSpec.instances().forEach(spec -> {
spec.athenzService(zone.environment(), zone.region()).ifPresent(service -> {
verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()));
});
});
});
});
}
private void verifyAthenzServiceCanBeLaunchedBy(AthenzIdentity configServerAthenzIdentity, AthenzService athenzService) {
if ( ! ((AthenzFacade) accessControl).canLaunch(configServerAthenzIdentity, athenzService))
throw new IllegalArgumentException("Not allowed to launch Athenz service " + athenzService.getFullName());
}
/** Returns the latest known version within the given major. */
public Optional<Version> lastCompatibleVersion(int targetMajorVersion) {
return controller.readVersionStatus().versions().stream()
.map(VespaVersion::versionNumber)
.filter(version -> version.getMajor() == targetMajorVersion)
.max(naturalOrder());
}
/** Extract deployment warnings metric from deployment result */
private static Map<DeploymentMetrics.Warning, Integer> warningsFrom(ActivateResult result) {
if (result.prepareResponse().log == null) return Map.of();
Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>();
for (Log log : result.prepareResponse().log) {
if (!"warn".equalsIgnoreCase(log.level) && !"warning".equalsIgnoreCase(log.level)) continue;
warnings.merge(DeploymentMetrics.Warning.all, 1, Integer::sum);
}
return Map.copyOf(warnings);
}
} |
😞 we should make it one shared code path. | public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) {
applicationPackageValidator.validate(application.get(), applicationPackage, clock.instant());
application = application.with(applicationPackage.deploymentSpec());
application = application.with(applicationPackage.validationOverrides());
var existingInstances = application.get().instances().keySet();
var declaredInstances = applicationPackage.deploymentSpec().instanceNames();
for (var name : declaredInstances)
if ( ! existingInstances.contains(name))
application = withNewInstance(application, application.get().id().instance(name));
for (InstanceName name : existingInstances) {
application = withoutDeletedDeployments(application, name);
}
for (InstanceName instance : declaredInstances)
if (applicationPackage.deploymentSpec().requireInstance(instance).concerns(Environment.prod))
application = controller.routing().assignRotations(application, instance);
controller.jobController().deploymentStatus(application.get());
for (var name : existingInstances)
if ( ! declaredInstances.contains(name))
controller.notificationsDb().removeNotifications(NotificationSource.from(application.get().id().instance(name)));
store(application);
return application;
} | controller.notificationsDb().removeNotifications(NotificationSource.from(application.get().id().instance(name))); | public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) {
applicationPackageValidator.validate(application.get(), applicationPackage, clock.instant());
application = application.with(applicationPackage.deploymentSpec());
application = application.with(applicationPackage.validationOverrides());
var existingInstances = application.get().instances().keySet();
var declaredInstances = applicationPackage.deploymentSpec().instanceNames();
for (var name : declaredInstances)
if ( ! existingInstances.contains(name))
application = withNewInstance(application, application.get().id().instance(name));
for (InstanceName name : existingInstances) {
application = withoutDeletedDeployments(application, name);
}
for (InstanceName instance : declaredInstances)
if (applicationPackage.deploymentSpec().requireInstance(instance).concerns(Environment.prod))
application = controller.routing().assignRotations(application, instance);
controller.jobController().deploymentStatus(application.get());
for (var name : existingInstances)
if ( ! declaredInstances.contains(name))
controller.notificationsDb().removeNotifications(NotificationSource.from(application.get().id().instance(name)));
store(application);
return application;
} | class ApplicationController {
private static final Logger log = Logger.getLogger(ApplicationController.class.getName());
/** The controller owning this */
private final Controller controller;
/** For persistence */
private final CuratorDb curator;
private final ArtifactRepository artifactRepository;
private final ApplicationStore applicationStore;
private final AccessControl accessControl;
private final ConfigServer configServer;
private final Clock clock;
private final DeploymentTrigger deploymentTrigger;
private final ApplicationPackageValidator applicationPackageValidator;
private final EndpointCertificates endpointCertificates;
private final StringFlag dockerImageRepoFlag;
private final BillingController billingController;
ApplicationController(Controller controller, CuratorDb curator, AccessControl accessControl, Clock clock,
FlagSource flagSource, BillingController billingController) {
this.controller = controller;
this.curator = curator;
this.accessControl = accessControl;
this.configServer = controller.serviceRegistry().configServer();
this.clock = clock;
this.artifactRepository = controller.serviceRegistry().artifactRepository();
this.applicationStore = controller.serviceRegistry().applicationStore();
this.dockerImageRepoFlag = PermanentFlags.DOCKER_IMAGE_REPO.bindTo(flagSource);
this.billingController = billingController;
deploymentTrigger = new DeploymentTrigger(controller, clock);
applicationPackageValidator = new ApplicationPackageValidator(controller);
endpointCertificates = new EndpointCertificates(controller,
controller.serviceRegistry().endpointCertificateProvider(),
controller.serviceRegistry().endpointCertificateValidator());
Once.after(Duration.ofMinutes(1), () -> {
Instant start = clock.instant();
int count = 0;
for (TenantAndApplicationId id : curator.readApplicationIds()) {
lockApplicationIfPresent(id, application -> {
for (InstanceName instance : application.get().deploymentSpec().instanceNames())
if (!application.get().instances().containsKey(instance))
application = withNewInstance(application, id.instance(instance));
store(application);
});
count++;
}
log.log(Level.INFO, String.format("Wrote %d applications in %s", count,
Duration.between(start, clock.instant())));
});
}
/** Returns the application with the given id, or null if it is not present */
public Optional<Application> getApplication(TenantAndApplicationId id) {
return curator.readApplication(id);
}
/** Returns the instance with the given id, or null if it is not present */
public Optional<Instance> getInstance(ApplicationId id) {
return getApplication(TenantAndApplicationId.from(id)).flatMap(application -> application.get(id.instance()));
}
/**
* Triggers reindexing for the given document types in the given clusters, for the given application.
*
* If no clusters are given, reindexing is triggered for the entire application; otherwise
* if no documents types are given, reindexing is triggered for all given clusters; otherwise
* reindexing is triggered for the cartesian product of the given clusters and document types.
*/
public void reindex(ApplicationId id, ZoneId zoneId, List<String> clusterNames, List<String> documentTypes, boolean indexedOnly) {
configServer.reindex(new DeploymentId(id, zoneId), clusterNames, documentTypes, indexedOnly);
}
/** Returns the reindexing status for the given application in the given zone. */
public ApplicationReindexing applicationReindexing(ApplicationId id, ZoneId zoneId) {
return configServer.getReindexing(new DeploymentId(id, zoneId));
}
/** Enables reindexing for the given application in the given zone. */
public void enableReindexing(ApplicationId id, ZoneId zoneId) {
configServer.enableReindexing(new DeploymentId(id, zoneId));
}
/** Disables reindexing for the given application in the given zone. */
public void disableReindexing(ApplicationId id, ZoneId zoneId) {
configServer.disableReindexing(new DeploymentId(id, zoneId));
}
/**
* Returns the application with the given id
*
* @throws IllegalArgumentException if it does not exist
*/
public Application requireApplication(TenantAndApplicationId id) {
return getApplication(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
}
/**
* Returns the instance with the given id
*
* @throws IllegalArgumentException if it does not exist
*/
public Instance requireInstance(ApplicationId id) {
return getInstance(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
}
/** Returns a snapshot of all applications */
public List<Application> asList() {
return curator.readApplications(false);
}
/**
* Returns a snapshot of all readable applications. Unlike {@link ApplicationController
* applications that cannot currently be read (e.g. due to serialization issues) and may return an incomplete
* snapshot.
*
* This should only be used in cases where acting on a subset of applications is better than none.
*/
public List<Application> readable() {
return curator.readApplications(true);
}
/** Returns the ID of all known applications. */
public List<TenantAndApplicationId> idList() {
return curator.readApplicationIds();
}
/** Returns a snapshot of all applications of a tenant */
public List<Application> asList(TenantName tenant) {
return curator.readApplications(tenant);
}
public ArtifactRepository artifacts() { return artifactRepository; }
public ApplicationStore applicationStore() { return applicationStore; }
/** Returns all currently reachable content clusters among the given deployments. */
public Map<ZoneId, List<String>> reachableContentClustersByZone(Collection<DeploymentId> ids) {
Map<ZoneId, List<String>> clusters = new TreeMap<>(Comparator.comparing(ZoneId::value));
for (DeploymentId id : ids)
if (isHealthy(id))
clusters.put(id.zoneId(), List.copyOf(configServer.getContentClusters(id)));
return Collections.unmodifiableMap(clusters);
}
/** Reads the oldest installed platform for the given application and zone from job history, or a node repo. */
private Optional<Version> oldestInstalledPlatform(JobStatus job) {
Version oldest = null;
for (Run run : job.runs().descendingMap().values()) {
Version version = run.versions().targetPlatform();
if (oldest == null || version.isBefore(oldest))
oldest = version;
if (run.status() == RunStatus.success)
return Optional.of(oldest);
}
return oldestInstalledPlatform(job.id());
}
/** Reads the oldest installed platform for the given application and zone from the node repo of that zone. */
private Optional<Version> oldestInstalledPlatform(JobId job) {
return configServer.nodeRepository().list(job.type().zone(controller.system()),
job.application(),
EnumSet.of(active, reserved))
.stream()
.map(Node::currentVersion)
.filter(version -> ! version.isEmpty())
.min(naturalOrder());
}
/** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */
public Version oldestInstalledPlatform(TenantAndApplicationId id) {
return controller.jobController().deploymentStatus(requireApplication(id)).jobs()
.production().asList().stream()
.map(this::oldestInstalledPlatform)
.flatMap(Optional::stream)
.min(naturalOrder())
.orElse(controller.readSystemVersion());
}
/**
* Creates a new application for an existing tenant.
*
* @throws IllegalArgumentException if the application already exists
*/
public Application createApplication(TenantAndApplicationId id, Credentials credentials) {
try (Lock lock = lock(id)) {
if (getApplication(id).isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
if (getApplication(dashToUnderscore(id)).isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
if (controller.tenants().get(id.tenant()).isEmpty())
throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
accessControl.createApplication(id, credentials);
LockedApplication locked = new LockedApplication(new Application(id, clock.instant()), lock);
store(locked);
log.info("Created " + locked);
return locked.get();
}
}
/**
* Creates a new instance for an existing application.
*
* @throws IllegalArgumentException if the instance already exists, or has an invalid instance name.
*/
public void createInstance(ApplicationId id) {
lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
store(withNewInstance(application, id));
});
}
/** Fetches the requested application package from the artifact store(s). */
public ApplicationPackage getApplicationPackage(ApplicationId id, ApplicationVersion version) {
return new ApplicationPackage(applicationStore.get(id.tenant(), id.application(), version));
}
/** Returns given application with a new instance */
public LockedApplication withNewInstance(LockedApplication application, ApplicationId instance) {
if (instance.instance().isTester())
throw new IllegalArgumentException("'" + instance + "' is a tester application!");
InstanceId.validate(instance.instance().value());
if (getInstance(instance).isPresent())
throw new IllegalArgumentException("Could not create '" + instance + "': Instance already exists");
if (getInstance(dashToUnderscore(instance)).isPresent())
throw new IllegalArgumentException("Could not create '" + instance + "': Instance " + dashToUnderscore(instance) + " already exists");
log.info("Created " + instance);
return application.withNewInstance(instance.instance());
}
/** Deploys an application package for an existing application instance. */
public ActivateResult deploy(JobId job, boolean deploySourceVersions) {
if (job.application().instance().isTester())
throw new IllegalArgumentException("'" + job.application() + "' is a tester application!");
TenantAndApplicationId applicationId = TenantAndApplicationId.from(job.application());
ZoneId zone = job.type().zone(controller.system());
try (Lock deploymentLock = lockForDeployment(job.application(), zone)) {
Set<ContainerEndpoint> containerEndpoints;
Optional<EndpointCertificateMetadata> endpointCertificateMetadata;
Optional<TenantRoles> tenantRoles = Optional.empty();
Run run = controller.jobController().last(job)
.orElseThrow(() -> new IllegalStateException("No known run of '" + job + "'"));
if (run.hasEnded())
throw new IllegalStateException("No deployment expected for " + job + " now, as no job is running");
Version platform = run.versions().sourcePlatform().filter(__ -> deploySourceVersions).orElse(run.versions().targetPlatform());
ApplicationVersion revision = run.versions().sourceApplication().filter(__ -> deploySourceVersions).orElse(run.versions().targetApplication());
ApplicationPackage applicationPackage = getApplicationPackage(job.application(), zone, revision);
try (Lock lock = lock(applicationId)) {
LockedApplication application = new LockedApplication(requireApplication(applicationId), lock);
Instance instance = application.get().require(job.application().instance());
rejectOldChange(instance, platform, revision, job, zone);
if ( ! applicationPackage.trustedCertificates().isEmpty()
&& run.testerCertificate().isPresent())
applicationPackage = applicationPackage.withTrustedCertificate(run.testerCertificate().get());
endpointCertificateMetadata = endpointCertificates.getMetadata(instance, zone, applicationPackage.deploymentSpec().instance(instance.name()));
containerEndpoints = controller.routing().containerEndpointsOf(application.get(), job.application().instance(), zone);
}
ActivateResult result = deploy(job.application(), applicationPackage, zone, platform, containerEndpoints, endpointCertificateMetadata, tenantRoles);
var quotaUsage = deploymentQuotaUsage(zone, job.application());
NotificationSource source = zone.environment().isManuallyDeployed() ?
NotificationSource.from(new DeploymentId(job.application(), zone)) : NotificationSource.from(applicationId);
List<String> warnings = Optional.ofNullable(result.prepareResponse().log)
.map(logs -> logs.stream()
.filter(log -> log.applicationPackage)
.filter(log -> LogLevel.parse(log.level).intValue() >= Level.WARNING.intValue())
.map(log -> log.message)
.sorted()
.distinct()
.collect(Collectors.toList()))
.orElseGet(List::of);
if (warnings.isEmpty()) controller.notificationsDb().removeNotification(source, Notification.Type.applicationPackage);
else controller.notificationsDb().setNotification(source, Notification.Type.applicationPackage, Notification.Level.warning, warnings);
lockApplicationOrThrow(applicationId, application ->
store(application.with(job.application().instance(),
instance -> instance.withNewDeployment(zone, revision, platform,
clock.instant(), warningsFrom(result),
quotaUsage))));
return result;
}
}
/** Stores the deployment spec and validation overrides from the application package, and runs cleanup. */
/** Deploy a system application to given zone */
public void deploy(SystemApplication application, ZoneId zone, Version version) {
if (application.hasApplicationPackage()) {
deploySystemApplicationPackage(application, zone, version);
} else {
configServer.nodeRepository().upgrade(zone, application.nodeType(), version);
}
}
/** Deploy a system application to given zone */
public ActivateResult deploySystemApplicationPackage(SystemApplication application, ZoneId zone, Version version) {
if (application.hasApplicationPackage()) {
ApplicationPackage applicationPackage = new ApplicationPackage(
artifactRepository.getSystemApplicationPackage(application.id(), zone, version)
);
return deploy(application.id(), applicationPackage, zone, version, Set.of(), /* No application cert */ Optional.empty(), Optional.empty());
} else {
throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString());
}
}
/** Deploys the given tester application to the given zone. */
public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, Version platform) {
return deploy(tester.id(), applicationPackage, zone, platform, Set.of(), /* No application cert for tester*/ Optional.empty(), Optional.empty());
}
private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage,
ZoneId zone, Version platform, Set<ContainerEndpoint> endpoints,
Optional<EndpointCertificateMetadata> endpointCertificateMetadata,
Optional<TenantRoles> tenantRoles) {
try {
Optional<DockerImage> dockerImageRepo = Optional.ofNullable(
dockerImageRepoFlag
.with(FetchVector.Dimension.ZONE_ID, zone.value())
.with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm())
.value())
.filter(s -> !s.isBlank())
.map(DockerImage::fromString);
Optional<AthenzDomain> domain = controller.tenants().get(application.tenant())
.filter(tenant-> tenant instanceof AthenzTenant)
.map(tenant -> ((AthenzTenant)tenant).domain());
if (zone.environment().isManuallyDeployed())
controller.applications().applicationStore().putMeta(new DeploymentId(application, zone),
clock.instant(),
applicationPackage.metaDataZip());
Quota deploymentQuota = DeploymentQuotaCalculator.calculate(billingController.getQuota(application.tenant()),
asList(application.tenant()), application, zone, applicationPackage.deploymentSpec());
List<TenantSecretStore> tenantSecretStores = controller.tenants()
.get(application.tenant())
.filter(tenant-> tenant instanceof CloudTenant)
.map(tenant -> ((CloudTenant) tenant).tenantSecretStores())
.orElse(List.of());
List<X509Certificate> operatorCertificates = controller.supportAccess().activeGrantsFor(new DeploymentId(application, zone)).stream()
.map(SupportAccessGrant::certificate)
.collect(toList());
ConfigServer.PreparedApplication preparedApplication =
configServer.deploy(new DeploymentData(application, zone, applicationPackage.zippedContent(), platform,
endpoints, endpointCertificateMetadata, dockerImageRepo, domain,
tenantRoles, deploymentQuota, tenantSecretStores, operatorCertificates));
return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
applicationPackage.zippedContent().length);
} finally {
controller.routing().policies().refresh(application, applicationPackage.deploymentSpec(), zone);
}
}
private LockedApplication withoutDeletedDeployments(LockedApplication application, InstanceName instance) {
DeploymentSpec deploymentSpec = application.get().deploymentSpec();
List<ZoneId> deploymentsToRemove = application.get().require(instance).productionDeployments().values().stream()
.map(Deployment::zone)
.filter(zone -> deploymentSpec.instance(instance).isEmpty()
|| ! deploymentSpec.requireInstance(instance).deploysTo(zone.environment(),
zone.region()))
.collect(toList());
if (deploymentsToRemove.isEmpty())
return application;
if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get().require(instance) +
" is deployed in " +
deploymentsToRemove.stream()
.map(zone -> zone.region().value())
.collect(joining(", ")) +
", but does not include " +
(deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
" in deployment.xml. " +
ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval));
boolean removeInstance = ! deploymentSpec.instanceNames().contains(instance)
&& application.get().require(instance).deployments().size() == deploymentsToRemove.size();
for (ZoneId zone : deploymentsToRemove)
application = deactivate(application, instance, zone);
if (removeInstance)
application = application.without(instance);
return application;
}
/**
* Deletes the the given application. All known instances of the applications will be deleted.
*
* @throws IllegalArgumentException if the application has deployments or the caller is not authorized
*/
public void deleteApplication(TenantAndApplicationId id, Credentials credentials) {
lockApplicationOrThrow(id, application -> {
var deployments = application.get().instances().values().stream()
.filter(instance -> ! instance.deployments().isEmpty())
.collect(toMap(instance -> instance.name(),
instance -> instance.deployments().keySet().stream()
.map(ZoneId::toString)
.collect(joining(", "))));
if ( ! deployments.isEmpty())
throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments: " + deployments);
for (Instance instance : application.get().instances().values()) {
controller.routing().removeEndpointsInDns(application.get(), instance.name());
application = application.without(instance.name());
}
applicationStore.removeAll(id.tenant(), id.application());
applicationStore.removeAllTesters(id.tenant(), id.application());
applicationStore.putMetaTombstone(id.tenant(), id.application(), clock.instant());
accessControl.deleteApplication(id, credentials);
curator.removeApplication(id);
controller.jobController().collectGarbage();
controller.notificationsDb().removeNotifications(NotificationSource.from(id));
log.info("Deleted " + id);
});
}
/**
 * Deletes the given application instance.
 *
 * @throws IllegalArgumentException if the instance has deployments, is declared in deployment.xml,
 *                                  or the caller is not authorized
 * @throws NotExistsException if the instance does not exist
 */
public void deleteInstance(ApplicationId instanceId) {
    if (getInstance(instanceId).isEmpty())
        throw new NotExistsException("Could not delete instance '" + instanceId + "': Instance not found");
    lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> {
        // An instance with active deployments must be deactivated everywhere first.
        if ( ! application.get().require(instanceId.instance()).deployments().isEmpty())
            throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments in: " +
                                               application.get().require(instanceId.instance()).deployments().keySet().stream().map(ZoneId::toString)
                                                          .sorted().collect(joining(", ")));
        // Instances declared in deployment.xml would just be recreated; require removal there first.
        if ( ! application.get().deploymentSpec().equals(DeploymentSpec.empty)
             && application.get().deploymentSpec().instanceNames().contains(instanceId.instance()))
            throw new IllegalArgumentException("Can not delete '" + instanceId + "', which is specified in 'deployment.xml'; remove it there first");
        controller.routing().removeEndpointsInDns(application.get(), instanceId.instance());
        curator.writeApplication(application.without(instanceId.instance()).get());
        controller.jobController().collectGarbage();
        controller.notificationsDb().removeNotifications(NotificationSource.from(instanceId));
        log.info("Deleted " + instanceId);
    });
}
/**
 * Replaces any previous version of this application by this instance.
 * Callers must hold the lock carried by the given {@code LockedApplication}.
 *
 * @param application a locked application to store
 */
public void store(LockedApplication application) {
    curator.writeApplication(application.get());
}
/**
 * Acquires a locked application to modify and store, if there is an application with the given id.
 * Does nothing when the application does not exist.
 *
 * @param applicationId ID of the application to lock and get.
 * @param action Function which acts on the locked application.
 */
public void lockApplicationIfPresent(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
    try (Lock lock = lock(applicationId)) {
        getApplication(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
    }
}
/**
 * Acquires a locked application to modify and store, or throws an exception if no application has the given id.
 *
 * @param applicationId ID of the application to lock and require.
 * @param action Function which acts on the locked application.
 * @throws IllegalArgumentException when application does not exist.
 */
public void lockApplicationOrThrow(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
    try (Lock lock = lock(applicationId)) {
        action.accept(new LockedApplication(requireApplication(applicationId), lock));
    }
}
/**
 * Tells the config server to schedule a restart of all matching nodes in this deployment.
 *
 * @param restartFilter variables to filter which nodes to restart
 */
public void restart(DeploymentId deploymentId, RestartFilter restartFilter) {
    configServer.restart(deploymentId, restartFilter);
}
/**
 * Asks the config server whether this deployment is currently healthy, i.e., serving traffic as usual.
 * If this cannot be ascertained, we must assume it is not.
 */
public boolean isHealthy(DeploymentId deploymentId) {
    try {
        return ! isSuspended(deploymentId);
    }
    catch (RuntimeException e) {
        // Failure to read suspension status counts as unhealthy; log the cause and carry on.
        log.log(Level.WARNING, "Failed getting suspension status of " + deploymentId + ": " + Exceptions.toMessageString(e));
        return false;
    }
}
/**
 * Asks the config server whether this deployment is currently <i>suspended</i>:
 * Not in a state where it should receive traffic.
 */
public boolean isSuspended(DeploymentId deploymentId) {
    return configServer.isSuspended(deploymentId);
}
/** Sets suspension status of the given deployment in its zone, by delegating to the config server. */
public void setSuspension(DeploymentId deploymentId, boolean suspend) {
    configServer.setSuspension(deploymentId, suspend);
}
/** Deactivates the given instance's deployment in the given zone, and stores the updated application. */
public void deactivate(ApplicationId id, ZoneId zone) {
    lockApplicationOrThrow(TenantAndApplicationId.from(id),
                           application -> store(deactivate(application, id.instance(), zone)));
}
/**
 * Deactivates a locked application without storing it.
 *
 * @return the application with the deployment in the given zone removed
 */
private LockedApplication deactivate(LockedApplication application, InstanceName instanceName, ZoneId zone) {
    DeploymentId id = new DeploymentId(application.get().id().instance(instanceName), zone);
    try {
        configServer.deactivate(id);
    } finally {
        // Even if deactivation fails, refresh routing policies and clean up zone-scoped state.
        controller.routing().policies().refresh(application.get().id().instance(instanceName), application.get().deploymentSpec(), zone);
        if (zone.environment().isManuallyDeployed())
            applicationStore.putMetaTombstone(id, clock.instant());
        if (!zone.environment().isTest())
            controller.notificationsDb().removeNotifications(NotificationSource.from(id));
    }
    return application.with(instanceName, instance -> instance.withoutDeploymentIn(zone));
}
public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }
/**
 * Returns a lock which provides exclusive rights to changing this application.
 * Any operation which stores an application need to first acquire this lock, then read, modify
 * and store the application, and finally release (close) the lock.
 */
Lock lock(TenantAndApplicationId application) {
    return curator.lock(application);
}
/**
 * Returns a lock which provides exclusive rights to deploying this application to the given zone.
 */
private Lock lockForDeployment(ApplicationId application, ZoneId zone) {
    return curator.lockForDeployment(application, zone);
}
/**
 * Verifies that the application can be deployed to the tenant, following these rules:
 *
 * 1. Verify that the Athenz service can be launched by the config server
 * 2. If the principal is given, verify that the principal is tenant admin or admin of the tenant domain
 * 3. If the principal is not given, verify that the Athenz domain of the tenant equals Athenz domain given in deployment.xml
 *
 * @param tenantName tenant where application should be deployed
 * @param applicationPackage application package
 * @param deployer principal initiating the deployment, possibly empty
 */
public void verifyApplicationIdentityConfiguration(TenantName tenantName, Optional<InstanceName> instanceName, Optional<ZoneId> zoneId, ApplicationPackage applicationPackage, Optional<Principal> deployer) {
    Optional<AthenzDomain> identityDomain = applicationPackage.deploymentSpec().athenzDomain()
                                                              .map(domain -> new AthenzDomain(domain.value()));
    // No Athenz domain declared: nothing to verify.
    if(identityDomain.isEmpty()) {
        return;
    }
    if(! (accessControl instanceof AthenzFacade)) {
        throw new IllegalArgumentException("Athenz domain and service specified in deployment.xml, but not supported by system.");
    }
    verifyAllowedLaunchAthenzService(applicationPackage.deploymentSpec());
    Optional<AthenzUser> athenzUser = getUser(deployer);
    if (athenzUser.isPresent()) {
        // A user is deploying: the instance-level service takes precedence over the global one.
        var zone = zoneId.orElseThrow(() -> new IllegalArgumentException("Unable to evaluate access, no zone provided in deployment"));
        var serviceToLaunch = instanceName
                .flatMap(instance -> applicationPackage.deploymentSpec().instance(instance))
                .flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region()))
                .or(() -> applicationPackage.deploymentSpec().athenzService())
                .map(service -> new AthenzService(identityDomain.get(), service.value()));
        if(serviceToLaunch.isPresent()) {
            // The user must either be allowed to launch the service, or be a tenant domain admin.
            if (
                    ! ((AthenzFacade) accessControl).canLaunch(athenzUser.get(), serviceToLaunch.get()) &&
                    ! ((AthenzFacade) accessControl).hasTenantAdminAccess(athenzUser.get(), identityDomain.get())
            ) {
                throw new IllegalArgumentException("User " + athenzUser.get().getFullName() + " is not allowed to launch " +
                                                   "service " + serviceToLaunch.get().getFullName() + ". " +
                                                   "Please reach out to the domain admin.");
            }
        } else {
            throw new IllegalArgumentException("Athenz domain configured, but no service defined for deployment to " + zone.value());
        }
    } else {
        // Not a user: require that the tenant's Athenz domain matches the one in deployment.xml.
        Tenant tenant = controller.tenants().require(tenantName);
        AthenzDomain tenantDomain = ((AthenzTenant) tenant).domain();
        if ( ! Objects.equals(tenantDomain, identityDomain.get()))
            throw new IllegalArgumentException("Athenz domain in deployment.xml: [" + identityDomain.get().getName() + "] " +
                                               "must match tenant domain: [" + tenantDomain.getName() + "]");
    }
}
/**
 * Rejects a production deployment which would downgrade the currently deployed platform
 * (unless the change is pinned) or application version (unless unknown in a CD system).
 */
private void rejectOldChange(Instance instance, Version platform, ApplicationVersion revision, JobId job, ZoneId zone) {
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null) return;                 // Nothing deployed in this zone yet.
    if (!zone.environment().isProduction()) return; // Only guard production zones.
    boolean platformIsOlder = platform.compareTo(deployment.version()) < 0 && !instance.change().isPinned();
    boolean revisionIsOlder = revision.compareTo(deployment.applicationVersion()) < 0 &&
                              !(revision.isUnknown() && controller.system().isCd());
    if (platformIsOlder || revisionIsOlder)
        throw new IllegalArgumentException(String.format("Rejecting deployment of application %s to %s, as the requested versions (platform: %s, application: %s)" +
                                                         " are older than the currently deployed (platform: %s, application: %s).",
                                                         job.application(), zone, platform, revision, deployment.version(), deployment.applicationVersion()));
}
/**
 * Returns the given id with dashes in the application name replaced by underscores.
 * Used to detect collisions between application names which differ only in this respect.
 */
private TenantAndApplicationId dashToUnderscore(TenantAndApplicationId id) {
    // replace(char, char) replaces literally; replaceAll would needlessly compile a regex.
    return TenantAndApplicationId.from(id.tenant().value(), id.application().value().replace('-', '_'));
}
/** Returns the given id with dashes in the application name replaced by underscores, keeping the instance. */
private ApplicationId dashToUnderscore(ApplicationId id) {
    return dashToUnderscore(TenantAndApplicationId.from(id)).instance(id.instance());
}
/** Computes the quota usage of the given application's deployment in the given zone, from node repository data. */
private QuotaUsage deploymentQuotaUsage(ZoneId zoneId, ApplicationId applicationId) {
    var application = configServer.nodeRepository().getApplication(zoneId, applicationId);
    return DeploymentQuotaCalculator.calculateQuotaUsage(application);
}
/**
 * Fetches the application package for the given revision: from the dev store when the revision
 * is unknown (manual dev deployments), otherwise from the regular application store.
 */
private ApplicationPackage getApplicationPackage(ApplicationId application, ZoneId zone, ApplicationVersion revision) {
    return new ApplicationPackage(revision.isUnknown() ? applicationStore.getDev(application, zone)
                                                       : applicationStore.get(application.tenant(), application.application(), revision));
}
/*
 * Returns the AthenzUser behind this principal, or Optional.empty if the principal is absent
 * or does not represent an Athenz user.
 */
private Optional<AthenzUser> getUser(Optional<Principal> deployer) {
    if (deployer.isEmpty() || ! (deployer.get() instanceof AthenzPrincipal))
        return Optional.empty();
    AthenzIdentity identity = ((AthenzPrincipal) deployer.get()).getIdentity();
    return identity instanceof AthenzUser ? Optional.of((AthenzUser) identity)
                                          : Optional.empty();
}
/*
 * Verifies that the configured Athenz service (if any) can be launched by the config server
 * identity of every reachable zone, for both the global service and each instance-level service.
 */
private void verifyAllowedLaunchAthenzService(DeploymentSpec deploymentSpec) {
    deploymentSpec.athenzDomain().ifPresent(domain -> {
        controller.zoneRegistry().zones().reachable().ids().forEach(zone -> {
            AthenzIdentity configServerAthenzIdentity = controller.zoneRegistry().getConfigServerHttpsIdentity(zone);
            // Global service declared at the top level of deployment.xml, if any.
            deploymentSpec.athenzService().ifPresent(service -> {
                verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()));
            });
            // Per-instance service overrides, resolved for this zone's environment and region.
            deploymentSpec.instances().forEach(spec -> {
                spec.athenzService(zone.environment(), zone.region()).ifPresent(service -> {
                    verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()));
                });
            });
        });
    });
}
/** Throws unless the given identity is allowed to launch the given Athenz service. */
private void verifyAthenzServiceCanBeLaunchedBy(AthenzIdentity configServerAthenzIdentity, AthenzService athenzService) {
    if ( ! ((AthenzFacade) accessControl).canLaunch(configServerAthenzIdentity, athenzService))
        throw new IllegalArgumentException("Not allowed to launch Athenz service " + athenzService.getFullName());
}
/** Returns the latest known version within the given major, or empty if none exists. */
public Optional<Version> lastCompatibleVersion(int targetMajorVersion) {
    Optional<Version> latest = Optional.empty();
    for (VespaVersion vespaVersion : controller.readVersionStatus().versions()) {
        Version candidate = vespaVersion.versionNumber();
        if (candidate.getMajor() != targetMajorVersion) continue;
        if (latest.isEmpty() || candidate.compareTo(latest.get()) > 0)
            latest = Optional.of(candidate);
    }
    return latest;
}
/** Extracts the deployment warnings metric from the given deployment result. */
private static Map<DeploymentMetrics.Warning, Integer> warningsFrom(ActivateResult result) {
    if (result.prepareResponse().log == null) return Map.of();
    // Count log entries at warning level; all are aggregated under the 'all' key.
    int count = 0;
    for (Log log : result.prepareResponse().log) {
        if ("warn".equalsIgnoreCase(log.level) || "warning".equalsIgnoreCase(log.level))
            count++;
    }
    return count == 0 ? Map.of() : Map.of(DeploymentMetrics.Warning.all, count);
}
}

class ApplicationController {
private static final Logger log = Logger.getLogger(ApplicationController.class.getName());

/** The controller owning this */
private final Controller controller;

/** For persistence */
private final CuratorDb curator;

private final ArtifactRepository artifactRepository;      // system application packages
private final ApplicationStore applicationStore;          // tenant application packages and metadata
private final AccessControl accessControl;
private final ConfigServer configServer;
private final Clock clock;                                // injected for testable time
private final DeploymentTrigger deploymentTrigger;
private final ApplicationPackageValidator applicationPackageValidator;
private final EndpointCertificates endpointCertificates;
private final StringFlag dockerImageRepoFlag;
private final BillingController billingController;
/**
 * Creates this, wiring collaborators from the controller's service registry, and schedules a
 * one-time rewrite of all stored applications shortly after startup.
 */
ApplicationController(Controller controller, CuratorDb curator, AccessControl accessControl, Clock clock,
                      FlagSource flagSource, BillingController billingController) {
    this.controller = controller;
    this.curator = curator;
    this.accessControl = accessControl;
    this.configServer = controller.serviceRegistry().configServer();
    this.clock = clock;
    this.artifactRepository = controller.serviceRegistry().artifactRepository();
    this.applicationStore = controller.serviceRegistry().applicationStore();
    this.dockerImageRepoFlag = PermanentFlags.DOCKER_IMAGE_REPO.bindTo(flagSource);
    this.billingController = billingController;
    deploymentTrigger = new DeploymentTrigger(controller, clock);
    applicationPackageValidator = new ApplicationPackageValidator(controller);
    endpointCertificates = new EndpointCertificates(controller,
                                                    controller.serviceRegistry().endpointCertificateProvider(),
                                                    controller.serviceRegistry().endpointCertificateValidator());
    // Rewrite all applications once, creating any instances declared in deployment.xml
    // which are missing from the stored application.
    Once.after(Duration.ofMinutes(1), () -> {
        Instant start = clock.instant();
        int count = 0;
        for (TenantAndApplicationId id : curator.readApplicationIds()) {
            lockApplicationIfPresent(id, application -> {
                for (InstanceName instance : application.get().deploymentSpec().instanceNames())
                    if (!application.get().instances().containsKey(instance))
                        application = withNewInstance(application, id.instance(instance));
                store(application);
            });
            count++;
        }
        log.log(Level.INFO, String.format("Wrote %d applications in %s", count,
                                          Duration.between(start, clock.instant())));
    });
}
/** Returns the application with the given id, or empty if it is not present */
public Optional<Application> getApplication(TenantAndApplicationId id) {
    return curator.readApplication(id);
}
/** Returns the instance with the given id, or empty if it is not present */
public Optional<Instance> getInstance(ApplicationId id) {
    return getApplication(TenantAndApplicationId.from(id)).flatMap(application -> application.get(id.instance()));
}
/**
 * Triggers reindexing for the given document types in the given clusters, for the given application.
 *
 * If no clusters are given, reindexing is triggered for the entire application; otherwise
 * if no documents types are given, reindexing is triggered for all given clusters; otherwise
 * reindexing is triggered for the cartesian product of the given clusters and document types.
 */
public void reindex(ApplicationId id, ZoneId zoneId, List<String> clusterNames, List<String> documentTypes, boolean indexedOnly) {
    configServer.reindex(new DeploymentId(id, zoneId), clusterNames, documentTypes, indexedOnly);
}
/** Returns the reindexing status for the given application in the given zone. */
public ApplicationReindexing applicationReindexing(ApplicationId id, ZoneId zoneId) {
    return configServer.getReindexing(new DeploymentId(id, zoneId));
}
/** Enables reindexing for the given application in the given zone. */
public void enableReindexing(ApplicationId id, ZoneId zoneId) {
    configServer.enableReindexing(new DeploymentId(id, zoneId));
}
/** Disables reindexing for the given application in the given zone. */
public void disableReindexing(ApplicationId id, ZoneId zoneId) {
    configServer.disableReindexing(new DeploymentId(id, zoneId));
}
/**
 * Returns the application with the given id
 *
 * @throws IllegalArgumentException if it does not exist
 */
public Application requireApplication(TenantAndApplicationId id) {
    return getApplication(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
}
/**
 * Returns the instance with the given id
 *
 * @throws IllegalArgumentException if it does not exist
 */
public Instance requireInstance(ApplicationId id) {
    return getInstance(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
}
/** Returns a snapshot of all applications; throws if any application cannot be read. */
public List<Application> asList() {
    return curator.readApplications(false);
}
/**
 * Returns a snapshot of all readable applications. Unlike {@link #asList()} this tolerates
 * applications that cannot currently be read (e.g. due to serialization issues) and may return
 * an incomplete snapshot.
 *
 * This should only be used in cases where acting on a subset of applications is better than none.
 */
public List<Application> readable() {
    return curator.readApplications(true);
}
/** Returns the ID of all known applications. */
public List<TenantAndApplicationId> idList() {
    return curator.readApplicationIds();
}
/** Returns a snapshot of all applications of a tenant */
public List<Application> asList(TenantName tenant) {
    return curator.readApplications(tenant);
}
public ArtifactRepository artifacts() { return artifactRepository; }
public ApplicationStore applicationStore() { return applicationStore; }
/** Returns all currently reachable content clusters among the given deployments. */
public Map<ZoneId, List<String>> reachableContentClustersByZone(Collection<DeploymentId> ids) {
    // Sorted by zone value for stable iteration order.
    Map<ZoneId, List<String>> clusters = new TreeMap<>(Comparator.comparing(ZoneId::value));
    for (DeploymentId id : ids)
        if (isHealthy(id))  // Skip deployments we cannot reach or which are suspended.
            clusters.put(id.zoneId(), List.copyOf(configServer.getContentClusters(id)));
    return Collections.unmodifiableMap(clusters);
}
/** Reads the oldest installed platform for the given application and zone from job history, or a node repo. */
private Optional<Version> oldestInstalledPlatform(JobStatus job) {
    // Walk runs from newest to oldest, tracking the oldest target platform seen; the first
    // successful run bounds how far back an older platform can still be installed.
    Version oldest = null;
    for (Run run : job.runs().descendingMap().values()) {
        Version version = run.versions().targetPlatform();
        if (oldest == null || version.isBefore(oldest))
            oldest = version;
        if (run.status() == RunStatus.success)
            return Optional.of(oldest);
    }
    // No successful run in history: fall back to asking the zone's node repository.
    return oldestInstalledPlatform(job.id());
}
/** Reads the oldest installed platform for the given application and zone from the node repo of that zone. */
private Optional<Version> oldestInstalledPlatform(JobId job) {
    return configServer.nodeRepository().list(job.type().zone(controller.system()),
                                              job.application(),
                                              EnumSet.of(active, reserved))
                       .stream()
                       .map(Node::currentVersion)
                       .filter(version -> ! version.isEmpty())
                       .min(naturalOrder());
}
/** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */
public Version oldestInstalledPlatform(TenantAndApplicationId id) {
    return controller.jobController().deploymentStatus(requireApplication(id)).jobs()
                     .production().asList().stream()
                     .map(this::oldestInstalledPlatform)
                     .flatMap(Optional::stream)
                     .min(naturalOrder())
                     .orElse(controller.readSystemVersion()); // No production nodes: use the system version.
}
/**
 * Creates a new application for an existing tenant.
 *
 * @throws IllegalArgumentException if the application already exists
 */
public Application createApplication(TenantAndApplicationId id, Credentials credentials) {
    try (Lock lock = lock(id)) {
        if (getApplication(id).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
        // Names differing only by dash/underscore would collide downstream; reject them too.
        if (getApplication(dashToUnderscore(id)).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
        com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
        if (controller.tenants().get(id.tenant()).isEmpty())
            throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
        accessControl.createApplication(id, credentials);
        LockedApplication locked = new LockedApplication(new Application(id, clock.instant()), lock);
        store(locked);
        log.info("Created " + locked);
        return locked.get();
    }
}
/**
 * Creates a new instance for an existing application.
 *
 * @throws IllegalArgumentException if the instance already exists, or has an invalid instance name.
 */
public void createInstance(ApplicationId id) {
    lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        store(withNewInstance(application, id));
    });
}
/** Fetches the requested application package from the artifact store(s). */
public ApplicationPackage getApplicationPackage(ApplicationId id, ApplicationVersion version) {
    return new ApplicationPackage(applicationStore.get(id.tenant(), id.application(), version));
}
/**
 * Returns the given application with a new instance added.
 *
 * @throws IllegalArgumentException if the instance is a tester, has an invalid name, or already exists
 */
public LockedApplication withNewInstance(LockedApplication application, ApplicationId instance) {
    if (instance.instance().isTester())
        throw new IllegalArgumentException("'" + instance + "' is a tester application!");
    InstanceId.validate(instance.instance().value());
    if (getInstance(instance).isPresent())
        throw new IllegalArgumentException("Could not create '" + instance + "': Instance already exists");
    // Names differing only by dash/underscore would collide downstream; reject them too.
    if (getInstance(dashToUnderscore(instance)).isPresent())
        throw new IllegalArgumentException("Could not create '" + instance + "': Instance " + dashToUnderscore(instance) + " already exists");
    log.info("Created " + instance);
    return application.withNewInstance(instance.instance());
}
/**
 * Deploys an application package for an existing application instance, as part of the given job's
 * currently active run.
 *
 * @throws IllegalArgumentException if the job belongs to a tester instance, or the deployment is a downgrade
 * @throws IllegalStateException if the job has no run, or its last run has already ended
 */
public ActivateResult deploy(JobId job, boolean deploySourceVersions) {
    if (job.application().instance().isTester())
        throw new IllegalArgumentException("'" + job.application() + "' is a tester application!");
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(job.application());
    ZoneId zone = job.type().zone(controller.system());
    // Deployment lock is taken before the application lock, and held across the config server call.
    try (Lock deploymentLock = lockForDeployment(job.application(), zone)) {
        Set<ContainerEndpoint> containerEndpoints;
        Optional<EndpointCertificateMetadata> endpointCertificateMetadata;
        Optional<TenantRoles> tenantRoles = Optional.empty();
        Run run = controller.jobController().last(job)
                            .orElseThrow(() -> new IllegalStateException("No known run of '" + job + "'"));
        if (run.hasEnded())
            throw new IllegalStateException("No deployment expected for " + job + " now, as no job is running");
        // Source versions are used when deploying the "old" side of an upgrade test.
        Version platform = run.versions().sourcePlatform().filter(__ -> deploySourceVersions).orElse(run.versions().targetPlatform());
        ApplicationVersion revision = run.versions().sourceApplication().filter(__ -> deploySourceVersions).orElse(run.versions().targetApplication());
        ApplicationPackage applicationPackage = getApplicationPackage(job.application(), zone, revision);
        // Briefly take the application lock to validate and collect endpoints/certificates.
        try (Lock lock = lock(applicationId)) {
            LockedApplication application = new LockedApplication(requireApplication(applicationId), lock);
            Instance instance = application.get().require(job.application().instance());
            rejectOldChange(instance, platform, revision, job, zone);
            if ( ! applicationPackage.trustedCertificates().isEmpty()
                 && run.testerCertificate().isPresent())
                applicationPackage = applicationPackage.withTrustedCertificate(run.testerCertificate().get());
            endpointCertificateMetadata = endpointCertificates.getMetadata(instance, zone, applicationPackage.deploymentSpec().instance(instance.name()));
            containerEndpoints = controller.routing().containerEndpointsOf(application.get(), job.application().instance(), zone);
        }
        // Deploy outside the application lock, then record the result.
        ActivateResult result = deploy(job.application(), applicationPackage, zone, platform, containerEndpoints, endpointCertificateMetadata, tenantRoles);
        var quotaUsage = deploymentQuotaUsage(zone, job.application());
        // Application-package warnings are surfaced as notifications, scoped per deployment for manual zones.
        NotificationSource source = zone.environment().isManuallyDeployed() ?
                NotificationSource.from(new DeploymentId(job.application(), zone)) : NotificationSource.from(applicationId);
        List<String> warnings = Optional.ofNullable(result.prepareResponse().log)
                                        .map(logs -> logs.stream()
                                                         .filter(log -> log.applicationPackage)
                                                         .filter(log -> LogLevel.parse(log.level).intValue() >= Level.WARNING.intValue())
                                                         .map(log -> log.message)
                                                         .sorted()
                                                         .distinct()
                                                         .collect(Collectors.toList()))
                                        .orElseGet(List::of);
        if (warnings.isEmpty()) controller.notificationsDb().removeNotification(source, Notification.Type.applicationPackage);
        else controller.notificationsDb().setNotification(source, Notification.Type.applicationPackage, Notification.Level.warning, warnings);
        lockApplicationOrThrow(applicationId, application ->
                store(application.with(job.application().instance(),
                                       instance -> instance.withNewDeployment(zone, revision, platform,
                                                                              clock.instant(), warningsFrom(result),
                                                                              quotaUsage))));
        return result;
    }
}
/** Deploys a system application to the given zone: via its application package if it has one, otherwise by upgrading its node type. */
public void deploy(SystemApplication application, ZoneId zone, Version version) {
    if (application.hasApplicationPackage()) {
        deploySystemApplicationPackage(application, zone, version);
    } else {
        configServer.nodeRepository().upgrade(zone, application.nodeType(), version);
    }
}
/**
 * Deploys the application package of a system application to the given zone.
 *
 * @throws RuntimeException if the system application has no application package
 */
public ActivateResult deploySystemApplicationPackage(SystemApplication application, ZoneId zone, Version version) {
    if (application.hasApplicationPackage()) {
        ApplicationPackage applicationPackage = new ApplicationPackage(
                artifactRepository.getSystemApplicationPackage(application.id(), zone, version)
        );
        return deploy(application.id(), applicationPackage, zone, version, Set.of(), /* No application cert */ Optional.empty(), Optional.empty());
    } else {
        throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString());
    }
}
/** Deploys the given tester application to the given zone. */
public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, Version platform) {
    return deploy(tester.id(), applicationPackage, zone, platform, Set.of(), /* No application cert for tester*/ Optional.empty(), Optional.empty());
}
/**
 * Prepares and activates the given application package in the given zone via the config server.
 * Routing policies are refreshed even when deployment fails.
 *
 * @return the result of the activation, including the package hash and prepare log
 */
private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage,
                              ZoneId zone, Version platform, Set<ContainerEndpoint> endpoints,
                              Optional<EndpointCertificateMetadata> endpointCertificateMetadata,
                              Optional<TenantRoles> tenantRoles) {
    try {
        // Optional docker image repo override, per zone and application, from a feature flag.
        Optional<DockerImage> dockerImageRepo = Optional.ofNullable(
                dockerImageRepoFlag
                        .with(FetchVector.Dimension.ZONE_ID, zone.value())
                        .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm())
                        .value())
                .filter(s -> !s.isBlank())
                .map(DockerImage::fromString);
        Optional<AthenzDomain> domain = controller.tenants().get(application.tenant())
                .filter(tenant-> tenant instanceof AthenzTenant)
                .map(tenant -> ((AthenzTenant)tenant).domain());
        // Manual deployments have no job run to carry metadata, so store it here.
        if (zone.environment().isManuallyDeployed())
            controller.applications().applicationStore().putMeta(new DeploymentId(application, zone),
                                                                 clock.instant(),
                                                                 applicationPackage.metaDataZip());
        Quota deploymentQuota = DeploymentQuotaCalculator.calculate(billingController.getQuota(application.tenant()),
                asList(application.tenant()), application, zone, applicationPackage.deploymentSpec());
        List<TenantSecretStore> tenantSecretStores = controller.tenants()
                .get(application.tenant())
                .filter(tenant-> tenant instanceof CloudTenant)
                .map(tenant -> ((CloudTenant) tenant).tenantSecretStores())
                .orElse(List.of());
        List<X509Certificate> operatorCertificates = controller.supportAccess().activeGrantsFor(new DeploymentId(application, zone)).stream()
                .map(SupportAccessGrant::certificate)
                .collect(toList());
        ConfigServer.PreparedApplication preparedApplication =
                configServer.deploy(new DeploymentData(application, zone, applicationPackage.zippedContent(), platform,
                                                       endpoints, endpointCertificateMetadata, dockerImageRepo, domain,
                                                       tenantRoles, deploymentQuota, tenantSecretStores, operatorCertificates));
        return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
                                  applicationPackage.zippedContent().length);
    } finally {
        // Refresh routing policies regardless of deployment outcome.
        controller.routing().policies().refresh(application, applicationPackage.deploymentSpec(), zone);
    }
}
/**
 * Removes production deployments of the given instance which are no longer declared in
 * deployment.xml, requiring the deployment-removal validation override to be in effect.
 * The instance itself is removed if it is no longer declared and has no remaining deployments.
 *
 * @return the application with the undeclared deployments (and possibly the instance) removed
 * @throws IllegalArgumentException if deployments would be removed without the validation override
 */
private LockedApplication withoutDeletedDeployments(LockedApplication application, InstanceName instance) {
    DeploymentSpec deploymentSpec = application.get().deploymentSpec();
    // Production deployments whose zone is no longer declared for this instance.
    List<ZoneId> deploymentsToRemove = application.get().require(instance).productionDeployments().values().stream()
                                                  .map(Deployment::zone)
                                                  .filter(zone -> deploymentSpec.instance(instance).isEmpty()
                                                                  || ! deploymentSpec.requireInstance(instance).deploysTo(zone.environment(),
                                                                                                                          zone.region()))
                                                  .collect(toList());
    if (deploymentsToRemove.isEmpty())
        return application;
    // Removing deployments is destructive; require an explicit, unexpired validation override.
    if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
        throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get().require(instance) +
                                           " is deployed in " +
                                           deploymentsToRemove.stream()
                                                              .map(zone -> zone.region().value())
                                                              .collect(joining(", ")) +
                                           ", but does not include " +
                                           (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
                                           " in deployment.xml. " +
                                           ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval));
    // Decide before deactivating: remove the instance if undeclared and all its deployments go away.
    boolean removeInstance = ! deploymentSpec.instanceNames().contains(instance)
                             && application.get().require(instance).deployments().size() == deploymentsToRemove.size();
    for (ZoneId zone : deploymentsToRemove)
        application = deactivate(application, instance, zone);
    if (removeInstance)
        application = application.without(instance);
    return application;
}
/**
 * Deletes the given application. All known instances of the application will be deleted.
 *
 * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
 */
public void deleteApplication(TenantAndApplicationId id, Credentials credentials) {
    lockApplicationOrThrow(id, application -> {
        // Refuse deletion while any instance still has active deployments; name them all in the error.
        var deployments = application.get().instances().values().stream()
                                     .filter(instance -> ! instance.deployments().isEmpty())
                                     .collect(toMap(instance -> instance.name(),
                                                    instance -> instance.deployments().keySet().stream()
                                                                        .map(ZoneId::toString)
                                                                        .collect(joining(", "))));
        if ( ! deployments.isEmpty())
            throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments: " + deployments);
        // Remove DNS endpoints for each instance before dropping it from the application.
        for (Instance instance : application.get().instances().values()) {
            controller.routing().removeEndpointsInDns(application.get(), instance.name());
            application = application.without(instance.name());
        }
        // Purge stored packages, testers and metadata, then access control, then persisted state.
        applicationStore.removeAll(id.tenant(), id.application());
        applicationStore.removeAllTesters(id.tenant(), id.application());
        applicationStore.putMetaTombstone(id.tenant(), id.application(), clock.instant());
        accessControl.deleteApplication(id, credentials);
        curator.removeApplication(id);
        controller.jobController().collectGarbage();
        controller.notificationsDb().removeNotifications(NotificationSource.from(id));
        log.info("Deleted " + id);
    });
}
/**
 * Deletes the given application instance.
 *
 * @throws IllegalArgumentException if the instance has deployments, is declared in deployment.xml,
 *                                  or the caller is not authorized
 * @throws NotExistsException if the instance does not exist
 */
public void deleteInstance(ApplicationId instanceId) {
    if (getInstance(instanceId).isEmpty())
        throw new NotExistsException("Could not delete instance '" + instanceId + "': Instance not found");
    lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> {
        // An instance with active deployments must be deactivated everywhere first.
        if ( ! application.get().require(instanceId.instance()).deployments().isEmpty())
            throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments in: " +
                                               application.get().require(instanceId.instance()).deployments().keySet().stream().map(ZoneId::toString)
                                                          .sorted().collect(joining(", ")));
        // Instances declared in deployment.xml would just be recreated; require removal there first.
        if ( ! application.get().deploymentSpec().equals(DeploymentSpec.empty)
             && application.get().deploymentSpec().instanceNames().contains(instanceId.instance()))
            throw new IllegalArgumentException("Can not delete '" + instanceId + "', which is specified in 'deployment.xml'; remove it there first");
        controller.routing().removeEndpointsInDns(application.get(), instanceId.instance());
        curator.writeApplication(application.without(instanceId.instance()).get());
        controller.jobController().collectGarbage();
        controller.notificationsDb().removeNotifications(NotificationSource.from(instanceId));
        log.info("Deleted " + instanceId);
    });
}
/**
 * Replaces any previously stored version of this application with the given locked instance.
 *
 * @param application a locked application to store; the caller must already hold its lock
 */
public void store(LockedApplication application) {
    curator.writeApplication(application.get());
}
/**
 * Acquires the lock for the given application id and, if such an application exists,
 * runs the given action on it while locked. Does nothing (beyond locking) otherwise.
 *
 * @param applicationId ID of the application to lock and get.
 * @param action Function which acts on the locked application.
 */
public void lockApplicationIfPresent(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
    try (Lock lock = lock(applicationId)) {
        var application = getApplication(applicationId);
        if (application.isPresent())
            action.accept(new LockedApplication(application.get(), lock));
    }
}
/**
 * Acquires the lock for the given application id and runs the given action on the
 * application while locked, failing if no such application exists.
 *
 * @param applicationId ID of the application to lock and require.
 * @param action Function which acts on the locked application.
 * @throws IllegalArgumentException when application does not exist.
 */
public void lockApplicationOrThrow(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
    try (Lock lock = lock(applicationId)) {
        var application = requireApplication(applicationId);
        action.accept(new LockedApplication(application, lock));
    }
}
/**
 * Tells the config server to schedule a restart of all matching nodes in this deployment.
 *
 * @param deploymentId the deployment whose nodes should be restarted
 * @param restartFilter variables to filter which nodes to restart
 */
public void restart(DeploymentId deploymentId, RestartFilter restartFilter) {
    configServer.restart(deploymentId, restartFilter);
}
/**
 * Asks the config server whether this deployment is currently healthy, i.e., serving
 * traffic as usual. If the status cannot be ascertained, the deployment is assumed unhealthy.
 */
public boolean isHealthy(DeploymentId deploymentId) {
    boolean suspended;
    try {
        suspended = isSuspended(deploymentId);
    }
    catch (RuntimeException e) {
        // Suspension status unavailable: log and report unhealthy rather than propagate.
        log.log(Level.WARNING, "Failed getting suspension status of " + deploymentId + ": " + Exceptions.toMessageString(e));
        return false;
    }
    return ! suspended;
}
/**
 * Asks the config server whether this deployment is currently <i>suspended</i>:
 * Not in a state where it should receive traffic.
 */
public boolean isSuspended(DeploymentId deploymentId) {
    return configServer.isSuspended(deploymentId);
}
/** Sets suspension status of the given deployment in its zone, by delegating to the config server. */
public void setSuspension(DeploymentId deploymentId, boolean suspend) {
    configServer.setSuspension(deploymentId, suspend);
}
/** Deactivates the given instance's deployment in the given zone, taking and releasing the application lock. */
public void deactivate(ApplicationId id, ZoneId zone) {
    lockApplicationOrThrow(TenantAndApplicationId.from(id),
                           application -> store(deactivate(application, id.instance(), zone)));
}
/**
 * Deactivates a locked application without storing it
 *
 * @param application the locked application owning the deployment
 * @param instanceName instance whose deployment should be removed
 * @param zone zone of the deployment to remove
 * @return the application with the deployment in the given zone removed
 */
private LockedApplication deactivate(LockedApplication application, InstanceName instanceName, ZoneId zone) {
    DeploymentId id = new DeploymentId(application.get().id().instance(instanceName), zone);
    try {
        configServer.deactivate(id);
    } finally {
        // Refresh routing and clean up metadata/notifications even if config server deactivation fails.
        controller.routing().policies().refresh(application.get().id().instance(instanceName), application.get().deploymentSpec(), zone);
        if (zone.environment().isManuallyDeployed())
            applicationStore.putMetaTombstone(id, clock.instant());
        if (!zone.environment().isTest())
            controller.notificationsDb().removeNotifications(NotificationSource.from(id));
    }
    return application.with(instanceName, instance -> instance.withoutDeploymentIn(zone));
}
/** Returns the deployment trigger owned by this controller. */
public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }
/**
 * Returns a lock which provides exclusive rights to changing this application.
 * Any operation which stores an application need to first acquire this lock, then read, modify
 * and store the application, and finally release (close) the lock.
 */
Lock lock(TenantAndApplicationId application) {
    return curator.lock(application);
}
/**
 * Returns a lock which provides exclusive rights to deploying this application to the given zone.
 */
private Lock lockForDeployment(ApplicationId application, ZoneId zone) {
    return curator.lockForDeployment(application, zone);
}
/**
 * Verifies that the application can be deployed to the tenant, following these rules:
 *
 * 1. Verify that the Athenz service can be launched by the config server
 * 2. If the principal is given, verify that the principal is tenant admin or admin of the tenant domain
 * 3. If the principal is not given, verify that the Athenz domain of the tenant equals Athenz domain given in deployment.xml
 *
 * @param tenantName tenant where application should be deployed
 * @param instanceName instance being deployed, if any
 * @param zoneId zone being deployed to, if any; required when a user principal is given
 * @param applicationPackage application package
 * @param deployer principal initiating the deployment, possibly empty
 */
public void verifyApplicationIdentityConfiguration(TenantName tenantName, Optional<InstanceName> instanceName, Optional<ZoneId> zoneId, ApplicationPackage applicationPackage, Optional<Principal> deployer) {
    Optional<AthenzDomain> identityDomain = applicationPackage.deploymentSpec().athenzDomain()
                                                              .map(domain -> new AthenzDomain(domain.value()));
    // Nothing to verify when no Athenz domain is declared in deployment.xml.
    if(identityDomain.isEmpty()) {
        return;
    }
    if(! (accessControl instanceof AthenzFacade)) {
        throw new IllegalArgumentException("Athenz domain and service specified in deployment.xml, but not supported by system.");
    }
    // Rule 1: the config server identity must be able to launch the configured service(s).
    verifyAllowedLaunchAthenzService(applicationPackage.deploymentSpec());
    Optional<AthenzUser> athenzUser = getUser(deployer);
    if (athenzUser.isPresent()) {
        // Rule 2: a user principal must be allowed to launch the service, or have tenant-domain admin access.
        var zone = zoneId.orElseThrow(() -> new IllegalArgumentException("Unable to evaluate access, no zone provided in deployment"));
        // Instance-specific service declaration takes precedence over the spec-wide default.
        var serviceToLaunch = instanceName
                .flatMap(instance -> applicationPackage.deploymentSpec().instance(instance))
                .flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region()))
                .or(() -> applicationPackage.deploymentSpec().athenzService())
                .map(service -> new AthenzService(identityDomain.get(), service.value()));
        if(serviceToLaunch.isPresent()) {
            if (
                    ! ((AthenzFacade) accessControl).canLaunch(athenzUser.get(), serviceToLaunch.get()) &&
                    ! ((AthenzFacade) accessControl).hasTenantAdminAccess(athenzUser.get(), identityDomain.get())
            ) {
                throw new IllegalArgumentException("User " + athenzUser.get().getFullName() + " is not allowed to launch " +
                                                   "service " + serviceToLaunch.get().getFullName() + ". " +
                                                   "Please reach out to the domain admin.");
            }
        } else {
            // Domain without a service is a misconfiguration for this zone.
            throw new IllegalArgumentException("Athenz domain configured, but no service defined for deployment to " + zone.value());
        }
    } else {
        // Rule 3: without a user principal, the tenant's own Athenz domain must match the declared one.
        Tenant tenant = controller.tenants().require(tenantName);
        AthenzDomain tenantDomain = ((AthenzTenant) tenant).domain();
        if ( ! Objects.equals(tenantDomain, identityDomain.get()))
            throw new IllegalArgumentException("Athenz domain in deployment.xml: [" + identityDomain.get().getName() + "] " +
                                               "must match tenant domain: [" + tenantDomain.getName() + "]");
    }
}
/**
 * Throws if the given job would deploy an older platform or application version than the one
 * already live in the given production zone. Non-production zones, and zones with no existing
 * deployment, are never rejected.
 */
private void rejectOldChange(Instance instance, Version platform, ApplicationVersion revision, JobId job, ZoneId zone) {
    Deployment existing = instance.deployments().get(zone);
    if (existing == null) return;
    if ( ! zone.environment().isProduction()) return;

    // A pinned change may roll the platform back; unknown revisions are tolerated in CD systems.
    boolean olderPlatform = ! instance.change().isPinned() && platform.compareTo(existing.version()) < 0;
    boolean olderRevision = ! (revision.isUnknown() && controller.system().isCd())
                            && revision.compareTo(existing.applicationVersion()) < 0;
    if ( ! olderPlatform && ! olderRevision) return;

    throw new IllegalArgumentException(String.format("Rejecting deployment of application %s to %s, as the requested versions (platform: %s, application: %s)" +
                                                     " are older than the currently deployed (platform: %s, application: %s).",
                                                     job.application(), zone, platform, revision, existing.version(), existing.applicationVersion()));
}
/** Returns the given id with every '-' in the application name replaced by '_'. */
private TenantAndApplicationId dashToUnderscore(TenantAndApplicationId id) {
    // String.replace does a literal character replacement; replaceAll would needlessly
    // compile "-" as a regular expression on every call.
    return TenantAndApplicationId.from(id.tenant().value(), id.application().value().replace('-', '_'));
}
/** Returns the given instance id with dashes in the application name replaced by underscores. */
private ApplicationId dashToUnderscore(ApplicationId id) {
    return dashToUnderscore(TenantAndApplicationId.from(id)).instance(id.instance());
}
/** Returns the quota usage of the given application's deployment in the given zone, as reported by the node repository. */
private QuotaUsage deploymentQuotaUsage(ZoneId zoneId, ApplicationId applicationId) {
    var application = configServer.nodeRepository().getApplication(zoneId, applicationId);
    return DeploymentQuotaCalculator.calculateQuotaUsage(application);
}
/**
 * Returns the application package for the given revision: the dev package for unknown (dev)
 * revisions, otherwise the stored package registered for that revision.
 */
private ApplicationPackage getApplicationPackage(ApplicationId application, ZoneId zone, ApplicationVersion revision) {
    return new ApplicationPackage(revision.isUnknown() ? applicationStore.getDev(application, zone)
                                                       : applicationStore.get(application.tenant(), application.application(), revision));
}
/**
 * Returns the AthenzUser behind the given principal, or empty if the principal
 * is absent or does not represent an Athenz user.
 */
private Optional<AthenzUser> getUser(Optional<Principal> deployer) {
    if (deployer.isEmpty() || ! (deployer.get() instanceof AthenzPrincipal))
        return Optional.empty();
    AthenzIdentity identity = ((AthenzPrincipal) deployer.get()).getIdentity();
    if (identity instanceof AthenzUser)
        return Optional.of((AthenzUser) identity);
    return Optional.empty();
}
/**
 * Verifies that the Athenz service(s) configured in the deployment spec (if any) can be
 * launched by the config server identity in every reachable zone.
 */
private void verifyAllowedLaunchAthenzService(DeploymentSpec deploymentSpec) {
    deploymentSpec.athenzDomain().ifPresent(domain -> {
        controller.zoneRegistry().zones().reachable().ids().forEach(zone -> {
            AthenzIdentity configServerAthenzIdentity = controller.zoneRegistry().getConfigServerHttpsIdentity(zone);
            // Check the spec-wide default service ...
            deploymentSpec.athenzService().ifPresent(service -> {
                verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()));
            });
            // ... and any per-instance service declared for this zone.
            deploymentSpec.instances().forEach(spec -> {
                spec.athenzService(zone.environment(), zone.region()).ifPresent(service -> {
                    verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()));
                });
            });
        });
    });
}
/** Throws IllegalArgumentException unless the given identity is allowed to launch the given Athenz service. */
private void verifyAthenzServiceCanBeLaunchedBy(AthenzIdentity configServerAthenzIdentity, AthenzService athenzService) {
    if ( ! ((AthenzFacade) accessControl).canLaunch(configServerAthenzIdentity, athenzService))
        throw new IllegalArgumentException("Not allowed to launch Athenz service " + athenzService.getFullName());
}
/** Returns the latest known version within the given major, or empty if none exists. */
public Optional<Version> lastCompatibleVersion(int targetMajorVersion) {
    Optional<Version> latest = Optional.empty();
    for (VespaVersion vespaVersion : controller.readVersionStatus().versions()) {
        Version version = vespaVersion.versionNumber();
        if (version.getMajor() != targetMajorVersion) continue;
        if (latest.isEmpty() || latest.get().compareTo(version) < 0)
            latest = Optional.of(version);
    }
    return latest;
}
/** Extracts the deployment-warning counts from a deployment result, keyed by warning type. */
private static Map<DeploymentMetrics.Warning, Integer> warningsFrom(ActivateResult result) {
    if (result.prepareResponse().log == null) return Map.of();
    int warningCount = 0;
    for (Log entry : result.prepareResponse().log) {
        // Both "warn" and "warning" levels count towards the aggregate warning total.
        if ("warn".equalsIgnoreCase(entry.level) || "warning".equalsIgnoreCase(entry.level))
            warningCount++;
    }
    return warningCount == 0 ? Map.of() : Map.of(DeploymentMetrics.Warning.all, warningCount);
}
} |
Nit: In general it's better to use `log.log(<level>, () -> "string")` to create the message lazily. You could also put the list collection inside that supplier. | protected double maintain() {
var expiredTenants = controller().tenants().asList().stream()
.filter(this::tenantIsCloudTenant)
.filter(this::tenantHasTrialPlan)
.filter(this::tenantIsNotExemptFromExpiry)
.filter(this::tenantReadersNotLoggedIn)
.filter(this::tenantHasNoDeployments)
.collect(Collectors.toList());
var expiredTenantNames = expiredTenants.stream()
.map(Tenant::name)
.map(TenantName::value)
.collect(Collectors.joining(", "));
log.info("Moving expired tenants to 'none' plan: " + expiredTenantNames);
expireTenants(expiredTenants);
return 0;
} | log.info("Moving expired tenants to 'none' plan: " + expiredTenantNames); | protected double maintain() {
var expiredTenants = controller().tenants().asList().stream()
.filter(this::tenantIsCloudTenant)
.filter(this::tenantHasTrialPlan)
.filter(this::tenantIsNotExemptFromExpiry)
.filter(this::tenantReadersNotLoggedIn)
.filter(this::tenantHasNoDeployments)
.collect(Collectors.toList());
if (! expiredTenants.isEmpty()) {
var expiredTenantNames = expiredTenants.stream()
.map(Tenant::name)
.map(TenantName::value)
.collect(Collectors.joining(", "));
log.info("Moving expired tenants to 'none' plan: " + expiredTenantNames);
}
expireTenants(expiredTenants);
return 1;
} | class CloudTrialExpirer extends ControllerMaintainer {
private static final Logger log = Logger.getLogger(CloudTrialExpirer.class.getName());
private static final Duration loginExpiry = Duration.ofDays(14);
private final ListFlag<String> extendedTrialTenants;
public CloudTrialExpirer(Controller controller, Duration interval) {
super(controller, interval, null, SystemName.allOf(SystemName::isPublic));
this.extendedTrialTenants = PermanentFlags.EXTENDED_TRIAL_TENANTS.bindTo(controller().flagSource());
}
// NOTE(review): removed a stray @Override that preceded this method — tenantIsCloudTenant is
// private and overrides nothing, so the annotation is a compile error (presumably left behind
// by an earlier edit that removed the method it belonged to).
/** Returns whether the tenant is a cloud tenant. */
private boolean tenantIsCloudTenant(Tenant tenant) {
    return tenant.type() == Tenant.Type.cloud;
}
// Returns whether the tenant's last user-level login is older than loginExpiry (14 days).
// Tenants with no recorded user login at all are NOT considered expired (orElse(false)).
private boolean tenantReadersNotLoggedIn(Tenant tenant) {
    return tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user)
            .map(instant -> {
                var sinceLastLogin = Duration.between(instant, controller().clock().instant());
                return sinceLastLogin.compareTo(loginExpiry) > 0;
            })
            .orElse(false);
}
// Returns whether the tenant is currently on the "trial" billing plan.
private boolean tenantHasTrialPlan(Tenant tenant) {
    var planId = controller().serviceRegistry().billingController().getPlan(tenant.name());
    return "trial".equals(planId.value());
}
// Returns true unless the tenant is listed in the extended-trial feature flag.
private boolean tenantIsNotExemptFromExpiry(Tenant tenant) {
    return ! extendedTrialTenants.value().contains(tenant.name().value());
}
// Returns whether no instance of any of the tenant's applications has a deployment.
// (Vacuously true for tenants with no applications or instances.)
private boolean tenantHasNoDeployments(Tenant tenant) {
    return controller().applications().asList(tenant.name()).stream()
            .flatMap(app -> app.instances().values().stream())
            .allMatch(instance -> instance.deployments().isEmpty());
}
// Downgrades each of the given tenants to the 'none' billing plan.
private void expireTenants(List<Tenant> tenants) {
    for (Tenant tenant : tenants)
        controller().serviceRegistry().billingController().setPlan(tenant.name(), PlanId.from("none"), false);
}
} | class CloudTrialExpirer extends ControllerMaintainer {
private static final Logger log = Logger.getLogger(CloudTrialExpirer.class.getName());
private static final Duration loginExpiry = Duration.ofDays(14);
private final ListFlag<String> extendedTrialTenants;
public CloudTrialExpirer(Controller controller, Duration interval) {
super(controller, interval, null, SystemName.allOf(SystemName::isPublic));
this.extendedTrialTenants = PermanentFlags.EXTENDED_TRIAL_TENANTS.bindTo(controller().flagSource());
}
// NOTE(review): removed a stray @Override that preceded this method — tenantIsCloudTenant is
// private and overrides nothing, so the annotation is a compile error (presumably left behind
// by an earlier edit that removed the method it belonged to).
/** Returns whether the tenant is a cloud tenant. */
private boolean tenantIsCloudTenant(Tenant tenant) {
    return tenant.type() == Tenant.Type.cloud;
}
private boolean tenantReadersNotLoggedIn(Tenant tenant) {
return tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user)
.map(instant -> {
var sinceLastLogin = Duration.between(instant, controller().clock().instant());
return sinceLastLogin.compareTo(loginExpiry) > 0;
})
.orElse(false);
}
private boolean tenantHasTrialPlan(Tenant tenant) {
var planId = controller().serviceRegistry().billingController().getPlan(tenant.name());
return "trial".equals(planId.value());
}
private boolean tenantIsNotExemptFromExpiry(Tenant tenant) {
return ! extendedTrialTenants.value().contains(tenant.name().value());
}
private boolean tenantHasNoDeployments(Tenant tenant) {
return controller().applications().asList(tenant.name()).stream()
.flatMap(app -> app.instances().values().stream())
.mapToLong(instance -> instance.deployments().values().size())
.sum() == 0;
}
private void expireTenants(List<Tenant> tenants) {
tenants.forEach(tenant -> {
controller().serviceRegistry().billingController().setPlan(tenant.name(), PlanId.from("none"), false);
});
}
} |
Using `lastSuccess` here means this will trigger back to back whenever a job is failing. Is that intentional? I guess there are pros and cons, and this is only meant for manual intervention? | private boolean needsTrigger(RetriggerEntry entry) {
return controller().jobController().lastSuccess(entry.jobId())
.filter(run -> run.id().number() < entry.requiredRun())
.isPresent();
} | return controller().jobController().lastSuccess(entry.jobId()) | private boolean needsTrigger(RetriggerEntry entry) {
return controller().jobController().lastCompleted(entry.jobId())
.filter(run -> run.id().number() < entry.requiredRun())
.isPresent();
} | class RetriggerMaintainer extends ControllerMaintainer {
private static final Logger logger = Logger.getLogger(RetriggerMaintainer.class.getName());
public RetriggerMaintainer(Controller controller, Duration interval) {
super(controller, interval);
}
@Override
protected double maintain() {
    // The retrigger queue is shared state; hold its lock for the whole read-modify-write cycle.
    try (var lock = controller().curator().lockDeploymentRetriggerQueue()) {
        List<RetriggerEntry> retriggerEntries = controller().curator().readRetriggerEntries();
        // Re-trigger every entry that still needs it and whose job is not currently running.
        retriggerEntries.stream()
                .filter(this::needsTrigger)
                .filter(entry -> readyToTrigger(entry.jobId()))
                .forEach(entry -> controller().applications().deploymentTrigger().reTrigger(entry.jobId().application(), entry.jobId().type()));
        // Write back only the entries that still need triggering; satisfied entries are dropped.
        List<RetriggerEntry> remaining = retriggerEntries.stream()
                .filter(this::needsTrigger)
                .collect(Collectors.toList());
        controller().curator().writeRetriggerEntries(remaining);
    } catch (Exception e) {
        logger.log(Level.WARNING, "Exception while triggering jobs", e);
        return 0.0; // nothing succeeded this run
    }
    return 1.0;
}
/*
Returns true if a job is ready to run, i.e. it is not currently running
*/
/** Returns whether the job is ready to run, i.e., it has no currently active run of the same type. */
private boolean readyToTrigger(JobId jobId) {
    return controller().jobController().active(jobId.application()).stream()
                       .noneMatch(run -> run.id().type().equals(jobId.type()));
}
/*
Returns true if the job needs triggering, i.e. the job has not completed a run at or above the entry's required run number.
*/
} | class RetriggerMaintainer extends ControllerMaintainer {
private static final Logger logger = Logger.getLogger(RetriggerMaintainer.class.getName());
public RetriggerMaintainer(Controller controller, Duration interval) {
super(controller, interval);
}
@Override
protected double maintain() {
try (var lock = controller().curator().lockDeploymentRetriggerQueue()) {
List<RetriggerEntry> retriggerEntries = controller().curator().readRetriggerEntries();
retriggerEntries.stream()
.filter(this::needsTrigger)
.filter(entry -> readyToTrigger(entry.jobId()))
.forEach(entry -> controller().applications().deploymentTrigger().reTrigger(entry.jobId().application(), entry.jobId().type()));
List<RetriggerEntry> remaining = retriggerEntries.stream()
.filter(this::needsTrigger)
.collect(Collectors.toList());
controller().curator().writeRetriggerEntries(remaining);
} catch (Exception e) {
logger.log(Level.WARNING, "Exception while triggering jobs", e);
return 0.0;
}
return 1.0;
}
/*
Returns true if a job is ready to run, i.e. it is not currently running
*/
private boolean readyToTrigger(JobId jobId) {
Optional<Run> existingRun = controller().jobController().active(jobId.application()).stream()
.filter(run -> run.id().type().equals(jobId.type()))
.findFirst();
return existingRun.isEmpty();
}
/*
Returns true if the job needs triggering, i.e. the job has not completed a run at or above the entry's required run number.
*/
} |
Should this also happen on `allowSupportAccess`? | private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
Principal principal = requireUserPrincipal(request);
SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName());
controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment);
return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant()));
} | controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment); | private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
Principal principal = requireUserPrincipal(request);
SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName());
controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment);
return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant()));
} | class ApplicationApiHandler extends AuditLoggingRequestHandler {
private static final ObjectMapper jsonMapper = new ObjectMapper();
private final Controller controller;
private final AccessControlRequests accessControlRequests;
private final TestConfigSerializer testConfigSerializer;
/** Creates the handler, wiring in the controller and access-control request parser it dispatches to. */
@Inject
public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx,
                             Controller controller,
                             AccessControlRequests accessControlRequests) {
    super(parentCtx, controller.auditLogger());
    this.controller = controller;
    this.accessControlRequests = accessControlRequests;
    this.testConfigSerializer = new TestConfigSerializer(controller.system());
}
/** Returns the request timeout for this handler: 20 minutes. */
@Override
public Duration getTimeout() {
    return Duration.ofMinutes(20);
}
/**
 * Dispatches the request to the handler for its HTTP method, translating known exception
 * types into the corresponding HTTP error responses.
 */
@Override
public HttpResponse auditAndHandle(HttpRequest request) {
    try {
        Path path = new Path(request.getUri());
        switch (request.getMethod()) {
            case GET: return handleGET(path, request);
            case PUT: return handlePUT(path, request);
            case POST: return handlePOST(path, request);
            case PATCH: return handlePATCH(path, request);
            case DELETE: return handleDELETE(path, request);
            case OPTIONS: return handleOPTIONS();
            default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
        }
    }
    catch (ForbiddenException e) {
        return ErrorResponse.forbidden(Exceptions.toMessageString(e));
    }
    catch (NotAuthorizedException e) {
        return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
    }
    catch (NotExistsException e) {
        return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
    }
    catch (IllegalArgumentException e) {
        return ErrorResponse.badRequest(Exceptions.toMessageString(e));
    }
    catch (ConfigServerException e) {
        // Map config server error codes onto HTTP statuses.
        switch (e.code()) {
            case NOT_FOUND:
                return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
            case ACTIVATION_CONFLICT:
                return new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e));
            case INTERNAL_SERVER_ERROR:
                return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
            default:
                return new ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e));
        }
    }
    catch (RuntimeException e) {
        // Unexpected failures are logged with the request URI, then reported as 500.
        log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
        return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
    }
}
/** Handles GET requests by matching the request path against the known routes, first match wins. */
private HttpResponse handleGET(Path path, HttpRequest request) {
    if (path.matches("/application/v4/")) return root(request);
    if (path.matches("/application/v4/tenant")) return tenants(request);
    if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/metering")) return metering(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return metrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    // Note: a second, byte-identical check of the following route was removed here; it was unreachable.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/**
 * Routes a PUT request to the handler for the first matching /application/v4 path pattern.
 * Patterns are tried in declaration order; unmatched paths yield a 404.
 */
private HttpResponse handlePUT(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return allowArchiveAccess(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
// Both legacy (instance-before-zone) and current (zone-before-instance) path orders are supported.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/**
 * Routes a POST request to the handler for the first matching /application/v4 path pattern.
 * Patterns are tried in declaration order; unmatched paths yield a 404.
 */
private HttpResponse handlePOST(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
// Application-level deploy paths implicitly target the "default" instance.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request);
// NOTE(review): instance-level submit ignores the {instance} segment and submits for the whole application — presumably intentional, as submissions are application-wide; verify.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
// Legacy path order: zone before instance.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/**
 * Routes a PATCH request to the handler for the first matching /application/v4 path pattern.
 * NOTE(review): the instance-level path delegates to the same application-level patch and
 * ignores the {instance} segment — presumably intentional since the patched fields are
 * application-wide; verify against API documentation.
 */
private HttpResponse handlePATCH(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/**
 * Routes a DELETE request to the handler for the first matching /application/v4 path pattern.
 * Patterns are tried in declaration order; unmatched paths yield a 404.
 */
private HttpResponse handleDELETE(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return removeArchiveAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
// Application-level cancel targets the "default" instance; "all" cancels every pending change.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
// Legacy path order: zone before instance.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Answers an OPTIONS request with an empty body and an Allow header listing the supported methods. */
private HttpResponse handleOPTIONS() {
EmptyResponse optionsResponse = new EmptyResponse();
optionsResponse.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
return optionsResponse;
}
/** Renders every tenant together with that tenant's applications, for recursive root listing. */
private HttpResponse recursiveRoot(HttpRequest request) {
Slime slime = new Slime();
Cursor tenantArray = slime.setArray();
// Fetch all applications once, then partition them per tenant while rendering.
List<Application> allApplications = controller.applications().asList();
for (Tenant tenant : controller.tenants().asList()) {
List<Application> tenantApplications = allApplications.stream()
.filter(application -> application.id().tenant().equals(tenant.name()))
.collect(toList());
toSlime(tenantArray.addObject(), tenant, tenantApplications, request);
}
return new SlimeJsonResponse(slime);
}
/** Lists tenants: recursively with their applications when requested, otherwise as plain links. */
private HttpResponse root(HttpRequest request) {
if (recurseOverTenants(request))
return recursiveRoot(request);
return new ResourceResponse(request, "tenant");
}
/** Returns a JSON array with a summary entry for each known tenant. */
private HttpResponse tenants(HttpRequest request) {
Slime slime = new Slime();
Cursor tenantArray = slime.setArray();
for (Tenant tenant : controller.tenants().asList())
tenantInTenantsListToSlime(tenant, request.getUri(), tenantArray.addObject());
return new SlimeJsonResponse(slime);
}
/** Renders the named tenant, or a 404 if it does not exist. */
private HttpResponse tenant(String tenantName, HttpRequest request) {
Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
if (tenant.isEmpty())
return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
return tenant(tenant.get(), request);
}
/** Renders the given tenant, including all its applications, as a JSON object. */
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
Slime slime = new Slime();
Cursor root = slime.setObject();
toSlime(root, tenant, controller.applications().asList(tenant.name()), request);
return new SlimeJsonResponse(slime);
}
/** Renders the info record of the named tenant; 404 unless the tenant exists and is a cloud tenant. */
private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
if (tenant.isPresent() && tenant.get().type() == Tenant.Type.cloud)
return tenantInfo(((CloudTenant) tenant.get()).info(), request);
return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
}
/**
 * Serializes a tenant info record to JSON. An empty record serializes as an empty object.
 * The request parameter is unused but kept for signature symmetry with the other renderers.
 */
private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) {
Slime slime = new Slime();
Cursor root = slime.setObject();
if (info.isEmpty())
return new SlimeJsonResponse(slime); // nothing set: respond with {}
root.setString("name", info.name());
root.setString("email", info.email());
root.setString("website", info.website());
root.setString("invoiceEmail", info.invoiceEmail());
root.setString("contactName", info.contactName());
root.setString("contactEmail", info.contactEmail());
toSlime(info.address(), root);
toSlime(info.billingContact(), root);
return new SlimeJsonResponse(slime);
}
/** Writes the given address under an "address" key of the parent, unless the address is empty. */
private void toSlime(TenantInfoAddress address, Cursor parentCursor) {
if (address.isEmpty()) return; // omit the "address" object entirely when nothing is set
Cursor cursor = parentCursor.setObject("address");
cursor.setString("addressLines", address.addressLines());
cursor.setString("postalCodeOrZip", address.postalCodeOrZip());
cursor.setString("city", address.city());
cursor.setString("stateRegionProvince", address.stateRegionProvince());
cursor.setString("country", address.country());
}
/** Writes the given billing contact under a "billingContact" key of the parent, unless it is empty. */
private void toSlime(TenantInfoBillingContact billingContact, Cursor parentCursor) {
if (billingContact.isEmpty()) return; // omit the object entirely when nothing is set
Cursor billingCursor = parentCursor.setObject("billingContact");
billingCursor.setString("name", billingContact.name());
billingCursor.setString("email", billingContact.email());
billingCursor.setString("phone", billingContact.phone());
toSlime(billingContact.address(), billingCursor);
}
/** Updates the info record of the named tenant; 404 unless the tenant exists and is a cloud tenant. */
private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) {
Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
if (tenant.isPresent() && tenant.get().type() == Tenant.Type.cloud)
return updateTenantInfo((CloudTenant) tenant.get(), request);
return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
}
/**
 * Returns the string value of the given field if it is present in the update,
 * or the given default (the existing value) otherwise. Used to merge partial JSON updates.
 * Fix: parameter name typo "defaultVale" -> "defaultValue".
 */
private String getString(Inspector field, String defaultValue) {
return field.valid() ? field.asString() : defaultValue;
}
/**
 * Merges the JSON body of the request into the tenant's existing info record and stores it.
 * Fields absent from the request keep their existing values.
 *
 * Fixes two copy-paste bugs in the fallback values:
 *  - "website" previously fell back to oldInfo.email() instead of oldInfo.website(),
 *  - "contactEmail" previously fell back to oldInfo.contactName() instead of oldInfo.contactEmail(),
 * which silently overwrote those fields on partial updates.
 */
private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) {
TenantInfo oldInfo = tenant.info();
Inspector insp = toSlime(request.getData()).get();
TenantInfo mergedInfo = TenantInfo.EMPTY
.withName(getString(insp.field("name"), oldInfo.name()))
.withEmail(getString(insp.field("email"), oldInfo.email()))
.withWebsite(getString(insp.field("website"), oldInfo.website()))
.withInvoiceEmail(getString(insp.field("invoiceEmail"), oldInfo.invoiceEmail()))
.withContactName(getString(insp.field("contactName"), oldInfo.contactName()))
.withContactEmail(getString(insp.field("contactEmail"), oldInfo.contactEmail()))
.withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address()))
.withBillingContact(updateTenantInfoBillingContact(insp.field("billingContact"), oldInfo.billingContact()));
// Store under the tenant lock so concurrent updates cannot be lost.
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withInfo(mergedInfo);
controller.tenants().store(lockedTenant);
});
return new MessageResponse("Tenant info updated");
}
/** Merges an address update into the old address; absent fields keep their old values. */
private TenantInfoAddress updateTenantInfoAddress(Inspector insp, TenantInfoAddress oldAddress) {
if ( ! insp.valid()) return oldAddress; // no "address" object in the update: keep as is
TenantInfoAddress merged = TenantInfoAddress.EMPTY;
merged = merged.withCountry(getString(insp.field("country"), oldAddress.country()));
merged = merged.withStateRegionProvince(getString(insp.field("stateRegionProvince"), oldAddress.stateRegionProvince()));
merged = merged.withCity(getString(insp.field("city"), oldAddress.city()));
merged = merged.withPostalCodeOrZip(getString(insp.field("postalCodeOrZip"), oldAddress.postalCodeOrZip()));
merged = merged.withAddressLines(getString(insp.field("addressLines"), oldAddress.addressLines()));
return merged;
}
/** Merges a billing-contact update into the old contact; absent fields keep their old values. */
private TenantInfoBillingContact updateTenantInfoBillingContact(Inspector insp, TenantInfoBillingContact oldContact) {
if ( ! insp.valid()) return oldContact; // no "billingContact" object in the update: keep as is
TenantInfoBillingContact merged = TenantInfoBillingContact.EMPTY;
merged = merged.withName(getString(insp.field("name"), oldContact.name()));
merged = merged.withEmail(getString(insp.field("email"), oldContact.email()));
merged = merged.withPhone(getString(insp.field("phone"), oldContact.phone()));
merged = merged.withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address()));
return merged;
}
/**
 * Lists notifications for the given tenant, optionally narrowed by the "application"
 * and "instance" request properties.
 */
private HttpResponse notifications(String tenantName, HttpRequest request) {
Optional<ApplicationName> application = Optional.ofNullable(request.getProperty("application")).map(ApplicationName::from);
Optional<InstanceName> instance = Optional.ofNullable(request.getProperty("instance")).map(InstanceName::from);
NotificationSource source = new NotificationSource(TenantName.from(tenantName), application, instance,
Optional.empty(), Optional.empty(), Optional.empty(), OptionalLong.empty());
Slime slime = new Slime();
Cursor notificationsArray = slime.setObject().setArray("notifications");
controller.notificationsDb().listNotifications(source, showOnlyProductionInstances(request))
.forEach(notification -> toSlime(notificationsArray.addObject(), notification));
return new SlimeJsonResponse(slime);
}
/**
 * Serializes a notification to the given cursor. Optional source fields (application,
 * instance, zone, cluster, job, run) are written only when present.
 */
private static void toSlime(Cursor cursor, Notification notification) {
cursor.setLong("at", notification.at().toEpochMilli());
cursor.setString("level", notificationLevelAsString(notification.level()));
cursor.setString("type", notificationTypeAsString(notification.type()));
Cursor messagesArray = cursor.setArray("messages");
notification.messages().forEach(messagesArray::addString);
notification.source().application().ifPresent(application -> cursor.setString("application", application.value()));
notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value()));
// A zone id expands to separate environment and region fields.
notification.source().zoneId().ifPresent(zoneId -> {
cursor.setString("environment", zoneId.environment().value());
cursor.setString("region", zoneId.region().value());
});
notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value()));
notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName()));
notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber));
}
/** Returns the wire name for a notification type; throws for types without a defined serialization. */
private static String notificationTypeAsString(Notification.Type type) {
if (type == Notification.Type.applicationPackage) return "applicationPackage";
if (type == Notification.Type.deployment) return "deployment";
if (type == Notification.Type.feedBlock) return "feedBlock";
throw new IllegalArgumentException("No serialization defined for notification type " + type);
}
/** Returns the wire name for a notification level; throws for levels without a defined serialization. */
private static String notificationLevelAsString(Notification.Level level) {
if (level == Notification.Level.warning) return "warning";
if (level == Notification.Level.error) return "error";
throw new IllegalArgumentException("No serialization defined for notification level " + level);
}
/**
 * Lists the applications of the given tenant, optionally filtered to a single application
 * name, with a URL and the (optionally production-only) instances of each.
 */
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
TenantName tenant = TenantName.from(tenantName);
if (controller.tenants().get(tenantName).isEmpty())
return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
Slime slime = new Slime();
Cursor applicationArray = slime.setArray();
for (Application application : controller.applications().asList(tenant)) {
// With no filter, include every application; otherwise only the exact name match.
if (applicationName.map(application.id().application().value()::equals).orElse(true)) {
Cursor applicationObject = applicationArray.addObject();
applicationObject.setString("tenant", application.id().tenant().value());
applicationObject.setString("application", application.id().application().value());
applicationObject.setString("url", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value(),
request.getUri()).toString());
Cursor instanceArray = applicationObject.setArray("instances");
for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
: application.instances().keySet()) {
Cursor instanceObject = instanceArray.addObject();
instanceObject.setString("instance", instance.value());
instanceObject.setString("url", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value() +
"/instance/" + instance.value(),
request.getUri()).toString());
}
}
}
return new SlimeJsonResponse(slime);
}
/** Returns the stored dev application package for the given id and manually deployed zone, as a zip. */
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
if ( ! type.environment().isManuallyDeployed())
throw new IllegalArgumentException("Only manually deployed zones have dev packages");
ZoneId zone = type.zone(controller.system());
byte[] packageBytes = controller.applications().applicationStore().getDev(id, zone);
String filename = id.toFullString() + "." + zone.value() + ".zip";
return new ZipResponse(filename, packageBytes);
}
/**
 * Returns the stored application package of the given application as a zip,
 * for the requested "build" number, or for the latest submission when no build is given.
 */
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
var tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
var applicationId = ApplicationId.from(tenantName, applicationName, InstanceName.defaultName().value());
String requestedBuild = request.getProperty("build");
long buildNumber;
if (requestedBuild == null) {
// No explicit build: fall back to the latest submitted version.
var application = controller.applications().requireApplication(tenantAndApplication);
var latestBuild = application.latestVersion().map(ApplicationVersion::buildNumber).orElse(OptionalLong.empty());
if (latestBuild.isEmpty())
throw new NotExistsException("No application package has been submitted for '" + tenantAndApplication + "'");
buildNumber = latestBuild.getAsLong();
}
else {
try {
buildNumber = Long.parseLong(requestedBuild);
}
catch (NumberFormatException e) {
throw new IllegalArgumentException("Invalid build number", e);
}
}
var applicationPackage = controller.applications().applicationStore().find(tenantAndApplication.tenant(), tenantAndApplication.application(), buildNumber);
if (applicationPackage.isEmpty())
throw new NotExistsException("No application package found for '" +
tenantAndApplication +
"' with build number " + buildNumber);
var filename = tenantAndApplication + "-build" + buildNumber + ".zip";
return new ZipResponse(filename, applicationPackage.get());
}
/** Renders the named application as a JSON object. */
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
Slime slime = new Slime();
Cursor root = slime.setObject();
toSlime(root, getApplication(tenantName, applicationName), request);
return new SlimeJsonResponse(slime);
}
/** Returns the compile version to use for the named application, as {"compileVersion": "..."}. */
private HttpResponse compileVersion(String tenantName, String applicationName) {
TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
Slime slime = new Slime();
Cursor root = slime.setObject();
root.setString("compileVersion", compileVersion(id).toFullString());
return new SlimeJsonResponse(slime);
}
/** Renders the named instance, including its deployment status, as a JSON object. */
private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
Slime slime = new Slime();
Cursor root = slime.setObject();
var deploymentStatus = controller.jobController().deploymentStatus(getApplication(tenantName, applicationName));
toSlime(root, getInstance(tenantName, applicationName, instanceName), deploymentStatus, request);
return new SlimeJsonResponse(slime);
}
/**
 * Registers the PEM public key in the request body as a developer key for the
 * requesting user on the given cloud tenant, and returns the resulting key list.
 */
private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
TenantName tenant = TenantName.from(tenantName);
if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
Principal user = request.getJDiscRequest().getUserPrincipal();
String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
Slime keysSlime = new Slime();
// Mutate and render under the tenant lock so the response reflects the stored state.
controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withDeveloperKey(developerKey, user);
toSlime(keysSlime.setObject().setArray("keys"), lockedTenant.get().developerKeys());
controller.tenants().store(lockedTenant);
});
return new SlimeJsonResponse(keysSlime);
}
/**
 * Validates a configured tenant secret store by asking the config server, via the deployment
 * given by the "application-id" and "zone" properties, to read the given AWS parameter.
 * Returns the config server's result wrapped with the target deployment, or a 500 with the
 * raw response if it is not valid JSON.
 * NOTE(review): the tenant is resolved from the application-id property rather than the
 * {tenant} path segment, while the 404 message uses the path segment — presumably these
 * always agree; verify against callers.
 */
private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) {
var awsRegion = request.getProperty("aws-region");
var parameterName = request.getProperty("parameter-name");
var applicationId = ApplicationId.fromFullString(request.getProperty("application-id"));
var zoneId = ZoneId.from(request.getProperty("zone"));
var deploymentId = new DeploymentId(applicationId, zoneId);
var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class);
var tenantSecretStore = tenant.tenantSecretStores()
.stream()
.filter(secretStore -> secretStore.getName().equals(secretStoreName))
.findFirst();
if (tenantSecretStore.isEmpty())
return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'");
var response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName);
try {
var responseRoot = new Slime();
var responseCursor = responseRoot.setObject();
responseCursor.setString("target", deploymentId.toString());
var responseResultCursor = responseCursor.setObject("result");
// The config server response is embedded as a JSON object, not as a string.
var responseSlime = SlimeUtils.jsonToSlime(response);
SlimeUtils.copyObject(responseSlime.get(), responseResultCursor);
return new SlimeJsonResponse(responseRoot);
} catch (JsonParseException e) {
return ErrorResponse.internalServerError(response);
}
}
/**
 * Removes the PEM public key in the request body from the developer keys of the given
 * cloud tenant, and returns the remaining key list.
 * Fix: removed the unused local 'user' (looked up from the key map but never read);
 * the lookup had no side effects, so behavior is unchanged.
 */
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
Slime root = new Slime();
// Mutate and render under the tenant lock so the response reflects the stored state.
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
tenant = tenant.withoutDeveloperKey(developerKey);
toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
controller.tenants().store(tenant);
});
return new SlimeJsonResponse(root);
}
/** Writes each key/owner pair as an object with PEM "key" and "user" fields into the given array. */
private void toSlime(Cursor keysArray, Map<PublicKey, Principal> keys) {
for (Map.Entry<PublicKey, Principal> entry : keys.entrySet()) {
Cursor keyObject = keysArray.addObject();
keyObject.setString("key", KeyUtils.toPem(entry.getKey()));
keyObject.setString("user", entry.getValue().getName());
}
}
/**
 * Adds the PEM public key in the request body as a deploy key for the given application,
 * and returns the resulting key list.
 */
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(toSlime(request.getData()).get().field("key").asString());
Slime root = new Slime();
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
application = application.withDeployKey(deployKey);
Cursor keysArray = root.setObject().setArray("keys");
for (PublicKey key : application.get().deployKeys())
keysArray.addString(KeyUtils.toPem(key));
controller.applications().store(application);
});
return new SlimeJsonResponse(root);
}
/**
 * Removes the PEM public key in the request body from the deploy keys of the given
 * application, and returns the remaining key list.
 */
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(toSlime(request.getData()).get().field("key").asString());
Slime root = new Slime();
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
application = application.withoutDeployKey(deployKey);
Cursor keysArray = root.setObject().setArray("keys");
for (PublicKey key : application.get().deployKeys())
keysArray.addString(KeyUtils.toPem(key));
controller.applications().store(application);
});
return new SlimeJsonResponse(root);
}
/**
 * Configures a new secret store for the given cloud tenant from the request body
 * (awsId, externalId, role), creating the corresponding tenant policy and registering
 * the store with the tenant secret service before persisting it on the tenant.
 * Returns the tenant's full set of secret stores after the update.
 * The order of the external calls matters: policy and secret-service registration
 * happen before the tenant record is updated.
 */
private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
var data = toSlime(request.getData()).get();
var awsId = mandatory("awsId", data).asString();
var externalId = mandatory("externalId", data).asString();
var role = mandatory("role", data).asString();
var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var tenantSecretStore = new TenantSecretStore(name, awsId, role);
if (!tenantSecretStore.isValid()) {
return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid");
}
if (tenant.tenantSecretStores().contains(tenantSecretStore)) {
return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured");
}
controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role);
controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId);
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withSecretStore(tenantSecretStore);
controller.tenants().store(lockedTenant);
});
// Re-read the tenant so the response reflects the stored state.
tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var slime = new Slime();
toSlime(slime.setObject(), tenant.tenantSecretStores());
return new SlimeJsonResponse(slime);
}
/**
 * Deletes the named secret store from the given tenant, removing it from the tenant
 * secret service and deleting its tenant policy before updating the tenant record.
 * Returns the tenant's remaining secret stores.
 * NOTE(review): the request parameter is unused here; it is kept because the DELETE
 * route passes it — confirm before removing from the route table.
 */
private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) {
var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var optionalSecretStore = tenant.tenantSecretStores().stream()
.filter(secretStore -> secretStore.getName().equals(name))
.findFirst();
if (optionalSecretStore.isEmpty())
return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found");
var tenantSecretStore = optionalSecretStore.get();
controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore);
controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole());
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withoutSecretStore(tenantSecretStore);
controller.tenants().store(lockedTenant);
});
// Re-read the tenant so the response reflects the stored state.
tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var slime = new Slime();
toSlime(slime.setObject(), tenant.tenantSecretStores());
return new SlimeJsonResponse(slime);
}
/**
 * Sets the archive access role for a cloud tenant.
 *
 * @throws IllegalArgumentException if the tenant is not a cloud tenant
 */
private HttpResponse allowArchiveAccess(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    String role = mandatory("role", toSlime(request.getData()).get()).asString();
    if (role.isBlank())
        return ErrorResponse.badRequest("Archive access role can't be whitespace only");
    controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, locked ->
            controller.tenants().store(locked.withArchiveAccessRole(Optional.of(role))));
    return new MessageResponse("Archive access role set to '" + role + "' for tenant " + tenantName + ".");
}
/**
 * Clears the archive access role for a cloud tenant.
 *
 * @throws IllegalArgumentException if the tenant is not a cloud tenant
 */
private HttpResponse removeArchiveAccess(String tenantName) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, locked ->
            controller.tenants().store(locked.withArchiveAccessRole(Optional.empty())));
    return new MessageResponse("Archive access role removed for tenant " + tenantName + ".");
}
/**
 * Applies a partial update ("majorVersion", "pemDeployKey") to the given application,
 * and returns a message describing which changes were applied.
 */
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        Inspector majorVersionField = requestObject.field("majorVersion");
        if (majorVersionField.valid()) {
            // A majorVersion of 0 clears the pinned major version.
            Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
            application = application.withMajorVersion(majorVersion);
            messageBuilder.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
        }
        Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
        if (pemDeployKeyField.valid()) {
            String pemDeployKey = pemDeployKeyField.asString();
            PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
            application = application.withDeployKey(deployKey);
            messageBuilder.add("Added deploy key " + pemDeployKey);
        }
        controller.applications().store(application);
    });
    return new MessageResponse(messageBuilder.toString());
}
/** Returns the application with the given tenant and name, or throws NotExistsException. */
private Application getApplication(String tenantName, String applicationName) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    return controller.applications()
                     .getApplication(id)
                     .orElseThrow(() -> new NotExistsException(id + " not found"));
}
/** Returns the instance with the given tenant, application and instance name, or throws NotExistsException. */
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    return controller.applications()
                     .getInstance(id)
                     .orElseThrow(() -> new NotExistsException(id + " not found"));
}
/** Lists the nodes allocated to the given deployment, with state, version and resource info. */
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, id);
    Slime slime = new Slime();
    Cursor nodesArray = slime.setObject().setArray("nodes");
    for (Node node : nodes) {
        Cursor nodeObject = nodesArray.addObject();
        nodeObject.setString("hostname", node.hostname().value());
        nodeObject.setString("state", valueOf(node.state()));
        node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
        nodeObject.setString("orchestration", valueOf(node.serviceState()));
        nodeObject.setString("version", node.currentVersion().toString());
        nodeObject.setString("flavor", node.flavor());
        toSlime(node.resources(), nodeObject);
        nodeObject.setString("clusterId", node.clusterId());
        nodeObject.setString("clusterType", valueOf(node.clusterType()));
        // A node is reported down if any history event says so.
        nodeObject.setBool("down", node.history().stream().anyMatch(event -> "down".equals(event.getEvent())));
        nodeObject.setBool("retired", node.retired() || node.wantToRetire());
        // Restarting/rebooting when the wanted generation is ahead of the current one.
        nodeObject.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration());
        nodeObject.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration());
    }
    return new SlimeJsonResponse(slime);
}
/** Serializes autoscaling status and resource limits for each cluster of the given deployment. */
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);
    Slime slime = new Slime();
    Cursor clustersObject = slime.setObject().setObject("clusters");
    for (Cluster cluster : application.clusters().values()) {
        Cursor clusterObject = clustersObject.setObject(cluster.id().value());
        clusterObject.setString("type", cluster.type().name());
        toSlime(cluster.min(), clusterObject.setObject("min"));
        toSlime(cluster.max(), clusterObject.setObject("max"));
        toSlime(cluster.current(), clusterObject.setObject("current"));
        // Only report a target when it differs from the current resources.
        if (cluster.target().isPresent()
            && ! cluster.target().get().justNumbers().equals(cluster.current().justNumbers()))
            toSlime(cluster.target().get(), clusterObject.setObject("target"));
        cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested")));
        utilizationToSlime(cluster.utilization(), clusterObject.setObject("utilization"));
        scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
        clusterObject.setString("autoscalingStatusCode", cluster.autoscalingStatusCode());
        clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus());
        clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis());
        clusterObject.setDouble("maxQueryGrowthRate", cluster.maxQueryGrowthRate());
        clusterObject.setDouble("currentQueryFractionOfMax", cluster.currentQueryFractionOfMax());
    }
    return new SlimeJsonResponse(slime);
}
/** Serializes the given node state to its API string value; throws on unsupported states. */
private static String valueOf(Node.State state) {
    switch (state) {
        case failed:
        case parked:
        case dirty:
        case ready:
        case active:
        case inactive:
        case reserved:
        case provisioned:
            return state.name(); // the API value equals the enum constant name for every supported state
        default:
            throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
    }
}
/** Serializes the given orchestration state; anything not explicitly supported maps to "unknown". */
static String valueOf(Node.ServiceState state) {
    switch (state) {
        case expectedUp:
        case allowedDown:
        case permanentlyDown:
        case unorchestrated:
            return state.name(); // the API value equals the enum constant name for these states
        default:
            return "unknown";
    }
}
/** Serializes the given cluster type to its API string value; throws on unsupported types. */
private static String valueOf(Node.ClusterType type) {
    switch (type) {
        case admin:
        case content:
        case container:
        case combined:
            return type.name(); // the API value equals the enum constant name for every supported type
        default:
            throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
    }
}
/** Serializes the given disk speed to its API string value; throws on unsupported values. */
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
    switch (diskSpeed) {
        case fast:
        case slow:
        case any:
            return diskSpeed.name(); // the API value equals the enum constant name
        default:
            throw new IllegalArgumentException("Unknown disk speed '" + diskSpeed.name() + "'");
    }
}
/** Serializes the given storage type to its API string value; throws on unsupported values. */
private static String valueOf(NodeResources.StorageType storageType) {
    switch (storageType) {
        case remote:
        case local:
        case any:
            return storageType.name(); // the API value equals the enum constant name
        default:
            throw new IllegalArgumentException("Unknown storage type '" + storageType.name() + "'");
    }
}
/** Streams Vespa logs for the given deployment directly to the client, closing the source when done. */
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
    return new HttpResponse(200) {
        @Override
        public void render(OutputStream out) throws IOException {
            try (logStream) {
                logStream.transferTo(out);
            }
        }
    };
}
/** Returns the current support access state of the given deployment. */
private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deploymentId = new DeploymentId(application, requireZone(environment, region));
    SupportAccess state = controller.supportAccess().forDeployment(deploymentId);
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(state, controller.clock().instant()));
}
/** Grants support access to the given deployment for 7 days, recorded with the requesting user's name. */
private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    String requestor = requireUserPrincipal(request).getName();
    Instant now = controller.clock().instant();
    SupportAccess granted = controller.supportAccess().allow(deploymentId, now.plus(7, ChronoUnit.DAYS), requestor);
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(granted, now));
}
/** Fetches proton metrics for the given deployment and returns them as JSON. */
private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    return buildResponseFromProtonMetrics(controller.serviceRegistry().configServer().getProtonMetrics(deployment));
}
/**
 * Wraps the given proton metrics in a pretty-printed JSON response.
 * Returns a 500 with an empty body if serialization fails.
 */
private JsonResponse buildResponseFromProtonMetrics(List<ProtonMetrics> protonMetrics) {
    try {
        var root = jsonMapper.createObjectNode();
        var metricsArray = jsonMapper.createArrayNode();
        for (ProtonMetrics metrics : protonMetrics)
            metricsArray.add(metrics.toJson());
        root.set("metrics", metricsArray);
        return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(root));
    } catch (JsonProcessingException e) {
        log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
        return new JsonResponse(500, "");
    }
}
/**
 * Triggers (or re-triggers) the given job for the given instance, and reports which jobs,
 * if any, were triggered as a result.
 */
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    boolean requireTests = ! requestObject.field("skipTests").asBool();
    String triggered;
    if (requestObject.field("reTrigger").asBool())
        triggered = controller.applications().deploymentTrigger().reTrigger(id, type).type().jobName();
    else
        triggered = controller.applications().deploymentTrigger()
                              .forceTrigger(id, type, request.getJDiscRequest().getUserPrincipal().getName(), requireTests)
                              .stream().map(job -> job.type().jobName()).collect(joining(", "));
    return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
                                                   : "Triggered " + triggered + " for " + id);
}
/** Pauses the given job for the maximum allowed pause duration. */
private HttpResponse pause(ApplicationId id, JobType type) {
    Instant resumeAt = controller.clock().instant().plus(DeploymentTrigger.maxPause);
    controller.applications().deploymentTrigger().pauseJob(id, type, resumeAt);
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
/** Resumes the given (paused) job for the given instance. */
private HttpResponse resume(ApplicationId id, JobType type) {
    controller.applications().deploymentTrigger().resumeJob(id, type);
    return new MessageResponse(type.jobName() + " for " + id + " resumed");
}
/**
 * Serializes the given application to the given object: identity, deployments link, versions,
 * change status, instances, deploy keys, service metrics, activity and ownership info.
 */
private void toSlime(Cursor object, Application application, HttpRequest request) {
    object.setString("tenant", application.id().tenant().value());
    object.setString("application", application.id().application().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + application.id().tenant().value() +
                                             "/application/" + application.id().application().value() +
                                             "/job/",
                                             request.getUri()).toString());
    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.latestVersion().ifPresent(version -> toSlime(version, object.setObject("latestVersion")));
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    // Change status is reported from the first instance only.
    application.instances().values().stream().findFirst().ifPresent(instance -> {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
    });
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    Cursor instancesArray = object.setArray("instances");
    for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                  : application.instances().values())
        toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/**
 * Serializes the given instance, as part of an application listing, to the given object:
 * change status, change blockers, global endpoints and one entry per deployment.
 */
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
    object.setString("instance", instance.name().value());
    if (deploymentSpec.instance(instance.name()).isPresent()) {
        // Note: a precomputed but unused sorted job list was removed from this branch.
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
        Cursor changeBlockers = object.setArray("changeBlockers");
        deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    globalEndpointsToSlime(object, instance);
    // Deployments are listed in deployment spec order when the instance is declared there.
    List<Deployment> deployments = deploymentSpec.instance(instance.name())
                                                 .map(spec -> new DeploymentSteps(spec, controller::system))
                                                 .map(steps -> steps.sortedDeployments(instance.deployments().values()))
                                                 .orElse(List.copyOf(instance.deployments().values()));
    Cursor deploymentsArray = object.setArray("deployments");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = deploymentsArray.addObject();
        if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
        if (recurseOverDeployments(request))
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/instance/" + instance.name().value() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
}
/**
 * Writes the instance's non-legacy, rotation-backed global endpoint URLs (deduplicated,
 * in discovery order) and, if any rotation is assigned, its rotation id.
 */
private void globalEndpointsToSlime(Cursor object, Instance instance) {
    var urls = new LinkedHashSet<String>();
    for (Endpoint endpoint : controller.routing().endpointsOf(instance.id())
                                       .requiresRotation()
                                       .not().legacy()
                                       .asList())
        urls.add(endpoint.url().toString());
    var globalRotationsArray = object.setArray("globalRotations");
    urls.forEach(globalRotationsArray::addString);
    if ( ! instance.rotations().isEmpty())
        object.setString("rotationId", instance.rotations().get(0).rotationId().asString());
}
/**
 * Serializes the given instance to the given object: identity, source info, change status,
 * change blockers, endpoints, one entry per actual or expected production deployment,
 * deploy keys, service metrics, activity and ownership info.
 */
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
    Application application = status.application();
    object.setString("tenant", instance.id().tenant().value());
    object.setString("application", instance.id().application().value());
    object.setString("instance", instance.id().instance().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + instance.id().tenant().value() +
                                             "/application/" + instance.id().application().value() +
                                             "/instance/" + instance.id().instance().value() + "/job/",
                                             request.getUri()).toString());
    application.latestVersion().ifPresent(version -> {
        sourceRevisionToSlime(version.source(), object.setObject("source"));
        version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
        version.commit().ifPresent(commit -> object.setString("commit", commit));
    });
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    if (application.deploymentSpec().instance(instance.name()).isPresent()) {
        // Note: a precomputed but unused sorted job list was removed from this branch.
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    globalEndpointsToSlime(object, instance);
    // Deployments are listed in deployment spec order when the instance is declared there.
    List<Deployment> deployments =
            application.deploymentSpec().instance(instance.name())
                       .map(spec -> new DeploymentSteps(spec, controller::system))
                       .map(steps -> steps.sortedDeployments(instance.deployments().values()))
                       .orElse(List.copyOf(instance.deployments().values()));
    Cursor instancesArray = object.setArray("instances");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = instancesArray.addObject();
        if (deployment.zone().environment() == Environment.prod) {
            if (instance.rotations().size() == 1) {
                toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment),
                        deploymentObject);
            }
            if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
            }
        }
        if (recurseOverDeployments(request))
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", instance.id().instance().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
    // Also list production deployment jobs which have not yet produced a deployment.
    status.jobSteps().keySet().stream()
          .filter(job -> job.application().instance().equals(instance.name()))
          .filter(job -> job.type().isProduction() && job.type().isDeployment())
          .map(job -> job.type().zone(controller.system()))
          .filter(zone -> ! instance.deployments().containsKey(zone))
          .forEach(zone -> {
              Cursor deploymentObject = instancesArray.addObject();
              deploymentObject.setString("environment", zone.environment().value());
              deploymentObject.setString("region", zone.region().value());
          });
    application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/**
 * Returns detailed info about a single deployment of an instance.
 *
 * @throws NotExistsException if the instance does not exist, or is not deployed in the given zone
 */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment,
                                String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications()
                                  .getInstance(applicationId)
                                  .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
    DeploymentId deploymentId = new DeploymentId(instance.id(), requireZone(environment, region));
    Deployment deployment = instance.deployments().get(deploymentId.zoneId());
    if (deployment == null)
        throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId());
    Slime slime = new Slime();
    toSlime(slime.setObject(), deploymentId, deployment, request);
    return new SlimeJsonResponse(slime);
}
/** Serializes the given change: its platform version, and its application revision when known. */
private void toSlime(Cursor object, Change change) {
    change.platform().ifPresent(platform -> object.setString("version", platform.toString()));
    change.application().ifPresent(version -> {
        if ( ! version.isUnknown())
            toSlime(version, object.setObject("revision"));
    });
}
/** Serializes the given endpoint (cluster, tls, url, scope, routing method, legacy flag) to the given object. */
private void toSlime(Endpoint endpoint, Cursor object) {
    object.setString("cluster", endpoint.cluster().value());
    object.setBool("tls", endpoint.tls());
    object.setString("url", endpoint.url().toString());
    object.setString("scope", endpointScopeString(endpoint.scope()));
    object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
    object.setBool("legacy", endpoint.legacy());
}
/**
 * Serializes full info about a single deployment to the given response object:
 * identity, endpoints, navigation links, versions, job status, quota, cost,
 * archive URI, activity and deployment metrics.
 */
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
    response.setString("tenant", deploymentId.applicationId().tenant().value());
    response.setString("application", deploymentId.applicationId().application().value());
    response.setString("instance", deploymentId.applicationId().instance().value());
    response.setString("environment", deploymentId.zoneId().environment().value());
    response.setString("region", deploymentId.zoneId().region().value());
    var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));
    boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints");
    var endpointArray = response.setArray("endpoints");
    // Zone-scoped endpoints first, then global endpoints targeting this zone.
    EndpointList zoneEndpoints = controller.routing().endpointsOf(deploymentId)
            .scope(Endpoint.Scope.zone);
    if (!legacyEndpoints) {
        zoneEndpoints = zoneEndpoints.not().legacy();
    }
    for (var endpoint : controller.routing().directEndpoints(zoneEndpoints, deploymentId.applicationId())) {
        toSlime(endpoint, endpointArray.addObject());
    }
    EndpointList globalEndpoints = controller.routing().endpointsOf(application, deploymentId.applicationId().instance())
            .targets(deploymentId.zoneId());
    if (!legacyEndpoints) {
        globalEndpoints = globalEndpoints.not().legacy();
    }
    for (var endpoint : controller.routing().directEndpoints(globalEndpoints, deploymentId.applicationId())) {
        toSlime(endpoint, endpointArray.addObject());
    }
    response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
    response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
    response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
    response.setString("version", deployment.version().toFullString());
    response.setString("revision", deployment.applicationVersion().id());
    response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
    controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
            .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));
    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
    sourceRevisionToSlime(deployment.applicationVersion().source(), response);
    var instance = application.instances().get(deploymentId.applicationId().instance());
    if (instance != null) {
        if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);
        // Report job status (complete/pending/running) for the job deploying to this zone, if any.
        JobType.from(controller.system(), deployment.zone())
                .map(type -> new JobId(instance.id(), type))
                .map(status.jobSteps()::get)
                .ifPresent(stepStatus -> {
                    JobControllerApiHandlerHelper.applicationVersionToSlime(
                            response.setObject("applicationVersion"), deployment.applicationVersion());
                    if (!status.jobsToRun().containsKey(stepStatus.job().get()))
                        response.setString("status", "complete");
                    else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true))
                        response.setString("status", "pending");
                    else response.setString("status", "running");
                });
    }
    response.setDouble("quota", deployment.quota().rate());
    deployment.cost().ifPresent(cost -> response.setDouble("cost", cost));
    controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant())
            .ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString()));
    Cursor activity = response.setObject("activity");
    deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    DeploymentMetrics metrics = deployment.metrics();
    Cursor metricsObject = response.setObject("metrics");
    metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
    metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
    metricsObject.setDouble("documentCount", metrics.documentCount());
    metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
    metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
    metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}
/** Serializes the given application version (build number, hash and source info) unless it is unknown. */
private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
    if (applicationVersion.isUnknown()) return;
    object.setLong("buildNumber", applicationVersion.buildNumber().getAsLong());
    object.setString("hash", applicationVersion.id());
    sourceRevisionToSlime(applicationVersion.source(), object.setObject("source"));
    applicationVersion.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
    applicationVersion.commit().ifPresent(commit -> object.setString("commit", commit));
}
/** Serializes the given source revision (repository, branch, commit), if present. */
private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
    revision.ifPresent(source -> {
        object.setString("gitRepository", source.repository());
        object.setString("gitBranch", source.branch());
        object.setString("gitCommit", source.commit());
    });
}
/** Serializes the given rotation state as a "bcpStatus" object on the given object. */
private void toSlime(RotationState state, Cursor object) {
    object.setObject("bcpStatus").setString("rotationStatus", rotationStateString(state));
}
/** Serializes the status of each assigned rotation for the given deployment as an "endpointStatus" array. */
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
    Cursor array = object.setArray("endpointStatus");
    for (AssignedRotation assigned : rotations) {
        var entry = array.addObject();
        var targets = status.of(assigned.rotationId());
        entry.setString("endpointId", assigned.endpointId().id());
        entry.setString("rotationId", assigned.rotationId().asString());
        entry.setString("clusterId", assigned.clusterId().value());
        entry.setString("status", rotationStateString(status.of(assigned.rotationId(), deployment)));
        entry.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
    }
}
/** Returns the monitoring system URI for the given deployment. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
    var zoneRegistry = controller.zoneRegistry();
    return zoneRegistry.getMonitoringSystemUri(deploymentId);
}
/**
 * Returns a non-broken, released version at least as old as the oldest platform the given application is on.
 *
 * If no known version is applicable, the newest version at least as old as the oldest platform is selected,
 * among all versions released for this system. If no such versions exists, throws an IllegalStateException.
 */
private Version compileVersion(TenantAndApplicationId id) {
    Version oldestPlatform = controller.applications().oldestInstalledPlatform(id);
    VersionStatus versionStatus = controller.readVersionStatus();
    // First choice: the newest known version which is released, has at least 'low' confidence,
    // and is not newer than the oldest platform this application runs on.
    return versionStatus.versions().stream()
                        .filter(version -> version.confidence().equalOrHigherThan(VespaVersion.Confidence.low))
                        .filter(VespaVersion::isReleased)
                        .map(VespaVersion::versionNumber)
                        .filter(version -> ! version.isAfter(oldestPlatform))
                        .max(Comparator.naturalOrder())
                        // Fallback: the newest version published to the maven repository which is not newer
                        // than the oldest platform, excluding versions already known (and rejected) above.
                        .orElseGet(() -> controller.mavenRepository().metadata().versions().stream()
                                                   .filter(version -> ! version.isAfter(oldestPlatform))
                                                   .filter(version -> ! versionStatus.versions().stream()
                                                                                     .map(VespaVersion::versionNumber)
                                                                                     .collect(Collectors.toSet()).contains(version))
                                                   .max(Comparator.naturalOrder())
                                                   .orElseThrow(() -> new IllegalStateException("No available releases of " +
                                                                                                controller.mavenRepository().artifactId())));
}
/** Sets the given deployment in or out of service for global routing, on both routing mechanisms. */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().requireInstance(applicationId);
    ZoneId zone = requireZone(environment, region);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " has no deployment in " + zone);

    DeploymentId deploymentId = new DeploymentId(instance.id(), zone);
    // Status must be set on both mechanisms: legacy rotations and cloud routing policies.
    setGlobalRotationStatus(deploymentId, inService, request);
    setGlobalEndpointStatus(deploymentId, inService, request);
    return new MessageResponse(String.format("Successfully set %s in %s %s service",
                                             instance.id().toShortString(), zone, inService ? "in" : "out of"));
}
/** Set the global endpoint status for given deployment. This only applies to global endpoints backed by a cloud service */
private void setGlobalEndpointStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
    // Operators and tenants are recorded as distinct agents for auditing.
    GlobalRouting.Agent agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant;
    GlobalRouting.Status status = inService ? GlobalRouting.Status.in : GlobalRouting.Status.out;
    controller.routing().policies().setGlobalRoutingStatus(deployment, status, agent);
}
/** Set the global rotation status for given deployment. This only applies to global endpoints backed by a rotation */
private void setGlobalRotationStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
    Inspector requestData = toSlime(request.getData()).get();
    // A reason is mandatory so operators can tell why a deployment was taken out of service.
    String reason = mandatory("reason", requestData).asString();
    GlobalRouting.Agent agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant;
    long timestamp = controller.clock().instant().getEpochSecond();
    EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
    controller.routing().setGlobalRotationStatus(deployment, new EndpointStatus(status, reason, agent.name(), timestamp));
}
/** Lists the global rotation override status for each endpoint of the given deployment. */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    Cursor array = slime.setObject().setArray("globalrotationoverride");
    controller.routing().globalRotationStatus(deploymentId)
              .forEach((endpoint, status) -> {
                  // NOTE(review): each endpoint produces TWO consecutive array entries — the upstream id as a
                  // plain string, then a status object. Consumers appear to depend on this interleaved layout;
                  // confirm before restructuring.
                  array.addString(endpoint.upstreamIdOf(deploymentId));
                  Cursor statusObject = array.addObject();
                  statusObject.setString("status", status.getStatus().name());
                  // Nulls are mapped to empty strings so the response fields are always present.
                  statusObject.setString("reason", status.getReason() == null ? "" : status.getReason());
                  statusObject.setString("agent", status.getAgent() == null ? "" : status.getAgent());
                  statusObject.setLong("timestamp", status.getEpoch());
              });
    return new SlimeJsonResponse(slime);
}
/** Returns the rotation status of the given deployment, for the rotation selected by the optional endpoint id. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = requireZone(environment, region);
    RotationId rotation = findRotationId(instance, endpointId);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " has no deployment in " + zone);

    Slime slime = new Slime();
    toSlime(instance.rotationStatus().of(rotation, deployment), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/**
 * Returns metering data for the given tenant and application: the current resource allocation rate,
 * aggregates for this and last month, and a per-instance time series of cpu/mem/disk snapshots.
 */
private HttpResponse metering(String tenant, String application, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    MeteringData meteringData = controller.serviceRegistry()
            .meteringService()
            .getMeteringData(TenantName.from(tenant), ApplicationName.from(application));
    // Current allocation rate.
    ResourceAllocation currentSnapshot = meteringData.getCurrentSnapshot();
    Cursor currentRate = root.setObject("currentrate");
    currentRate.setDouble("cpu", currentSnapshot.getCpuCores());
    currentRate.setDouble("mem", currentSnapshot.getMemoryGb());
    currentRate.setDouble("disk", currentSnapshot.getDiskGb());
    // Aggregate for the current month.
    ResourceAllocation thisMonth = meteringData.getThisMonth();
    Cursor thismonth = root.setObject("thismonth");
    thismonth.setDouble("cpu", thisMonth.getCpuCores());
    thismonth.setDouble("mem", thisMonth.getMemoryGb());
    thismonth.setDouble("disk", thisMonth.getDiskGb());
    // Aggregate for the previous month.
    ResourceAllocation lastMonth = meteringData.getLastMonth();
    Cursor lastmonth = root.setObject("lastmonth");
    lastmonth.setDouble("cpu", lastMonth.getCpuCores());
    lastmonth.setDouble("mem", lastMonth.getMemoryGb());
    lastmonth.setDouble("disk", lastMonth.getDiskGb());
    // Per-instance snapshot history, fanned out into one (unixms, value) series per resource dimension.
    Map<ApplicationId, List<ResourceSnapshot>> history = meteringData.getSnapshotHistory();
    Cursor details = root.setObject("details");
    Cursor detailsCpu = details.setObject("cpu");
    Cursor detailsMem = details.setObject("mem");
    Cursor detailsDisk = details.setObject("disk");
    history.forEach((applicationId, resources) -> {
        String instanceName = applicationId.instance().value();
        Cursor detailsCpuApp = detailsCpu.setObject(instanceName);
        Cursor detailsMemApp = detailsMem.setObject(instanceName);
        Cursor detailsDiskApp = detailsDisk.setObject(instanceName);
        Cursor detailsCpuData = detailsCpuApp.setArray("data");
        Cursor detailsMemData = detailsMemApp.setArray("data");
        Cursor detailsDiskData = detailsDiskApp.setArray("data");
        resources.forEach(resourceSnapshot -> {
            Cursor cpu = detailsCpuData.addObject();
            cpu.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
            cpu.setDouble("value", resourceSnapshot.getCpuCores());
            Cursor mem = detailsMemData.addObject();
            mem.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
            mem.setDouble("value", resourceSnapshot.getMemoryGb());
            Cursor disk = detailsDiskData.addObject();
            disk.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
            disk.setDouble("value", resourceSnapshot.getDiskGb());
        });
    });
    return new SlimeJsonResponse(slime);
}
/** Returns the change (platform and/or application version) currently rolling out to the given instance, if any. */
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    Change change = instance.change();
    if ( ! change.isEmpty()) {
        change.platform().ifPresent(version -> root.setString("platform", version.toString()));
        change.application().ifPresent(version -> root.setString("application", version.id()));
        root.setBool("pinned", change.isPinned());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns whether orchestration of the given deployment is currently suspended. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    slime.setObject().setBool("suspended", controller.applications().isSuspended(deploymentId));
    return new SlimeJsonResponse(slime);
}
/** Returns the service view of the given deployment, as reported by the config server. */
private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
    ZoneId zone = requireZone(environment, region);
    ApplicationId applicationId = new ApplicationId.Builder().tenant(tenantName)
                                                             .applicationName(applicationName)
                                                             .instanceName(instanceName)
                                                             .build();
    ServiceApiResponse response = new ServiceApiResponse(zone,
                                                         applicationId,
                                                         List.of(controller.zoneRegistry().getConfigServerVipUri(zone)),
                                                         request.getUri());
    response.setResponse(applicationView);
    return response;
}
/** Proxies a service API request for the given deployment to the config server. */
private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    // Cluster controller status pages are served as raw HTML rather than the JSON service response below.
    if ("container-clustercontroller".equals((serviceName)) && restPath.contains("/status/")) {
        // NOTE(review): parts[1] assumes restPath has content after "/status/" — a trailing "/status/"
        // would throw ArrayIndexOutOfBoundsException here; confirm callers always supply a suffix.
        String[] parts = restPath.split("/status/");
        String result = controller.serviceRegistry().configServer().getClusterControllerStatus(deploymentId, parts[0], parts[1]);
        return new HtmlResponse(result);
    }
    Map<?,?> result = controller.serviceRegistry().configServer().getServiceApiResponse(deploymentId, serviceName, restPath);
    ServiceApiResponse response = new ServiceApiResponse(deploymentId.zoneId(),
                                                         deploymentId.applicationId(),
                                                         List.of(controller.zoneRegistry().getConfigServerVipUri(deploymentId.zoneId())),
                                                         request.getUri());
    response.setResponse(result, serviceName, restPath);
    return response;
}
/** Returns the content of the deployed application package at the given path. */
private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, String restPath, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deploymentId = new DeploymentId(applicationId, requireZone(environment, region));
    String path = "/" + restPath;
    return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, path, request.getUri());
}
/** Updates the given tenant from the request payload, returning its new serialized form. */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName); // 404 when the tenant does not exist
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    // Reuse the already-parsed tenant name instead of re-parsing it a third time.
    return tenant(controller.tenants().require(tenant), request);
}
/** Creates a tenant with the given name from the request payload, returning its serialized form. */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    var specification = accessControlRequests.specification(tenant, requestObject);
    var credentials = accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest());
    controller.tenants().create(specification, credentials);
    return tenant(controller.tenants().require(TenantName.from(tenantName)), request);
}
/** Creates the named application, returning its id; requires credentials from the request payload. */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    // Called for its side effect only; the returned Application was previously stored in an unused local.
    controller.applications().createApplication(id, credentials);
    Slime slime = new Slime();
    toSlime(id, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Creates the named instance, creating the parent application first if it does not yet exist. */
private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    if (controller.applications().getApplication(applicationId).isEmpty())
        createApplication(tenantName, applicationName, request);

    ApplicationId instanceId = applicationId.instance(instanceName);
    controller.applications().createInstance(instanceId);

    Slime slime = new Slime();
    toSlime(instanceId, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
    // The request body is the bare version string.
    String versionString = readToString(request.getData());
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Version version = Version.fromString(versionString);
        VersionStatus versionStatus = controller.readVersionStatus();
        // An empty version means "deploy the current system version".
        if (version.equals(Version.emptyVersion))
            version = controller.systemVersion(versionStatus);
        if (!versionStatus.isActive(version))
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + versionStatus.versions()
                                                                                  .stream()
                                                                                  .map(VespaVersion::versionNumber)
                                                                                  .map(Version::toString)
                                                                                  .collect(joining(", ")));
        Change change = Change.of(version);
        // Pinning prevents the upgrader from later overriding this platform choice.
        if (pin)
            change = change.withPin();
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        // Fail with a descriptive message instead of a bare NoSuchElementException from Optional.get()
        // when no application package has ever been submitted.
        Change change = Change.of(application.get().latestVersion()
                                             .orElseThrow(() -> new IllegalArgumentException("No known application package for " + id)));
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Change change = application.get().require(id.instance()).change();
        if (change.isEmpty()) {
            response.append("No deployment in progress for ").append(id).append(" at this time");
            return;
        }
        // Use a fixed locale so enum lookup cannot be broken by locale-specific casing (e.g., Turkish dotless i).
        ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase(java.util.Locale.ROOT));
        controller.applications().deploymentTrigger().cancelChange(id, cancel);
        response.append("Changed deployment from '").append(change).append("' to '").append(controller.applications().requireInstance(id).change()).append("' for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */
private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    // Comma-separated filter parameters; blank entries are ignored.
    List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream()
                                        .flatMap(clusters -> Stream.of(clusters.split(",")))
                                        .filter(cluster -> ! cluster.isBlank())
                                        .collect(toUnmodifiableList());
    List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream()
                                         .flatMap(types -> Stream.of(types.split(",")))
                                         .filter(type -> ! type.isBlank())
                                         .collect(toUnmodifiableList());
    controller.applications().reindex(id, zone, clusterNames, documentTypes, request.getBooleanProperty("indexedOnly"));
    // Bug fix: the "for types" clause was nested inside the clusters clause, so requested document
    // types were silently omitted from the message whenever no clusters were specified.
    return new MessageResponse("Requested reindexing of " + id + " in " + zone +
                               (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames)) +
                               (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)));
}
/** Gets reindexing status of an application in a zone. */
private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setBool("enabled", reindexing.enabled());
    // One entry per cluster, sorted by cluster name for stable output.
    Cursor clustersArray = root.setArray("clusters");
    reindexing.clusters().entrySet().stream().sorted(comparingByKey())
              .forEach(cluster -> {
                  Cursor clusterObject = clustersArray.addObject();
                  clusterObject.setString("name", cluster.getKey());
                  // Document types with reindexing scheduled but not yet ready, sorted by type name.
                  Cursor pendingArray = clusterObject.setArray("pending");
                  cluster.getValue().pending().entrySet().stream().sorted(comparingByKey())
                         .forEach(pending -> {
                             Cursor pendingObject = pendingArray.addObject();
                             pendingObject.setString("type", pending.getKey());
                             pendingObject.setLong("requiredGeneration", pending.getValue());
                         });
                  // Document types whose reindexing is ready, with detailed status per type.
                  Cursor readyArray = clusterObject.setArray("ready");
                  cluster.getValue().ready().entrySet().stream().sorted(comparingByKey())
                         .forEach(ready -> {
                             Cursor readyObject = readyArray.addObject();
                             readyObject.setString("type", ready.getKey());
                             setStatus(readyObject, ready.getValue());
                         });
              });
    return new SlimeJsonResponse(slime);
}
/** Writes the timing, state, message and progress fields of a reindexing status; absent values are omitted. */
void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
    status.readyAt().ifPresent(readyAt -> statusObject.setLong("readyAtMillis", readyAt.toEpochMilli()));
    status.startedAt().ifPresent(startedAt -> statusObject.setLong("startedAtMillis", startedAt.toEpochMilli()));
    status.endedAt().ifPresent(endedAt -> statusObject.setLong("endedAtMillis", endedAt.toEpochMilli()));
    // States without a wire name map to null via toString and are then skipped by ifPresent.
    status.state().map(ApplicationApiHandler::toString).ifPresent(state -> statusObject.setString("state", state));
    status.message().ifPresent(message -> statusObject.setString("message", message));
    status.progress().ifPresent(progress -> statusObject.setDouble("progress", progress));
}
/** Maps a reindexing state to its wire name; states without one map to null and are omitted from responses. */
private static String toString(ApplicationReindexing.State state) {
    switch (state) {
        case PENDING:    return "pending";
        case RUNNING:    return "running";
        case FAILED:     return "failed";
        case SUCCESSFUL: return "successful";
        default:         return null;
    }
}
/** Enables reindexing of an application in a zone. */
private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    controller.applications().enableReindexing(id, zone);
    return new MessageResponse("Enabled reindexing of " + id + " in " + zone);
}
/** Disables reindexing of an application in a zone. */
private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    controller.applications().disableReindexing(id, zone);
    return new MessageResponse("Disabled reindexing of " + id + " in " + zone);
}
/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    // Each filter is optional; an absent query parameter means "no restriction" on that dimension.
    var hostName = Optional.ofNullable(request.getProperty("hostname")).map(HostName::from);
    var clusterType = Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from);
    var clusterId = Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from);
    controller.applications().restart(deploymentId, new RestartFilter().withHostName(hostName)
                                                                       .withClusterType(clusterType)
                                                                       .withClusterId(clusterId));
    return new MessageResponse("Requested restart of " + deploymentId);
}
/** Set suspension status of the given deployment. */
private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    controller.applications().setSuspension(deploymentId, suspend);
    String action = suspend ? "Suspended" : "Resumed";
    return new MessageResponse(action + " orchestration of " + deploymentId);
}
/** Deploys an application package directly through the given job; restricted to manually deployed environments for non-operators. */
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
    if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");

    Map<String, byte[]> dataParts = parseDataParts(request);
    // Consistency fix: use the same key constant for the presence check as for retrieval below,
    // so the two lookups can never drift apart.
    if ( ! dataParts.containsKey(EnvironmentResource.APPLICATION_ZIP))
        throw new IllegalArgumentException("Missing required form part 'applicationZip'");

    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                     Optional.of(id.instance()),
                                                                     Optional.of(type.zone(controller.system())),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));

    // An optional Vespa version may be supplied in the "deployOptions" JSON form part.
    Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions"))
                                        .map(json -> SlimeUtils.jsonToSlime(json).get())
                                        .flatMap(options -> optional("vespaVersion", options))
                                        .map(Version::fromString);

    controller.jobController().deploy(id, type, version, applicationPackage);
    RunId runId = controller.jobController().last(id, type).get().id();
    Slime slime = new Slime();
    Cursor rootObject = slime.setObject();
    rootObject.setString("message", "Deployment started in " + runId +
                                    ". This may take about 15 minutes the first time.");
    rootObject.setLong("run", runId.number());
    return new SlimeJsonResponse(slime);
}
/**
 * Deploys a system application package to the given zone. Only system applications with an
 * application package may be deployed this way, never with an explicit version, and never
 * while the system itself is upgrading.
 */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( ! dataParts.containsKey("deployOptions"))
        return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
    Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
    // Only system applications with an application package are deployable through this endpoint.
    Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId);
    if (systemApplication.isEmpty() || !systemApplication.get().hasApplicationPackage()) {
        return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API");
    }
    // The system version is always used; an explicit version (other than "null") is rejected.
    String vespaVersion = deployOptions.field("vespaVersion").asString();
    if (!vespaVersion.isEmpty() && !vespaVersion.equals("null")) {
        return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted");
    }
    VersionStatus versionStatus = controller.readVersionStatus();
    if (versionStatus.isUpgrading()) {
        throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
    }
    Optional<VespaVersion> systemVersion = versionStatus.systemVersion();
    if (systemVersion.isEmpty()) {
        throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
    }
    ActivateResult result = controller.applications()
                                      .deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.get().versionNumber());
    return new SlimeJsonResponse(toSlime(result));
}
/** Deletes the given tenant, returning its last serialized form, or 404 if it does not exist. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");

    TenantName name = tenant.get().name();
    Credentials credentials = accessControlRequests.credentials(name, toSlime(request.getData()).get(), request.getJDiscRequest());
    controller.tenants().delete(name, credentials);
    return tenant(tenant.get(), request);
}
/** Deletes the given application; requires credentials from the request payload. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Inspector requestObject = toSlime(request.getData()).get();
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    controller.applications().deleteApplication(id, credentials);
    return new MessageResponse("Deleted application " + id);
}
/** Deletes the given instance, and the application itself when this was its last instance. */
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    ApplicationId instanceId = id.instance(instanceName);
    controller.applications().deleteInstance(instanceId);
    // Removing the last instance also removes the application, which requires credentials.
    if (controller.applications().requireApplication(id).instances().isEmpty()) {
        Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
        controller.applications().deleteApplication(id, credentials);
    }
    return new MessageResponse("Deleted instance " + instanceId.toFullString());
}
/** Deactivates the deployment of the given instance in the given zone. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId id = new DeploymentId(applicationId, requireZone(environment, region));
    controller.applications().deactivate(id.applicationId(), id.zoneId());
    return new MessageResponse("Deactivated " + id);
}
/** Returns test config for indicated job, with production deployments of the default instance. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
    // Production deployments are read from the DEFAULT instance, regardless of which instance runs the test.
    ApplicationId defaultInstanceId = TenantAndApplicationId.from(id).defaultInstance();
    HashSet<DeploymentId> deployments = controller.applications()
                                                  .getInstance(defaultInstanceId).stream()
                                                  .flatMap(instance -> instance.productionDeployments().keySet().stream())
                                                  .map(zone -> new DeploymentId(defaultInstanceId, zone))
                                                  .collect(Collectors.toCollection(HashSet::new));
    // For non-production jobs, the tested zone itself is added on top of the production set.
    var testedZone = type.zone(controller.system());
    if ( ! type.isProduction())
        deployments.add(new DeploymentId(id, testedZone));
    return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
                                                                  type,
                                                                  false,
                                                                  controller.routing().zoneEndpointsOf(deployments),
                                                                  controller.applications().reachableContentClustersByZone(deployments)));
}
/** Parses a source revision from the given JSON object; all three fields are required. */
private static SourceRevision toSourceRevision(Inspector object) {
    Inspector repository = object.field("repository");
    Inspector branch = object.field("branch");
    Inspector commit = object.field("commit");
    if ( ! (repository.valid() && branch.valid() && commit.valid()))
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");

    return new SourceRevision(repository.asString(), branch.asString(), commit.asString());
}
/** Returns the tenant with the given name, or throws NotExistsException. */
private Tenant getTenantOrThrow(String tenantName) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty())
        throw new NotExistsException(new TenantId(tenantName));

    return tenant.get();
}
/**
 * Serializes a tenant with its applications to {@code object}: type-specific metadata
 * (Athenz domain/property or cloud keys/quota), then the application list, then tenant metadata.
 */
private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) {
    object.setString("tenant", tenant.name().value());
    object.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            object.setString("athensDomain", athenzTenant.domain().getName());
            object.setString("property", athenzTenant.property().id());
            athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
            // Contact info is only present when synced from the property database.
            athenzTenant.contact().ifPresent(c -> {
                object.setString("propertyUrl", c.propertyUrl().toString());
                object.setString("contactsUrl", c.url().toString());
                object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
                Cursor contactsArray = object.setArray("contacts");
                c.persons().forEach(persons -> {
                    Cursor personArray = contactsArray.addArray();
                    persons.forEach(personArray::addString);
                });
            });
            break;
        case cloud: {
            CloudTenant cloudTenant = (CloudTenant) tenant;
            cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName()));
            Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
            cloudTenant.developerKeys().forEach((key, user) -> {
                Cursor keyObject = pemDeveloperKeysArray.addObject();
                keyObject.setString("key", KeyUtils.toPem(key));
                keyObject.setString("user", user.getName());
            });
            toSlime(object, cloudTenant.tenantSecretStores());
            toSlime(object.setObject("integrations").setObject("aws"),
                    controller.serviceRegistry().roleService().getTenantRole(tenant.name()),
                    cloudTenant.tenantSecretStores());
            // Quota usage is the sum over all of the tenant's applications.
            var tenantQuota = controller.serviceRegistry().billingController().getQuota(tenant.name());
            var usedQuota = applications.stream()
                                        .map(Application::quotaUsage)
                                        .reduce(QuotaUsage.none, QuotaUsage::add);
            toSlime(tenantQuota, usedQuota, object.setObject("quota"));
            cloudTenant.archiveAccessRole().ifPresent(role -> object.setString("archiveAccessRole", role));
            break;
        }
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    // Applications: full deployment status when recursing, otherwise just references.
    Cursor applicationArray = object.setArray("applications");
    for (Application application : applications) {
        DeploymentStatus status = null;
        for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                      : application.instances().values())
            if (recurseOverApplications(request)) {
                // Deployment status is computed lazily, at most once per application.
                if (status == null) status = controller.jobController().deploymentStatus(application);
                toSlime(applicationArray.addObject(), instance, status, request);
            }
            else {
                toSlime(instance.id(), applicationArray.addObject(), request);
            }
    }
    tenantMetaDataToSlime(tenant, applications, object.setObject("metaData"));
}
/** Writes the tenant's quota budget (nix when unlimited), usage rate and optional max cluster size. */
private void toSlime(Quota quota, QuotaUsage usage, Cursor object) {
    if (quota.budget().isPresent())
        object.setDouble("budget", quota.budget().get().doubleValue());
    else
        object.setNix("budget");
    object.setDouble("budgetUsed", usage.rate());
    quota.maxClusterSize().ifPresent(size -> object.setLong("clusterSize", size));
}
/** Writes node count, group count, per-node resources and the resulting cost of the given cluster resources. */
private void toSlime(ClusterResources resources, Cursor object) {
    object.setLong("nodes", resources.nodes());
    object.setLong("groups", resources.groups());
    toSlime(resources.nodeResources(), object.setObject("nodeResources"));
    object.setDouble("cost", ResourceMeterMaintainer.cost(resources, controller.serviceRegistry().zoneRegistry().system()));
}
/**
 * Serializes cluster utilization to slime: the measured, ideal and current values
 * for each of cpu, memory and disk.
 * Note: the emission order here determines field order in the rendered response, so keep it stable.
 */
private void utilizationToSlime(Cluster.Utilization utilization, Cursor utilizationObject) {
    utilizationObject.setDouble("cpu", utilization.cpu());
    utilizationObject.setDouble("idealCpu", utilization.idealCpu());
    utilizationObject.setDouble("currentCpu", utilization.currentCpu());
    utilizationObject.setDouble("memory", utilization.memory());
    utilizationObject.setDouble("idealMemory", utilization.idealMemory());
    utilizationObject.setDouble("currentMemory", utilization.currentMemory());
    utilizationObject.setDouble("disk", utilization.disk());
    utilizationObject.setDouble("idealDisk", utilization.idealDisk());
    utilizationObject.setDouble("currentDisk", utilization.currentDisk());
}
/** Adds one { from, to, at, completion? } object per autoscaling event to the given array. */
private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) {
    for (Cluster.ScalingEvent event : scalingEvents) {
        Cursor cursor = scalingEventsArray.addObject();
        toSlime(event.from(), cursor.setObject("from"));
        toSlime(event.to(), cursor.setObject("to"));
        cursor.setLong("at", event.at().toEpochMilli());
        // "completion" is only present for events that have finished
        event.completion().ifPresent(instant -> cursor.setLong("completion", instant.toEpochMilli()));
    }
}
/**
 * Serializes per-node resources to slime.
 * Emission order determines field order in the rendered response, so keep it stable.
 */
private void toSlime(NodeResources resources, Cursor object) {
    object.setDouble("vcpu", resources.vcpu());
    object.setDouble("memoryGb", resources.memoryGb());
    object.setDouble("diskGb", resources.diskGb());
    object.setDouble("bandwidthGbps", resources.bandwidthGbps());
    object.setString("diskSpeed", valueOf(resources.diskSpeed()));
    object.setString("storageType", valueOf(resources.storageType()));
}
/** Writes the short-form tenant entry (name, type metadata, url) used in the tenant list response. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
    object.setString("tenant", tenant.name().value());
    Cursor meta = object.setObject("metaData");
    meta.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            meta.setString("athensDomain", athenzTenant.domain().getName());
            meta.setString("property", athenzTenant.property().id());
            break;
        case cloud:
            break; // no extra metadata for cloud tenants
        default:
            throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/**
 * Writes tenant metadata: creation time, the most recent dev activity, the most recent
 * production submission, and last-login times per user level.
 */
private void tenantMetaDataToSlime(Tenant tenant, List<Application> applications, Cursor object) {
    // Most recent dev deployment across all instances; when no dev deployment currently
    // exists, fall back to the start time of the most recent dev job run.
    Optional<Instant> lastDev = applications.stream()
                                            .flatMap(application -> application.instances().values().stream())
                                            .flatMap(instance -> instance.deployments().values().stream())
                                            .filter(deployment -> deployment.zone().environment() == Environment.dev)
                                            .map(Deployment::at)
                                            .max(Comparator.naturalOrder())
                                            .or(() -> applications.stream()
                                                                  .flatMap(application -> application.instances().values().stream())
                                                                  .flatMap(instance -> JobType.allIn(controller.system()).stream()
                                                                                              .filter(job -> job.environment() == Environment.dev)
                                                                                              .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream()))
                                                                  .map(Run::start)
                                                                  .max(Comparator.naturalOrder()));
    // Most recent build time of any submitted application version
    Optional<Instant> lastSubmission = applications.stream()
                                                   .flatMap(app -> app.latestVersion().flatMap(ApplicationVersion::buildTime).stream())
                                                   .max(Comparator.naturalOrder());
    object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli());
    lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli()));
    lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli()));
    // Last-login fields are only emitted when a login of that level has been recorded
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user)
          .ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer)
          .ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator)
          .ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli()));
}
/**
 * Returns a copy of the given URI with the same scheme, user info, host and port,
 * but with the path replaced by {@code newPath} and the query replaced by {@code newQuery}
 * (null removes the query). The fragment is dropped.
 */
private URI withPathAndQuery(String newPath, String newQuery, URI uri) {
    try {
        return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null);
    }
    catch (URISyntaxException e) {
        // All components are copied from an already-valid URI, so this cannot trigger in practice
        throw new RuntimeException("Will not happen", e);
    }
}
/** Returns a copy of the given URI with its path replaced by the given path and the query removed. */
private URI withPath(String newPath, URI uri) {
    return withPathAndQuery(newPath, null, uri);
}
/** Returns the application/v4 API path for the given deployment id. */
private String toPath(DeploymentId id) {
    return path("/application", "v4",
                "tenant", id.applicationId().tenant(),
                "application", id.applicationId().application(),
                "instance", id.applicationId().instance(),
                "environment", id.zoneId().environment(),
                "region", id.zoneId().region());
}
/**
 * Parses the given string as a long.
 *
 * @param valueOrNull the string to parse, or null
 * @param defaultWhenNull the value to return when valueOrNull is null
 * @throws IllegalArgumentException when the value is non-null but not a valid long
 */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null) return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    }
    catch (NumberFormatException e) {
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'");
    }
}
/** Writes a compact summary of a job run: id, platform version, revision (when known) and timestamp. */
private void toSlime(Run run, Cursor object) {
    object.setLong("id", run.id().number());
    object.setString("version", run.versions().targetPlatform().toFullString());
    // Only include the revision when the target application version is actually known
    if ( ! run.versions().targetApplication().isUnknown())
        toSlime(run.versions().targetApplication(), object.setObject("revision"));
    object.setString("reason", "unknown reason"); // placeholder: run reasons are not tracked here
    // Prefer the end time; fall back to the start time for runs still in progress
    object.setLong("at", run.end().orElse(run.start()).toEpochMilli());
}
/**
 * Parses the given stream as JSON into a slime structure.
 * Reads at most 1 MB to bound memory use for request bodies.
 *
 * @throws RuntimeException wrapping the IOException when reading the stream fails
 */
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000); // 1 MB cap
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        // Previously threw a bare RuntimeException with no message or cause,
        // which made read failures impossible to diagnose from logs.
        throw new RuntimeException("Failed to read JSON from request body", e);
    }
}
/** Returns the user principal of the request, failing with 500 when the request has none. */
private static Principal requireUserPrincipal(HttpRequest request) {
    return Optional.ofNullable(request.getJDiscRequest().getUserPrincipal())
                   .orElseThrow(() -> new InternalServerErrorException("Expected a user principal"));
}
/** Returns the given field of the object, failing when the field is absent or invalid. */
private Inspector mandatory(String key, Inspector object) {
    Inspector field = object.field(key);
    if ( ! field.valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return field;
}
/** Returns the given field as an optional string; empty when the field is absent (per SlimeUtils.optionalString). */
private Optional<String> optional(String key, Inspector object) {
    return SlimeUtils.optionalString(object.field(key));
}
/** Joins the string forms of the given elements with '/' to form a path. */
private static String path(Object... elements) {
    return Joiner.on("/").join(elements);
}
/** Writes tenant name, application name and the application's API url to the given cursor. */
private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    String urlPath = "/application/v4" +
                     "/tenant/" + id.tenant().value() +
                     "/application/" + id.application().value();
    object.setString("url", withPath(urlPath, request.getUri()).toString());
}
/** Writes tenant, application and instance names plus the instance's API url to the given cursor. */
private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    object.setString("instance", id.instance().value());
    String urlPath = "/application/v4" +
                     "/tenant/" + id.tenant().value() +
                     "/application/" + id.application().value() +
                     "/instance/" + id.instance().value();
    object.setString("url", withPath(urlPath, request.getUri()).toString());
}
/**
 * Serializes the result of a deployment activation: revision id, application package size,
 * log messages from the prepare phase, and any config change actions (restarts/refeeds)
 * reported by the config server.
 */
private Slime toSlime(ActivateResult result) {
    Slime slime = new Slime();
    Cursor object = slime.setObject();
    object.setString("revisionId", result.revisionId().id());
    object.setLong("applicationZipSize", result.applicationZipSizeBytes());
    // Prepare-phase log messages, when any were produced
    Cursor logArray = object.setArray("prepareMessages");
    if (result.prepareResponse().log != null) {
        for (Log logMessage : result.prepareResponse().log) {
            Cursor logObject = logArray.addObject();
            logObject.setLong("time", logMessage.time);
            logObject.setString("level", logMessage.level);
            logObject.setString("message", logMessage.message);
        }
    }
    // Actions the config server reports must be taken for the new config to fully apply
    Cursor changeObject = object.setObject("configChangeActions");
    Cursor restartActionsArray = changeObject.setArray("restart");
    for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
        Cursor restartActionObject = restartActionsArray.addObject();
        restartActionObject.setString("clusterName", restartAction.clusterName);
        restartActionObject.setString("clusterType", restartAction.clusterType);
        restartActionObject.setString("serviceType", restartAction.serviceType);
        serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
        stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
    }
    Cursor refeedActionsArray = changeObject.setArray("refeed");
    for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
        Cursor refeedActionObject = refeedActionsArray.addObject();
        refeedActionObject.setString("name", refeedAction.name);
        refeedActionObject.setString("documentType", refeedAction.documentType);
        refeedActionObject.setString("clusterName", refeedAction.clusterName);
        serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
        stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
    }
    return slime;
}
/** Adds one { serviceName, serviceType, configId, hostName } object per service to the given array. */
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
    for (ServiceInfo info : serviceInfoList) {
        Cursor cursor = array.addObject();
        cursor.setString("serviceName", info.serviceName);
        cursor.setString("serviceType", info.serviceType);
        cursor.setString("configId", info.configId);
        cursor.setString("hostName", info.hostName);
    }
}
/** Appends each of the given strings to the given slime array. */
private void stringsToSlime(List<String> strings, Cursor array) {
    strings.forEach(array::addString);
}
/** Writes one entry per tenant secret store under a "secretStores" array on the given cursor. */
private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) {
    Cursor stores = object.setArray("secretStores");
    for (TenantSecretStore store : tenantSecretStores)
        toSlime(stores.addObject(), store);
}
/** Writes the tenant's container role and one "accounts" entry per secret store to the given cursor. */
private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) {
    object.setString("tenantRole", tenantRoles.containerRole());
    Cursor accounts = object.setArray("accounts");
    for (TenantSecretStore store : tenantSecretStores)
        toSlime(accounts.addObject(), store);
}
/** Writes a single tenant secret store (name, AWS id, role) to the given cursor. */
private void toSlime(Cursor object, TenantSecretStore secretStore) {
    object.setString("name", secretStore.getName());
    object.setString("awsId", secretStore.getAwsId());
    object.setString("role", secretStore.getRole());
}
/**
 * Reads the entire stream into a single string, or returns null when the stream is empty.
 * Decodes as UTF-8 explicitly; the previous version used the platform default charset,
 * which could mangle non-ASCII request bodies on hosts whose default is not UTF-8.
 */
private String readToString(InputStream stream) {
    // "\\A" makes the scanner consume the whole input as one token
    Scanner scanner = new Scanner(stream, java.nio.charset.StandardCharsets.UTF_8).useDelimiter("\\A");
    if ( ! scanner.hasNext()) return null;
    return scanner.next();
}
/** Returns whether the request asks for recursion at tenant level or deeper. */
private static boolean recurseOverTenants(HttpRequest request) {
    if (recurseOverApplications(request)) return true;
    return "tenant".equals(request.getProperty("recursive"));
}
/** Returns whether the request asks for recursion at application level or deeper. */
private static boolean recurseOverApplications(HttpRequest request) {
    if (recurseOverDeployments(request)) return true;
    return "application".equals(request.getProperty("recursive"));
}
/** Returns whether the request asks for recursion down to deployment level. */
private static boolean recurseOverDeployments(HttpRequest request) {
    String recursive = request.getProperty("recursive"); // may be null; equals() below is null-safe
    return "all".equals(recursive) || "true".equals(recursive) || "deployment".equals(recursive);
}
/** Returns whether the request asks to include only production instances ("production=true"). */
private static boolean showOnlyProductionInstances(HttpRequest request) {
    return "true".equals(request.getProperty("production"));
}
/**
 * Returns the API string for the given tenant's type.
 * NOTE(review): the error message here uses the class simple name, while the similar
 * switches elsewhere in this handler report tenant.type() — consider aligning.
 */
private static String tenantType(Tenant tenant) {
    switch (tenant.type()) {
        case athenz: return "ATHENS";
        case cloud: return "CLOUD";
        default: throw new IllegalArgumentException("Unknown tenant type: " + tenant.getClass().getSimpleName());
    }
}
/** Builds an ApplicationId from the {tenant}, {application} and {instance} path segments. */
private static ApplicationId appIdFromPath(Path path) {
    return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
}
/** Resolves the job type from the {jobtype} path segment. */
private static JobType jobTypeFromPath(Path path) {
    return JobType.fromJobName(path.get("jobtype"));
}
/** Builds a RunId from the application, job type and {number} path segments. */
private static RunId runIdFromPath(Path path) {
    // Parse the run number first so a malformed number fails before id construction
    long runNumber = Long.parseLong(path.get("number"));
    return new RunId(appIdFromPath(path), jobTypeFromPath(path), runNumber);
}
/**
 * Handles application package submission.
 * Parses the multipart request into submit options, the application package zip and the
 * test package zip, verifies the package's identity configuration against the tenant,
 * and forwards everything to the job controller.
 */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
    Map<String, byte[]> dataParts = parseDataParts(request);
    Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
    long projectId = Math.max(1, submitOptions.field("projectId").asLong()); // absent/zero becomes 1
    Optional<String> repository = optional("repository", submitOptions);
    Optional<String> branch = optional("branch", submitOptions);
    Optional<String> commit = optional("commit", submitOptions);
    // A source revision is only recorded when all three of repository, branch and commit are given
    Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
            ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
            : Optional.empty();
    Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
    Optional<String> authorEmail = optional("authorEmail", submitOptions);
    // Reject source URLs that are missing scheme or host
    sourceUrl.map(URI::create).ifPresent(url -> {
        if (url.getHost() == null || url.getScheme() == null)
            throw new IllegalArgumentException("Source URL must include scheme and host");
    });
    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
    controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
                                                                     Optional.empty(),
                                                                     Optional.empty(),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));
    return JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
                                                        tenant,
                                                        application,
                                                        sourceRevision,
                                                        authorEmail,
                                                        sourceUrl,
                                                        projectId,
                                                        applicationPackage,
                                                        dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP));
}
/**
 * Removes all production deployments of the given application by submitting a
 * deployment-removal application package. The submission response is intentionally
 * discarded; a generic confirmation message is returned instead.
 */
private HttpResponse removeAllProdDeployments(String tenant, String application) {
    JobControllerApiHandlerHelper.submitResponse(controller.jobController(), tenant, application,
                                                 Optional.empty(), Optional.empty(), Optional.empty(), 1,
                                                 ApplicationPackage.deploymentRemoval(), new byte[0]);
    return new MessageResponse("All deployments removed");
}
/**
 * Parses and validates the given environment/region as a zone known to this system.
 * The prod.controller pseudo-zone is accepted without a registry lookup.
 *
 * @throws IllegalArgumentException when the zone is not in the zone registry
 */
private ZoneId requireZone(String environment, String region) {
    ZoneId zone = ZoneId.from(environment, region);
    boolean controllerPseudoZone = zone.environment() == Environment.prod && zone.region().value().equals("controller");
    if ( ! controllerPseudoZone && ! controller.zoneRegistry().hasZone(zone))
        throw new IllegalArgumentException("Zone " + zone + " does not exist in this system");
    return zone;
}
/**
 * Parses the multipart payload of the request.
 * When an X-Content-Hash header is present, the body is SHA-256 digested while parsing,
 * and the digest must match the base64-decoded header value — guarding against corrupt
 * or tampered payloads.
 *
 * @throws IllegalArgumentException when the computed hash does not match the header
 */
private static Map<String, byte[]> parseDataParts(HttpRequest request) {
    String contentHash = request.getHeader("X-Content-Hash");
    if (contentHash == null)
        return new MultipartParser().parse(request);
    // Wrap the body stream so the digest is computed as the parser consumes it
    DigestInputStream digester = Signatures.sha256Digester(request.getData());
    var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
    if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
        throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
    return dataParts;
}
/**
 * Resolves the rotation id for the given instance.
 * When an endpoint id is given, the rotation assigned to that endpoint is returned;
 * otherwise the instance must have exactly one rotation.
 *
 * @throws NotExistsException when the instance has no rotations, or none match the endpoint id
 * @throws IllegalArgumentException when no endpoint id is given and the instance has several rotations
 */
private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
    if (instance.rotations().isEmpty()) {
        throw new NotExistsException("global rotation does not exist for " + instance);
    }
    if (endpointId.isPresent()) {
        return instance.rotations().stream()
                       .filter(r -> r.endpointId().id().equals(endpointId.get()))
                       .map(AssignedRotation::rotationId)
                       .findFirst()
                       .orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() +
                                                                 " does not exist for " + instance));
    } else if (instance.rotations().size() > 1) {
        throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
    }
    return instance.rotations().get(0).rotationId();
}
/** Returns the API string for a rotation state; "UNKNOWN" for any unrecognized state. */
private static String rotationStateString(RotationState state) {
    if (state == RotationState.in) return "IN";
    if (state == RotationState.out) return "OUT";
    return "UNKNOWN";
}
/** Returns the API string for an endpoint scope. */
private static String endpointScopeString(Endpoint.Scope scope) {
    if (scope == Endpoint.Scope.region) return "region";
    if (scope == Endpoint.Scope.global) return "global";
    if (scope == Endpoint.Scope.zone) return "zone";
    throw new IllegalArgumentException("Unknown endpoint scope " + scope);
}
/** Returns the API string for a routing method. */
private static String routingMethodString(RoutingMethod method) {
    if (method == RoutingMethod.exclusive) return "exclusive";
    if (method == RoutingMethod.shared) return "shared";
    if (method == RoutingMethod.sharedLayer4) return "sharedLayer4";
    throw new IllegalArgumentException("Unknown routing method " + method);
}
/**
 * Returns the request context attribute with the given name, cast to the given type.
 *
 * @throws IllegalArgumentException when the attribute is absent or of the wrong type
 */
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
    Object value = request.getJDiscRequest().context().get(attributeName);
    if (cls.isInstance(value)) // false for null, so absent attributes also fail below
        return cls.cast(value);
    throw new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request");
}
/** Returns whether the given request was made by a hosted operator. */
private static boolean isOperator(HttpRequest request) {
    var securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
    for (Role role : securityContext.roles())
        if (role.definition() == RoleDefinition.hostedOperator)
            return true;
    return false;
}
} | class ApplicationApiHandler extends AuditLoggingRequestHandler {
// Shared, thread-safe JSON mapper; constructed once per handler class
private static final ObjectMapper jsonMapper = new ObjectMapper();

private final Controller controller;
private final AccessControlRequests accessControlRequests;
private final TestConfigSerializer testConfigSerializer;

/** Creates the handler; the test config serializer is bound to the controller's system. */
@Inject
public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx,
                             Controller controller,
                             AccessControlRequests accessControlRequests) {
    super(parentCtx, controller.auditLogger());
    this.controller = controller;
    this.accessControlRequests = accessControlRequests;
    this.testConfigSerializer = new TestConfigSerializer(controller.system());
}
/** Generous request timeout — operations handled here (e.g. deployments) may be long-running. */
@Override
public Duration getTimeout() {
    return Duration.ofMinutes(20);
}
/**
 * Dispatches the request by HTTP method and maps thrown exceptions to HTTP error responses.
 * The catch order matters: more specific exception types must be handled before the
 * catch-all RuntimeException at the end.
 */
@Override
public HttpResponse auditAndHandle(HttpRequest request) {
    try {
        Path path = new Path(request.getUri());
        switch (request.getMethod()) {
            case GET: return handleGET(path, request);
            case PUT: return handlePUT(path, request);
            case POST: return handlePOST(path, request);
            case PATCH: return handlePATCH(path, request);
            case DELETE: return handleDELETE(path, request);
            case OPTIONS: return handleOPTIONS();
            default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
        }
    }
    catch (ForbiddenException e) {
        return ErrorResponse.forbidden(Exceptions.toMessageString(e));
    }
    catch (NotAuthorizedException e) {
        return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
    }
    catch (NotExistsException e) {
        return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
    }
    catch (IllegalArgumentException e) {
        return ErrorResponse.badRequest(Exceptions.toMessageString(e));
    }
    catch (ConfigServerException e) {
        // Translate config server error codes to corresponding HTTP statuses
        switch (e.code()) {
            case NOT_FOUND:
                return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
            case ACTIVATION_CONFLICT:
                return new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e));
            case INTERNAL_SERVER_ERROR:
                return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
            default:
                return new ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e));
        }
    }
    catch (RuntimeException e) {
        // Anything unexpected is logged with its stack trace and surfaced as a 500
        log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
        return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
    }
}
/**
 * Dispatches GET requests on /application/v4 to the matching handler method.
 * Routes are checked in declaration order; the first match wins. Returns 404 when no route matches.
 */
private HttpResponse handleGET(Path path, HttpRequest request) {
    if (path.matches("/application/v4/")) return root(request);
    if (path.matches("/application/v4/tenant")) return tenants(request);
    if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/metering")) return metering(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return metrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    // NOTE: an exact, unreachable duplicate of the route above was removed here.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/**
 * Dispatches PUT requests on /application/v4 to the matching handler method.
 * First matching route wins; returns 404 when no route matches.
 */
private HttpResponse handlePUT(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return allowArchiveAccess(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
// Routes POST requests to the matching application/v4 endpoint handler.
// Routes are exact path-template matches, first match wins; unmatched
// paths yield 404. Application-level deploy/submit routes pass "default"
// as the instance name; both instance/environment path orderings are
// accepted for deploy and restart (new and legacy URL layouts).
private HttpResponse handlePOST(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
// Routes PATCH requests. Both the application-level and the instance-level
// path delegate to patchApplication — the {instance} segment is accepted
// but not used, since the patched fields (majorVersion, pemDeployKey) live
// on the application, not the instance.
private HttpResponse handlePATCH(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
// Routes DELETE requests to the matching application/v4 endpoint handler.
// Routes are exact path-template matches, first match wins; unmatched
// paths yield 404. Application-level "deploying" routes cancel for the
// "default" instance; both instance/environment path orderings are
// accepted for deactivate and global-rotation override (legacy layout).
private HttpResponse handleDELETE(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return removeArchiveAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Answers OPTIONS pre-flight requests with the HTTP verbs this handler supports. */
private HttpResponse handleOPTIONS() {
    EmptyResponse allowedMethods = new EmptyResponse();
    allowedMethods.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
    return allowedMethods;
}
/**
 * Serializes every tenant, each together with the applications it owns,
 * as a JSON array. Used when the root listing is requested recursively.
 */
private HttpResponse recursiveRoot(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantsCursor = slime.setArray();
    // Fetch all applications once, then bucket them per tenant below.
    List<Application> allApplications = controller.applications().asList();
    for (Tenant tenant : controller.tenants().asList()) {
        List<Application> ownedByTenant = allApplications.stream()
                                                         .filter(application -> application.id().tenant().equals(tenant.name()))
                                                         .collect(toList());
        toSlime(tenantsCursor.addObject(), tenant, ownedByTenant, request);
    }
    return new SlimeJsonResponse(slime);
}
/** Returns the root resource: a full recursive listing when requested, otherwise just the resource links. */
private HttpResponse root(HttpRequest request) {
    if (recurseOverTenants(request))
        return recursiveRoot(request);
    return new ResourceResponse(request, "tenant");
}
/** Serializes all known tenants as a JSON array of list entries. */
private HttpResponse tenants(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantsCursor = slime.setArray();
    controller.tenants().asList()
              .forEach(tenant -> tenantInTenantsListToSlime(tenant, request.getUri(), tenantsCursor.addObject()));
    return new SlimeJsonResponse(slime);
}
/** Looks up the named tenant and serializes it, or returns 404 if it does not exist. */
private HttpResponse tenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    return tenant(tenant.get(), request);
}
/** Serializes the given tenant together with all of its applications. */
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantCursor = slime.setObject();
    toSlime(tenantCursor, tenant, controller.applications().asList(tenant.name()), request);
    return new SlimeJsonResponse(slime);
}
/**
 * Serializes the contact info of the named tenant. Returns 404 when the
 * tenant is missing or is not a cloud tenant (only cloud tenants carry info).
 */
private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isPresent() && tenant.get().type() == Tenant.Type.cloud)
        return tenantInfo(((CloudTenant) tenant.get()).info(), request);
    return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
}
/**
 * Serializes tenant contact info to JSON. An empty info object produces an
 * empty JSON object rather than fields with empty values.
 */
private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    if ( ! info.isEmpty()) {
        root.setString("name", info.name());
        root.setString("email", info.email());
        root.setString("website", info.website());
        root.setString("invoiceEmail", info.invoiceEmail());
        root.setString("contactName", info.contactName());
        root.setString("contactEmail", info.contactEmail());
        toSlime(info.address(), root);          // nested "address" object
        toSlime(info.billingContact(), root);   // nested "billingContact" object
    }
    return new SlimeJsonResponse(slime);
}
/** Writes an "address" object under the given cursor; writes nothing when the address is empty. */
private void toSlime(TenantInfoAddress address, Cursor parentCursor) {
    if (address.isEmpty())
        return;
    Cursor cursor = parentCursor.setObject("address");
    cursor.setString("addressLines", address.addressLines());
    cursor.setString("postalCodeOrZip", address.postalCodeOrZip());
    cursor.setString("city", address.city());
    cursor.setString("stateRegionProvince", address.stateRegionProvince());
    cursor.setString("country", address.country());
}
/** Writes a "billingContact" object under the given cursor; writes nothing when the contact is empty. */
private void toSlime(TenantInfoBillingContact billingContact, Cursor parentCursor) {
    if (billingContact.isEmpty())
        return;
    Cursor cursor = parentCursor.setObject("billingContact");
    cursor.setString("name", billingContact.name());
    cursor.setString("email", billingContact.email());
    cursor.setString("phone", billingContact.phone());
    toSlime(billingContact.address(), cursor);  // nested "address" object
}
/**
 * Updates contact info for the named tenant. Returns 404 when the tenant is
 * missing or is not a cloud tenant (only cloud tenants carry info).
 */
private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isPresent() && tenant.get().type() == Tenant.Type.cloud)
        return updateTenantInfo((CloudTenant) tenant.get(), request);
    return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
}
/** Returns the string value of the given field if it is valid, and {@code defaultValue} otherwise. */
private String getString(Inspector field, String defaultValue) {
    // Fixes parameter-name typo "defaultVale".
    return field.valid() ? field.asString() : defaultValue;
}
/**
 * Merges the tenant-info fields present in the request body into the tenant's
 * existing info — fields absent from the request keep their current values —
 * and stores the merged result under the tenant lock.
 *
 * Bug fix: the fallback for "website" previously used oldInfo.email(), and the
 * fallback for "contactEmail" previously used oldInfo.contactName(), so a PUT
 * omitting those fields silently corrupted them. They now fall back to their
 * own current values.
 */
private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) {
    TenantInfo oldInfo = tenant.info();
    Inspector insp = toSlime(request.getData()).get();
    TenantInfo mergedInfo = TenantInfo.EMPTY
            .withName(getString(insp.field("name"), oldInfo.name()))
            .withEmail(getString(insp.field("email"), oldInfo.email()))
            .withWebsite(getString(insp.field("website"), oldInfo.website()))
            .withInvoiceEmail(getString(insp.field("invoiceEmail"), oldInfo.invoiceEmail()))
            .withContactName(getString(insp.field("contactName"), oldInfo.contactName()))
            .withContactEmail(getString(insp.field("contactEmail"), oldInfo.contactEmail()))
            .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address()))
            .withBillingContact(updateTenantInfoBillingContact(insp.field("billingContact"), oldInfo.billingContact()));
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}
/**
 * Builds a merged address from the request fields, keeping the old value for
 * every field absent from the request. Returns the old address unchanged when
 * the request carried no address object at all.
 */
private TenantInfoAddress updateTenantInfoAddress(Inspector inspector, TenantInfoAddress oldAddress) {
    if ( ! inspector.valid())
        return oldAddress;
    return TenantInfoAddress.EMPTY
            .withCountry(getString(inspector.field("country"), oldAddress.country()))
            .withStateRegionProvince(getString(inspector.field("stateRegionProvince"), oldAddress.stateRegionProvince()))
            .withCity(getString(inspector.field("city"), oldAddress.city()))
            .withPostalCodeOrZip(getString(inspector.field("postalCodeOrZip"), oldAddress.postalCodeOrZip()))
            .withAddressLines(getString(inspector.field("addressLines"), oldAddress.addressLines()));
}
/**
 * Builds a merged billing contact from the request fields, keeping the old
 * value for every field absent from the request. Returns the old contact
 * unchanged when the request carried no billingContact object at all.
 */
private TenantInfoBillingContact updateTenantInfoBillingContact(Inspector inspector, TenantInfoBillingContact oldContact) {
    if ( ! inspector.valid())
        return oldContact;
    return TenantInfoBillingContact.EMPTY
            .withName(getString(inspector.field("name"), oldContact.name()))
            .withEmail(getString(inspector.field("email"), oldContact.email()))
            .withPhone(getString(inspector.field("phone"), oldContact.phone()))
            .withAddress(updateTenantInfoAddress(inspector.field("address"), oldContact.address()));
}
/**
 * Lists notifications for the given tenant, optionally narrowed by the
 * "application" and "instance" request properties, as a JSON array under
 * a "notifications" key.
 */
private HttpResponse notifications(String tenantName, HttpRequest request) {
    Optional<ApplicationName> application = Optional.ofNullable(request.getProperty("application")).map(ApplicationName::from);
    Optional<InstanceName> instance = Optional.ofNullable(request.getProperty("instance")).map(InstanceName::from);
    NotificationSource source = new NotificationSource(TenantName.from(tenantName), application, instance,
                                                       Optional.empty(), Optional.empty(), Optional.empty(), OptionalLong.empty());
    Slime slime = new Slime();
    Cursor notificationsArray = slime.setObject().setArray("notifications");
    controller.notificationsDb().listNotifications(source, showOnlyProductionInstances(request))
              .forEach(notification -> toSlime(notificationsArray.addObject(), notification));
    return new SlimeJsonResponse(slime);
}
// Serializes a single notification into the given JSON object cursor.
// Required fields (at/level/type/messages) are always written; source
// fields (application, instance, zone, clusterId, jobName, runNumber)
// are written only when present. The zone is flattened into separate
// "environment" and "region" keys.
private static void toSlime(Cursor cursor, Notification notification) {
cursor.setLong("at", notification.at().toEpochMilli());
cursor.setString("level", notificationLevelAsString(notification.level()));
cursor.setString("type", notificationTypeAsString(notification.type()));
Cursor messagesArray = cursor.setArray("messages");
notification.messages().forEach(messagesArray::addString);
notification.source().application().ifPresent(application -> cursor.setString("application", application.value()));
notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value()));
notification.source().zoneId().ifPresent(zoneId -> {
cursor.setString("environment", zoneId.environment().value());
cursor.setString("region", zoneId.region().value());
});
notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value()));
notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName()));
notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber));
}
/**
 * Returns the wire-format name for a notification type.
 *
 * @throws IllegalArgumentException for types with no serialization defined
 */
private static String notificationTypeAsString(Notification.Type type) {
    if (type == Notification.Type.applicationPackage) return "applicationPackage";
    if (type == Notification.Type.deployment) return "deployment";
    if (type == Notification.Type.feedBlock) return "feedBlock";
    throw new IllegalArgumentException("No serialization defined for notification type " + type);
}
/**
 * Returns the wire-format name for a notification level.
 *
 * @throws IllegalArgumentException for levels with no serialization defined
 */
private static String notificationLevelAsString(Notification.Level level) {
    if (level == Notification.Level.warning) return "warning";
    if (level == Notification.Level.error) return "error";
    throw new IllegalArgumentException("No serialization defined for notification level " + level);
}
// Lists the tenant's applications as a JSON array, each with its URL and its
// instances (production-only instances when the request asks for that).
// When applicationName is present, the listing is filtered to that single
// application. Returns 404 when the tenant does not exist.
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
TenantName tenant = TenantName.from(tenantName);
if (controller.tenants().get(tenantName).isEmpty())
return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
Slime slime = new Slime();
Cursor applicationArray = slime.setArray();
for (Application application : controller.applications().asList(tenant)) {
// Skip applications not matching the optional name filter.
if (applicationName.map(application.id().application().value()::equals).orElse(true)) {
Cursor applicationObject = applicationArray.addObject();
applicationObject.setString("tenant", application.id().tenant().value());
applicationObject.setString("application", application.id().application().value());
applicationObject.setString("url", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value(),
request.getUri()).toString());
Cursor instanceArray = applicationObject.setArray("instances");
for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
: application.instances().keySet()) {
Cursor instanceObject = instanceArray.addObject();
instanceObject.setString("instance", instance.value());
instanceObject.setString("url", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value() +
"/instance/" + instance.value(),
request.getUri()).toString());
}
}
}
return new SlimeJsonResponse(slime);
}
/**
 * Returns the stored dev application package for the zone of the given job
 * type, as a zip download.
 *
 * @throws IllegalArgumentException if the job's environment is not manually deployed
 */
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
    if ( ! type.environment().isManuallyDeployed())
        throw new IllegalArgumentException("Only manually deployed zones have dev packages");
    ZoneId zone = type.zone(controller.system());
    byte[] content = controller.applications().applicationStore().getDev(id, zone);
    String filename = id.toFullString() + "." + zone.value() + ".zip";
    return new ZipResponse(filename, content);
}
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
var tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
var applicationId = ApplicationId.from(tenantName, applicationName, InstanceName.defaultName().value());
long buildNumber;
var requestedBuild = Optional.ofNullable(request.getProperty("build")).map(build -> {
try {
return Long.parseLong(build);
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Invalid build number", e);
}
});
if (requestedBuild.isEmpty()) {
var application = controller.applications().requireApplication(tenantAndApplication);
var latestBuild = application.latestVersion().map(ApplicationVersion::buildNumber).orElse(OptionalLong.empty());
if (latestBuild.isEmpty()) {
throw new NotExistsException("No application package has been submitted for '" + tenantAndApplication + "'");
}
buildNumber = latestBuild.getAsLong();
} else {
buildNumber = requestedBuild.get();
}
var applicationPackage = controller.applications().applicationStore().find(tenantAndApplication.tenant(), tenantAndApplication.application(), buildNumber);
var filename = tenantAndApplication + "-build" + buildNumber + ".zip";
if (applicationPackage.isEmpty()) {
throw new NotExistsException("No application package found for '" +
tenantAndApplication +
"' with build number " + buildNumber);
}
return new ZipResponse(filename, applicationPackage.get());
}
/** Serializes the named application, throwing NotExistsException via getApplication when absent. */
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
    Application application = getApplication(tenantName, applicationName);
    Slime slime = new Slime();
    toSlime(slime.setObject(), application, request);
    return new SlimeJsonResponse(slime);
}
/** Returns the compile version for the given application as a single-field JSON object. */
private HttpResponse compileVersion(String tenantName, String applicationName) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Slime slime = new Slime();
    slime.setObject().setString("compileVersion", compileVersion(id).toFullString());
    return new SlimeJsonResponse(slime);
}
/** Serializes the named instance together with the deployment status of its application. */
private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    // Resolve the instance before the application, preserving which lookup fails first.
    var instance = getInstance(tenantName, applicationName, instanceName);
    var deploymentStatus = controller.jobController().deploymentStatus(getApplication(tenantName, applicationName));
    Slime slime = new Slime();
    toSlime(slime.setObject(), instance, deploymentStatus, request);
    return new SlimeJsonResponse(slime);
}
// Registers the PEM-encoded public key in the request body as a developer
// key for the requesting user on the given cloud tenant, and returns the
// tenant's resulting key list. The key list is serialized inside the lock
// callback so it reflects the state actually stored.
private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
Principal user = request.getJDiscRequest().getUserPrincipal();
String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
Slime root = new Slime();
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
tenant = tenant.withDeveloperKey(developerKey, user);
toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
controller.tenants().store(tenant);
});
return new SlimeJsonResponse(root);
}
// Asks the config server of the given deployment to validate the named
// secret store against an AWS region/parameter, and wraps the config
// server's JSON answer in a response object keyed by the deployment.
// If the config server's answer is not parseable JSON it is returned
// verbatim as a 500.
// NOTE(review): the tenant is resolved from the "application-id" property,
// not from the tenantName path segment (which is only used in the error
// message) — confirm a mismatch between the two is intended to be ignored.
private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) {
var awsRegion = request.getProperty("aws-region");
var parameterName = request.getProperty("parameter-name");
var applicationId = ApplicationId.fromFullString(request.getProperty("application-id"));
var zoneId = ZoneId.from(request.getProperty("zone"));
var deploymentId = new DeploymentId(applicationId, zoneId);
var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class);
var tenantSecretStore = tenant.tenantSecretStores()
.stream()
.filter(secretStore -> secretStore.getName().equals(secretStoreName))
.findFirst();
if (tenantSecretStore.isEmpty())
return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'");
var response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName);
try {
var responseRoot = new Slime();
var responseCursor = responseRoot.setObject();
responseCursor.setString("target", deploymentId.toString());
var responseResultCursor = responseCursor.setObject("result");
var responseSlime = SlimeUtils.jsonToSlime(response);
SlimeUtils.copyObject(responseSlime.get(), responseResultCursor);
return new SlimeJsonResponse(responseRoot);
} catch (JsonParseException e) {
return ErrorResponse.internalServerError(response);
}
}
/**
 * Removes the PEM-encoded public key in the request body from the cloud
 * tenant's developer keys, and returns the tenant's resulting key list.
 * Removing a key that is not registered is a no-op.
 *
 * Cleanup: drops the unused local {@code user} — the previous lookup of the
 * key's owner via developerKeys().get(developerKey) was never read.
 *
 * @throws IllegalArgumentException when the tenant is not a cloud tenant
 */
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    Slime root = new Slime();
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withoutDeveloperKey(developerKey);
        // Serialize inside the lock so the response reflects the stored state.
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}
/** Serializes each (public key, owner) pair as an object with "key" (PEM) and "user" fields. */
private void toSlime(Cursor keysArray, Map<PublicKey, Principal> keys) {
    for (Map.Entry<PublicKey, Principal> entry : keys.entrySet()) {
        Cursor keyObject = keysArray.addObject();
        keyObject.setString("key", KeyUtils.toPem(entry.getKey()));
        keyObject.setString("user", entry.getValue().getName());
    }
}
// Adds the PEM-encoded public key in the request body as a deploy key for
// the application, and returns the application's resulting deploy-key list.
// The list is serialized inside the lock callback so it reflects the state
// actually stored.
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
Slime root = new Slime();
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
application = application.withDeployKey(deployKey);
application.get().deployKeys().stream()
.map(KeyUtils::toPem)
.forEach(root.setObject().setArray("keys")::addString);
controller.applications().store(application);
});
return new SlimeJsonResponse(root);
}
// Removes the PEM-encoded public key in the request body from the
// application's deploy keys, and returns the resulting deploy-key list.
// Mirrors addDeployKey; removing an unregistered key is a no-op.
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
Slime root = new Slime();
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
application = application.withoutDeployKey(deployKey);
application.get().deployKeys().stream()
.map(KeyUtils::toPem)
.forEach(root.setObject().setArray("keys")::addString);
controller.applications().store(application);
});
return new SlimeJsonResponse(root);
}
// Configures a new secret store for a cloud tenant. The request body must
// carry awsId, externalId and role. Side effects happen in order: a tenant
// policy is created via the role service, the store is registered with the
// tenant secret service (with the external id), and finally the store is
// persisted on the tenant under the tenant lock. The response is built from
// a fresh read of the tenant so it reflects the stored state.
// NOTE(review): the side effects are not transactional — a failure partway
// leaves earlier steps applied; confirm that is acceptable.
private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
var data = toSlime(request.getData()).get();
var awsId = mandatory("awsId", data).asString();
var externalId = mandatory("externalId", data).asString();
var role = mandatory("role", data).asString();
var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var tenantSecretStore = new TenantSecretStore(name, awsId, role);
if (!tenantSecretStore.isValid()) {
return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid");
}
if (tenant.tenantSecretStores().contains(tenantSecretStore)) {
return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured");
}
controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role);
controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId);
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withSecretStore(tenantSecretStore);
controller.tenants().store(lockedTenant);
});
// Re-read so the response shows what was actually stored.
tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var slime = new Slime();
toSlime(slime.setObject(), tenant.tenantSecretStores());
return new SlimeJsonResponse(slime);
}
// Deletes a named secret store from a cloud tenant. Side effects happen in
// order: the store is removed from the tenant secret service, its tenant
// policy is deleted via the role service, and finally the store is removed
// from the tenant under the tenant lock. Returns 404 when no store with
// the given name is configured. The response is built from a fresh read of
// the tenant so it reflects the stored state.
private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) {
var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var optionalSecretStore = tenant.tenantSecretStores().stream()
.filter(secretStore -> secretStore.getName().equals(name))
.findFirst();
if (optionalSecretStore.isEmpty())
return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found");
var tenantSecretStore = optionalSecretStore.get();
controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore);
controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole());
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withoutSecretStore(tenantSecretStore);
controller.tenants().store(lockedTenant);
});
// Re-read so the response shows what was actually stored.
tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var slime = new Slime();
toSlime(slime.setObject(), tenant.tenantSecretStores());
return new SlimeJsonResponse(slime);
}
/**
 * Sets the archive access role for a cloud tenant from the mandatory "role"
 * field of the request body. Rejects blank roles with 400.
 *
 * @throws IllegalArgumentException when the tenant is not a cloud tenant
 */
private HttpResponse allowArchiveAccess(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    var role = mandatory("role", toSlime(request.getData()).get()).asString();
    if (role.isBlank())
        return ErrorResponse.badRequest("Archive access role can't be whitespace only");
    controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant ->
            controller.tenants().store(lockedTenant.withArchiveAccessRole(Optional.of(role))));
    return new MessageResponse("Archive access role set to '" + role + "' for tenant " + tenantName + ".");
}
/**
 * Revokes archive access for a cloud tenant by clearing its stored archive access role.
 *
 * @throws IllegalArgumentException if the tenant is not a cloud tenant
 */
private HttpResponse removeArchiveAccess(String tenantName) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, locked ->
            controller.tenants().store(locked.withArchiveAccessRole(Optional.empty())));
    return new MessageResponse("Archive access role removed for tenant " + tenantName + ".");
}
/**
 * Patches application-level settings from the request body.
 * Supported fields: "majorVersion" (0 clears the pinned major version) and
 * "pemDeployKey" (adds a deploy key). Returns a message describing the changes applied.
 */
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
Inspector requestObject = toSlime(request.getData()).get();
StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
Inspector majorVersionField = requestObject.field("majorVersion");
if (majorVersionField.valid()) {
// A major version of 0 means "unpin": the stored value becomes empty.
Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
application = application.withMajorVersion(majorVersion);
messageBuilder.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
}
Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
if (pemDeployKeyField.valid()) {
String pemDeployKey = pemDeployKeyField.asString();
PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
application = application.withDeployKey(deployKey);
messageBuilder.add("Added deploy key " + pemDeployKey);
}
controller.applications().store(application);
});
return new MessageResponse(messageBuilder.toString());
}
/** Returns the application with the given tenant and application name, or throws if it does not exist. */
private Application getApplication(String tenantName, String applicationName) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Optional<Application> application = controller.applications().getApplication(id);
    if (application.isEmpty())
        throw new NotExistsException(id + " not found");
    return application.get();
}
/** Returns the given application instance, or throws if it does not exist. */
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Optional<Instance> instance = controller.applications().getInstance(id);
    if (instance.isEmpty())
        throw new NotExistsException(id + " not found");
    return instance.get();
}
/**
 * Lists the nodes of the given deployment, with state, orchestration status,
 * versions, resources and health/maintenance flags.
 */
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, id);
Slime slime = new Slime();
Cursor nodesArray = slime.setObject().setArray("nodes");
for (Node node : nodes) {
Cursor nodeObject = nodesArray.addObject();
nodeObject.setString("hostname", node.hostname().value());
nodeObject.setString("state", valueOf(node.state()));
node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
nodeObject.setString("orchestration", valueOf(node.serviceState()));
nodeObject.setString("version", node.currentVersion().toString());
nodeObject.setString("flavor", node.flavor());
toSlime(node.resources(), nodeObject);
nodeObject.setString("clusterId", node.clusterId());
nodeObject.setString("clusterType", valueOf(node.clusterType()));
// A node is reported down if any "down" event is present in its history.
nodeObject.setBool("down", node.history().stream().anyMatch(event -> "down".equals(event.getEvent())));
nodeObject.setBool("retired", node.retired() || node.wantToRetire());
// Restart/reboot are pending while the wanted generation is ahead of the current one.
nodeObject.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration());
nodeObject.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration());
}
return new SlimeJsonResponse(slime);
}
/**
 * Renders autoscaling info for each cluster of the given deployment: resource limits,
 * current/target/suggested resources, utilization and recent scaling events.
 */
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);
Slime slime = new Slime();
Cursor clustersObject = slime.setObject().setObject("clusters");
for (Cluster cluster : application.clusters().values()) {
Cursor clusterObject = clustersObject.setObject(cluster.id().value());
clusterObject.setString("type", cluster.type().name());
toSlime(cluster.min(), clusterObject.setObject("min"));
toSlime(cluster.max(), clusterObject.setObject("max"));
toSlime(cluster.current(), clusterObject.setObject("current"));
// Only report a target when it actually differs from the current resources.
if (cluster.target().isPresent()
&& ! cluster.target().get().justNumbers().equals(cluster.current().justNumbers()))
toSlime(cluster.target().get(), clusterObject.setObject("target"));
cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested")));
utilizationToSlime(cluster.utilization(), clusterObject.setObject("utilization"));
scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
clusterObject.setString("autoscalingStatusCode", cluster.autoscalingStatusCode());
clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus());
clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis());
clusterObject.setDouble("maxQueryGrowthRate", cluster.maxQueryGrowthRate());
clusterObject.setDouble("currentQueryFractionOfMax", cluster.currentQueryFractionOfMax());
}
return new SlimeJsonResponse(slime);
}
/**
 * Returns the API string for the given node state.
 * The wire names coincide with the enum constant names, so known states map to name().
 *
 * @throws IllegalArgumentException for states not exposed by this API
 */
private static String valueOf(Node.State state) {
    switch (state) {
        case failed: case parked: case dirty: case ready:
        case active: case inactive: case reserved: case provisioned:
            return state.name();
        default:
            throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
    }
}
/**
 * Returns the API string for the given orchestration (service) state.
 * Known states map to their enum constant name; anything else is reported as "unknown".
 */
static String valueOf(Node.ServiceState state) {
    switch (state) {
        case expectedUp: case allowedDown: case permanentlyDown: case unorchestrated:
            return state.name();
        default:
            return "unknown";
    }
}
/**
 * Returns the API string for the given cluster type; wire names equal the enum constant names.
 *
 * @throws IllegalArgumentException for cluster types not exposed by this API
 */
private static String valueOf(Node.ClusterType type) {
    switch (type) {
        case admin: case content: case container: case combined:
            return type.name();
        default:
            throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
    }
}
/**
 * Returns the API string for the given disk speed; wire names equal the enum constant names.
 *
 * @throws IllegalArgumentException for disk speeds not exposed by this API
 */
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
    switch (diskSpeed) {
        case fast: case slow: case any:
            return diskSpeed.name();
        default:
            throw new IllegalArgumentException("Unknown disk speed '" + diskSpeed.name() + "'");
    }
}
/**
 * Returns the API string for the given storage type; wire names equal the enum constant names.
 *
 * @throws IllegalArgumentException for storage types not exposed by this API
 */
private static String valueOf(NodeResources.StorageType storageType) {
    switch (storageType) {
        case remote: case local: case any:
            return storageType.name();
        default:
            throw new IllegalArgumentException("Unknown storage type '" + storageType.name() + "'");
    }
}
/**
 * Streams the logs of the given deployment to the client.
 * Query parameters are passed through to the config server's log retrieval API.
 */
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
DeploymentId deployment = new DeploymentId(application, zone);
InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
return new HttpResponse(200) {
@Override
public void render(OutputStream outputStream) throws IOException {
// try-with-resources closes the log stream when rendering completes, also on failure.
try (logStream) {
logStream.transferTo(outputStream);
}
}
};
}
/** Returns the current support access state of the given deployment. */
private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deployment = new DeploymentId(application, requireZone(environment, region));
    SupportAccess state = controller.supportAccess().forDeployment(deployment);
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(state, controller.clock().instant()));
}
/** Grants the requesting user support access to the given deployment for the next 7 days. */
private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deployment = new DeploymentId(application, requireZone(environment, region));
    Instant now = controller.clock().instant();
    Instant expiry = now.plus(7, ChronoUnit.DAYS);
    SupportAccess allowed = controller.supportAccess().allow(deployment, expiry, requireUserPrincipal(request).getName());
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(allowed, now));
}
/** Returns proton (content node) metrics for the given deployment as JSON. */
private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    List<ProtonMetrics> metrics = controller.serviceRegistry().configServer().getProtonMetrics(deployment);
    return buildResponseFromProtonMetrics(metrics);
}
/**
 * Wraps the given proton metrics in a pretty-printed JSON response of the form
 * {"metrics": [...]}; returns an empty 500 response if serialization fails.
 */
private JsonResponse buildResponseFromProtonMetrics(List<ProtonMetrics> protonMetrics) {
    var metricsArray = jsonMapper.createArrayNode();
    protonMetrics.forEach(metrics -> metricsArray.add(metrics.toJson()));
    var root = jsonMapper.createObjectNode();
    root.set("metrics", metricsArray);
    try {
        // Only the final serialization can throw; node construction above is infallible.
        return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(root));
    } catch (JsonProcessingException e) {
        log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
        return new JsonResponse(500, "");
    }
}
/**
 * Triggers the given job for the given application instance.
 * With "reTrigger" set in the request body, the currently running run is re-triggered;
 * otherwise the job is force-triggered, optionally skipping tests when "skipTests" is set.
 */
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
Inspector requestObject = toSlime(request.getData()).get();
boolean requireTests = ! requestObject.field("skipTests").asBool();
boolean reTrigger = requestObject.field("reTrigger").asBool();
String triggered = reTrigger
? controller.applications().deploymentTrigger()
.reTrigger(id, type).type().jobName()
: controller.applications().deploymentTrigger()
.forceTrigger(id, type, request.getJDiscRequest().getUserPrincipal().getName(), requireTests)
.stream().map(job -> job.type().jobName()).collect(joining(", "));
// forceTrigger may trigger no jobs, in which case the joined name list is empty.
return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
: "Triggered " + triggered + " for " + id);
}
/** Pauses the given job for the given instance, for the maximum allowed pause period. */
private HttpResponse pause(ApplicationId id, JobType type) {
    controller.applications().deploymentTrigger()
              .pauseJob(id, type, controller.clock().instant().plus(DeploymentTrigger.maxPause));
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
/** Resumes a previously paused job for the given instance. */
private HttpResponse resume(ApplicationId id, JobType type) {
    DeploymentTrigger trigger = controller.applications().deploymentTrigger();
    trigger.resumeJob(id, type);
    return new MessageResponse(type.jobName() + " for " + id + " resumed");
}
/**
 * Renders the given application to the response object: change status, instances,
 * deploy keys, service quality metrics, activity and issue tracking references.
 * Field order here is the order fields appear in the JSON response.
 */
private void toSlime(Cursor object, Application application, HttpRequest request) {
object.setString("tenant", application.id().tenant().value());
object.setString("application", application.id().application().value());
object.setString("deployments", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value() +
"/job/",
request.getUri()).toString());
DeploymentStatus status = controller.jobController().deploymentStatus(application);
application.latestVersion().ifPresent(version -> toSlime(version, object.setObject("latestVersion")));
application.projectId().ifPresent(id -> object.setLong("projectId", id));
// Deploying/outstanding change is reported for the first instance only, if any.
application.instances().values().stream().findFirst().ifPresent(instance -> {
if ( ! instance.change().isEmpty())
toSlime(object.setObject("deploying"), instance.change());
if ( ! status.outstandingChange(instance.name()).isEmpty())
toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
});
application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
Cursor instancesArray = object.setArray("instances");
// Optionally restrict the listing to production instances, based on a request parameter.
for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
: application.instances().values())
toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);
application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
Cursor metricsObject = object.setObject("metrics");
metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
Cursor activity = object.setObject("activity");
application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/**
 * Renders the given instance to the response object: change status, change blockers,
 * global endpoints and all its deployments (recursively when requested).
 *
 * Fix: removed a local List&lt;JobStatus&gt; that was computed from the deployment trigger
 * but never read — dead code with no observable effect.
 */
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
    object.setString("instance", instance.name().value());
    if (deploymentSpec.instance(instance.name()).isPresent()) {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
        // Time windows during which version and/or revision changes are blocked for this instance.
        Cursor changeBlockers = object.setArray("changeBlockers");
        deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    globalEndpointsToSlime(object, instance);
    // Deployments are ordered by the deployment spec when one exists, otherwise as stored.
    List<Deployment> deployments = deploymentSpec.instance(instance.name())
            .map(spec -> new DeploymentSteps(spec, controller::system))
            .map(steps -> steps.sortedDeployments(instance.deployments().values()))
            .orElse(List.copyOf(instance.deployments().values()));
    Cursor deploymentsArray = object.setArray("deployments");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = deploymentsArray.addObject();
        if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
        if (recurseOverDeployments(request)) // Include full details of each deployment
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else { // Only a reference (environment, region, url) to the deployment
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/instance/" + instance.name().value() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
}
/**
 * Adds global routing info for the given instance to the response object:
 * the URLs of its non-legacy rotation-backed endpoints, and its first rotation ID, if any.
 */
private void globalEndpointsToSlime(Cursor object, Instance instance) {
    Cursor globalRotationsArray = object.setArray("globalRotations");
    controller.routing().endpointsOf(instance.id())
              .requiresRotation()
              .not().legacy()
              .asList().stream()
              .map(endpoint -> endpoint.url().toString())
              .distinct()                       // keep first occurrence, preserving encounter order
              .forEach(globalRotationsArray::addString);
    if ( ! instance.rotations().isEmpty())
        object.setString("rotationId", instance.rotations().get(0).rotationId().asString());
}
/**
 * Renders a single instance, with its application context, to the response object:
 * identity, source info, change status, change blockers, endpoints, deployments
 * (recursively when requested), declared-but-missing production zones, deploy keys,
 * metrics, activity and issue references.
 *
 * Fix: removed a local List&lt;JobStatus&gt; that was computed from the deployment trigger
 * but never read — dead code with no observable effect.
 */
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
    Application application = status.application();
    object.setString("tenant", instance.id().tenant().value());
    object.setString("application", instance.id().application().value());
    object.setString("instance", instance.id().instance().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + instance.id().tenant().value() +
                                             "/application/" + instance.id().application().value() +
                                             "/instance/" + instance.id().instance().value() + "/job/",
                                             request.getUri()).toString());
    application.latestVersion().ifPresent(version -> {
        sourceRevisionToSlime(version.source(), object.setObject("source"));
        version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
        version.commit().ifPresent(commit -> object.setString("commit", commit));
    });
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    if (application.deploymentSpec().instance(instance.name()).isPresent()) {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
        // Time windows during which version and/or revision changes are blocked for this instance.
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    globalEndpointsToSlime(object, instance);
    // Deployments are ordered by the deployment spec when one exists, otherwise as stored.
    List<Deployment> deployments =
            application.deploymentSpec().instance(instance.name())
                       .map(spec -> new DeploymentSteps(spec, controller::system))
                       .map(steps -> steps.sortedDeployments(instance.deployments().values()))
                       .orElse(List.copyOf(instance.deployments().values()));
    Cursor instancesArray = object.setArray("instances");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = instancesArray.addObject();
        if (deployment.zone().environment() == Environment.prod) {
            if (instance.rotations().size() == 1) {
                toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment),
                        deploymentObject);
            }
            if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
            }
        }
        if (recurseOverDeployments(request)) // Include full details of each deployment
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else { // Only a reference (environment, region, url) to the deployment
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", instance.id().instance().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
    // List production zones the deployment spec expects but which are not yet deployed to.
    status.jobSteps().keySet().stream()
          .filter(job -> job.application().instance().equals(instance.name()))
          .filter(job -> job.type().isProduction() && job.type().isDeployment())
          .map(job -> job.type().zone(controller.system()))
          .filter(zone -> ! instance.deployments().containsKey(zone))
          .forEach(zone -> {
              Cursor deploymentObject = instancesArray.addObject();
              deploymentObject.setString("environment", zone.environment().value());
              deploymentObject.setString("region", zone.region().value());
          });
    application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/** Renders the deployment of the given instance in the given zone, with full details. */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment,
                                String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().getInstance(id)
                                  .orElseThrow(() -> new NotExistsException(id + " not found"));
    ZoneId zone = requireZone(environment, region);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " is not deployed in " + zone);
    Slime slime = new Slime();
    toSlime(slime.setObject(), new DeploymentId(instance.id(), zone), deployment, request);
    return new SlimeJsonResponse(slime);
}
/** Writes the platform version and the (known) application revision of the given change. */
private void toSlime(Cursor object, Change change) {
    change.platform().ifPresent(platform -> object.setString("version", platform.toString()));
    change.application().ifPresent(revision -> {
        if ( ! revision.isUnknown())
            toSlime(revision, object.setObject("revision"));
    });
}
/** Renders a single endpoint: cluster, TLS flag, URL, scope, routing method and legacy flag. */
private void toSlime(Endpoint endpoint, Cursor object) {
object.setString("cluster", endpoint.cluster().value());
object.setBool("tls", endpoint.tls());
object.setString("url", endpoint.url().toString());
object.setString("scope", endpointScopeString(endpoint.scope()));
object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
object.setBool("legacy", endpoint.legacy());
}
/**
 * Renders full details of a deployment to the response object: identity, endpoints
 * (zone- and global-scoped, optionally including legacy ones), related links, versions,
 * rotation status, job status, quota/cost, archive URI, activity and metrics.
 */
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
response.setString("tenant", deploymentId.applicationId().tenant().value());
response.setString("application", deploymentId.applicationId().application().value());
response.setString("instance", deploymentId.applicationId().instance().value());
response.setString("environment", deploymentId.zoneId().environment().value());
response.setString("region", deploymentId.zoneId().region().value());
var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));
// Legacy endpoints are excluded unless explicitly requested.
boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints");
var endpointArray = response.setArray("endpoints");
EndpointList zoneEndpoints = controller.routing().endpointsOf(deploymentId)
.scope(Endpoint.Scope.zone);
if (!legacyEndpoints) {
zoneEndpoints = zoneEndpoints.not().legacy();
}
for (var endpoint : controller.routing().directEndpoints(zoneEndpoints, deploymentId.applicationId())) {
toSlime(endpoint, endpointArray.addObject());
}
// Global endpoints are included only when they target this deployment's zone.
EndpointList globalEndpoints = controller.routing().endpointsOf(application, deploymentId.applicationId().instance())
.targets(deploymentId.zoneId());
if (!legacyEndpoints) {
globalEndpoints = globalEndpoints.not().legacy();
}
for (var endpoint : controller.routing().directEndpoints(globalEndpoints, deploymentId.applicationId())) {
toSlime(endpoint, endpointArray.addObject());
}
response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
response.setString("version", deployment.version().toFullString());
response.setString("revision", deployment.applicationVersion().id());
response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
// Expiry is only reported for zones with a deployment time-to-live (e.g. dev/perf).
controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
.ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));
DeploymentStatus status = controller.jobController().deploymentStatus(application);
application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
sourceRevisionToSlime(deployment.applicationVersion().source(), response);
var instance = application.instances().get(deploymentId.applicationId().instance());
if (instance != null) {
if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);
// Job status for this zone: complete, pending (waiting to become ready) or running.
JobType.from(controller.system(), deployment.zone())
.map(type -> new JobId(instance.id(), type))
.map(status.jobSteps()::get)
.ifPresent(stepStatus -> {
JobControllerApiHandlerHelper.applicationVersionToSlime(
response.setObject("applicationVersion"), deployment.applicationVersion());
if (!status.jobsToRun().containsKey(stepStatus.job().get()))
response.setString("status", "complete");
else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true))
response.setString("status", "pending");
else response.setString("status", "running");
});
}
response.setDouble("quota", deployment.quota().rate());
deployment.cost().ifPresent(cost -> response.setDouble("cost", cost));
controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant())
.ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString()));
Cursor activity = response.setObject("activity");
deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
instant.toEpochMilli()));
deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
instant.toEpochMilli()));
deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
DeploymentMetrics metrics = deployment.metrics();
Cursor metricsObject = response.setObject("metrics");
metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
metricsObject.setDouble("documentCount", metrics.documentCount());
metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}
/** Renders the given application version (build number, hash, source info); unknown versions are skipped. */
private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
    if (applicationVersion.isUnknown()) return;
    object.setLong("buildNumber", applicationVersion.buildNumber().getAsLong());
    object.setString("hash", applicationVersion.id());
    sourceRevisionToSlime(applicationVersion.source(), object.setObject("source"));
    applicationVersion.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
    applicationVersion.commit().ifPresent(commit -> object.setString("commit", commit));
}
/** Writes git repository, branch and commit of the given source revision, if present. */
private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
    revision.ifPresent(source -> {
        object.setString("gitRepository", source.repository());
        object.setString("gitBranch", source.branch());
        object.setString("gitCommit", source.commit());
    });
}
/** Writes the BCP rotation status of the given rotation state. */
private void toSlime(RotationState state, Cursor object) {
    object.setObject("bcpStatus").setString("rotationStatus", rotationStateString(state));
}
/** Renders the endpoint status of each assigned rotation for the given deployment. */
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
    Cursor array = object.setArray("endpointStatus");
    for (AssignedRotation rotation : rotations) {
        Cursor statusObject = array.addObject();
        statusObject.setString("endpointId", rotation.endpointId().id());
        statusObject.setString("rotationId", rotation.rotationId().asString());
        statusObject.setString("clusterId", rotation.clusterId().value());
        statusObject.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment)));
        statusObject.setLong("lastUpdated", status.of(rotation.rotationId()).lastUpdated().toEpochMilli());
    }
}
/** Returns the monitoring system (Yamas) dashboard URI for the given deployment. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}
/**
 * Returns a non-broken, released version at least as old as the oldest platform the given application is on.
 *
 * If no known version is applicable, the newest version at least as old as the oldest platform is selected,
 * among all versions released for this system. If no such versions exists, throws an IllegalStateException.
 */
private Version compileVersion(TenantAndApplicationId id) {
Version oldestPlatform = controller.applications().oldestInstalledPlatform(id);
VersionStatus versionStatus = controller.readVersionStatus();
// First choice: the newest released version with at least "low" confidence
// which is not newer than the oldest installed platform.
return versionStatus.versions().stream()
.filter(version -> version.confidence().equalOrHigherThan(VespaVersion.Confidence.low))
.filter(VespaVersion::isReleased)
.map(VespaVersion::versionNumber)
.filter(version -> ! version.isAfter(oldestPlatform))
.max(Comparator.naturalOrder())
// Fallback: the newest maven-repository version not newer than the oldest platform,
// excluding versions already considered (and rejected) above.
.orElseGet(() -> controller.mavenRepository().metadata().versions().stream()
.filter(version -> ! version.isAfter(oldestPlatform))
.filter(version -> ! versionStatus.versions().stream()
.map(VespaVersion::versionNumber)
.collect(Collectors.toSet()).contains(version))
.max(Comparator.naturalOrder())
.orElseThrow(() -> new IllegalStateException("No available releases of " +
controller.mavenRepository().artifactId())));
}
/** Sets the given deployment in or out of service for both rotation- and policy-based global endpoints. */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().requireInstance(applicationId);
    ZoneId zone = requireZone(environment, region);
    if ( ! instance.deployments().containsKey(zone))
        throw new NotExistsException(instance + " has no deployment in " + zone);
    DeploymentId deploymentId = new DeploymentId(instance.id(), zone);
    setGlobalRotationStatus(deploymentId, inService, request);
    setGlobalEndpointStatus(deploymentId, inService, request);
    String message = String.format("Successfully set %s in %s %s service",
                                   instance.id().toShortString(), zone, inService ? "in" : "out of");
    return new MessageResponse(message);
}
/** Set the global endpoint status for given deployment. This only applies to global endpoints backed by a cloud service */
private void setGlobalEndpointStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
    // Operator and tenant changes are recorded with distinct agents so they can be told apart later.
    GlobalRouting.Agent agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant;
    GlobalRouting.Status status = inService ? GlobalRouting.Status.in : GlobalRouting.Status.out;
    controller.routing().policies().setGlobalRoutingStatus(deployment, status, agent);
}
/** Set the global rotation status for given deployment. This only applies to global endpoints backed by a rotation */
private void setGlobalRotationStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
    var requestData = toSlime(request.getData()).get();
    // A reason is mandatory in the request body.
    String reason = mandatory("reason", requestData).asString();
    GlobalRouting.Agent agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant;
    long timestamp = controller.clock().instant().getEpochSecond();
    EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
    controller.routing().setGlobalRotationStatus(deployment, new EndpointStatus(status, reason, agent.name(), timestamp));
}
/** Returns the current global rotation status overrides for each endpoint of the given deployment. */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    // NOTE(review): legacy response shape — the array interleaves each endpoint's upstream name
    // (a string element) with its status (an object element), rather than nesting them.
    Cursor array = slime.setObject().setArray("globalrotationoverride");
    controller.routing().globalRotationStatus(deploymentId)
              .forEach((endpoint, status) -> {
                  array.addString(endpoint.upstreamIdOf(deploymentId));
                  Cursor statusObject = array.addObject();
                  statusObject.setString("status", status.getStatus().name());
                  // Reason and agent may be null; serialize as empty strings instead of omitting the fields.
                  statusObject.setString("reason", status.getReason() == null ? "" : status.getReason());
                  statusObject.setString("agent", status.getAgent() == null ? "" : status.getAgent());
                  statusObject.setLong("timestamp", status.getEpoch());
              });
    return new SlimeJsonResponse(slime);
}
/** Returns the rotation status of one endpoint of an instance, in the given zone. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().requireInstance(applicationId);
    ZoneId zone = requireZone(environment, region);
    RotationId rotation = findRotationId(instance, endpointId);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " has no deployment in " + zone);
    Slime slime = new Slime();
    toSlime(instance.rotationStatus().of(rotation, deployment), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/**
 * Returns metering data for the given tenant and application: the current resource allocation rate,
 * aggregated usage for this and last month, and a per-instance time series of resource snapshots.
 */
private HttpResponse metering(String tenant, String application, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    MeteringData meteringData = controller.serviceRegistry()
                                          .meteringService()
                                          .getMeteringData(TenantName.from(tenant), ApplicationName.from(application));
    // The three aggregate views all carry the same cpu/mem/disk triple.
    resourceAllocationToSlime(meteringData.getCurrentSnapshot(), root.setObject("currentrate"));
    resourceAllocationToSlime(meteringData.getThisMonth(), root.setObject("thismonth"));
    resourceAllocationToSlime(meteringData.getLastMonth(), root.setObject("lastmonth"));
    // Per-instance history: parallel time series for cpu, mem and disk under "details".
    Map<ApplicationId, List<ResourceSnapshot>> history = meteringData.getSnapshotHistory();
    Cursor details = root.setObject("details");
    Cursor detailsCpu = details.setObject("cpu");
    Cursor detailsMem = details.setObject("mem");
    Cursor detailsDisk = details.setObject("disk");
    history.forEach((applicationId, resources) -> {
        String instanceName = applicationId.instance().value();
        Cursor detailsCpuData = detailsCpu.setObject(instanceName).setArray("data");
        Cursor detailsMemData = detailsMem.setObject(instanceName).setArray("data");
        Cursor detailsDiskData = detailsDisk.setObject(instanceName).setArray("data");
        resources.forEach(snapshot -> {
            long at = snapshot.getTimestamp().toEpochMilli();
            addMeteringPoint(detailsCpuData, at, snapshot.getCpuCores());
            addMeteringPoint(detailsMemData, at, snapshot.getMemoryGb());
            addMeteringPoint(detailsDiskData, at, snapshot.getDiskGb());
        });
    });
    return new SlimeJsonResponse(slime);
}

/** Writes the cpu/mem/disk triple of a resource allocation to the given object. */
private static void resourceAllocationToSlime(ResourceAllocation allocation, Cursor object) {
    object.setDouble("cpu", allocation.getCpuCores());
    object.setDouble("mem", allocation.getMemoryGb());
    object.setDouble("disk", allocation.getDiskGb());
}

/** Adds one { unixms, value } sample to a time series array. */
private static void addMeteringPoint(Cursor series, long unixMillis, double value) {
    Cursor point = series.addObject();
    point.setLong("unixms", unixMillis);
    point.setDouble("value", value);
}
/** Returns the change (platform and/or application version) currently rolling out, if any. */
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().requireInstance(id);
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    Change change = instance.change();
    if ( ! change.isEmpty()) {
        change.platform().ifPresent(version -> root.setString("platform", version.toString()));
        change.application().ifPresent(version -> root.setString("application", version.id()));
        root.setBool("pinned", change.isPinned());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns whether the orchestrator currently has the given deployment suspended. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), zone);
    Slime slime = new Slime();
    slime.setObject().setBool("suspended", controller.applications().isSuspended(deploymentId));
    return new SlimeJsonResponse(slime);
}
/** Returns the service view of a deployment, as reported by the config server of its zone. */
private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
    ZoneId zone = requireZone(environment, region);
    ApplicationId applicationId = new ApplicationId.Builder().tenant(tenantName)
                                                             .applicationName(applicationName)
                                                             .instanceName(instanceName)
                                                             .build();
    ServiceApiResponse response = new ServiceApiResponse(zone,
                                                         applicationId,
                                                         List.of(controller.zoneRegistry().getConfigServerVipUri(zone)),
                                                         request.getUri());
    response.setResponse(applicationView);
    return response;
}
/** Proxies a request for a single service of a deployment, via the config server of its zone. */
private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    // Cluster controller status pages are served as HTML, outside the generic service API.
    if ("container-clustercontroller".equals(serviceName) && restPath.contains("/status/")) {
        // Split on the first "/status/" only, and keep a trailing empty element: without the limit,
        // a restPath ending in "/status/" would yield a single-element array and parts[1] would
        // throw ArrayIndexOutOfBoundsException.
        String[] parts = restPath.split("/status/", 2);
        String result = controller.serviceRegistry().configServer().getClusterControllerStatus(deploymentId, parts[0], parts[1]);
        return new HtmlResponse(result);
    }
    Map<?,?> result = controller.serviceRegistry().configServer().getServiceApiResponse(deploymentId, serviceName, restPath);
    ServiceApiResponse response = new ServiceApiResponse(deploymentId.zoneId(),
                                                         deploymentId.applicationId(),
                                                         List.of(controller.zoneRegistry().getConfigServerVipUri(deploymentId.zoneId())),
                                                         request.getUri());
    response.setResponse(result, serviceName, restPath);
    return response;
}
/** Proxies a request for application package content to the config server of the deployment's zone. */
private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, String restPath, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), zone);
    return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, "/" + restPath, request.getUri());
}
/** Updates an existing tenant from the request body, and returns the stored result. */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName); // 404 if the tenant does not exist
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    return tenant(controller.tenants().require(tenant), request);
}
/** Creates a new tenant from the request body, and returns the stored result. */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().create(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    return tenant(controller.tenants().require(tenant), request);
}
/** Creates a new application under the given tenant, and returns its serialized id. */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    // Called for its side effect only; the response is built from the id. (The returned
    // Application was previously bound to an unused local.)
    controller.applications().createApplication(id, credentials);
    Slime slime = new Slime();
    toSlime(id, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Creates a new instance, implicitly creating the application if this is its first instance. */
private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    if (controller.applications().getApplication(applicationId).isEmpty())
        createApplication(tenantName, applicationName, request);
    ApplicationId instanceId = applicationId.instance(instanceName);
    controller.applications().createInstance(instanceId);
    Slime slime = new Slime();
    toSlime(instanceId, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
    // The request body is the bare version string; the empty version means "current system version".
    String versionString = readToString(request.getData());
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Version version = Version.fromString(versionString);
        VersionStatus versionStatus = controller.readVersionStatus();
        if (version.equals(Version.emptyVersion))
            version = controller.systemVersion(versionStatus);
        // Only versions currently active in this system may be targeted.
        if (!versionStatus.isActive(version))
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + versionStatus.versions()
                                                                                  .stream()
                                                                                  .map(VespaVersion::versionNumber)
                                                                                  .map(Version::toString)
                                                                                  .collect(joining(", ")));
        Change change = Change.of(version);
        // Pinning keeps the application on this version until explicitly unpinned.
        if (pin)
            change = change.withPin();
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        // Fail with a descriptive message instead of a bare NoSuchElementException when no
        // application package has ever been submitted (latestVersion() is then empty).
        Change change = Change.of(application.get().latestVersion()
                .orElseThrow(() -> new IllegalArgumentException("No application package has been submitted for " + id)));
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Change change = application.get().require(id.instance()).change();
        if (change.isEmpty()) {
            response.append("No deployment in progress for ").append(id).append(" at this time");
            return;
        }
        // The request path segment names which part of the change to cancel (e.g. "all").
        ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase());
        controller.applications().deploymentTrigger().cancelChange(id, cancel);
        response.append("Changed deployment from '").append(change)
                .append("' to '").append(controller.applications().requireInstance(id).change())
                .append("' for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */
private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    // Both filters are optional, comma-separated lists; blank entries are ignored.
    List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream()
                                        .flatMap(clusters -> Stream.of(clusters.split(",")))
                                        .filter(cluster -> ! cluster.isBlank())
                                        .collect(toUnmodifiableList());
    List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream()
                                         .flatMap(types -> Stream.of(types.split(",")))
                                         .filter(type -> ! type.isBlank())
                                         .collect(toUnmodifiableList());
    controller.applications().reindex(id, zone, clusterNames, documentTypes, request.getBooleanProperty("indexedOnly"));
    // Bug fix: the "for types" clause was nested inside the non-empty-clusters branch, so a request
    // limiting only document types produced a message that omitted them.
    return new MessageResponse("Requested reindexing of " + id + " in " + zone +
                               (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames)) +
                               (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)));
}
/** Gets reindexing status of an application in a zone. */
private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setBool("enabled", reindexing.enabled());
    // Clusters — and the pending/ready entries within each — are sorted by key for a stable response.
    Cursor clustersArray = root.setArray("clusters");
    reindexing.clusters().entrySet().stream().sorted(comparingByKey())
              .forEach(cluster -> {
                  Cursor clusterObject = clustersArray.addObject();
                  clusterObject.setString("name", cluster.getKey());
                  // "pending": document type -> the config generation that requires reindexing.
                  Cursor pendingArray = clusterObject.setArray("pending");
                  cluster.getValue().pending().entrySet().stream().sorted(comparingByKey())
                         .forEach(pending -> {
                             Cursor pendingObject = pendingArray.addObject();
                             pendingObject.setString("type", pending.getKey());
                             pendingObject.setLong("requiredGeneration", pending.getValue());
                         });
                  // "ready": document type -> its reindexing status (fields written by setStatus).
                  Cursor readyArray = clusterObject.setArray("ready");
                  cluster.getValue().ready().entrySet().stream().sorted(comparingByKey())
                         .forEach(ready -> {
                             Cursor readyObject = readyArray.addObject();
                             readyObject.setString("type", ready.getKey());
                             setStatus(readyObject, ready.getValue());
                         });
              });
    return new SlimeJsonResponse(slime);
}
/** Writes the optional fields of a reindexing status; absent values are simply omitted. */
void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
    status.readyAt().ifPresent(at -> statusObject.setLong("readyAtMillis", at.toEpochMilli()));
    status.startedAt().ifPresent(at -> statusObject.setLong("startedAtMillis", at.toEpochMilli()));
    status.endedAt().ifPresent(at -> statusObject.setLong("endedAtMillis", at.toEpochMilli()));
    // A state unknown to this API maps to null, which Optional.map turns into "absent".
    status.state().map(ApplicationApiHandler::toString)
          .ifPresent(state -> statusObject.setString("state", state));
    status.message().ifPresent(message -> statusObject.setString("message", message));
    status.progress().ifPresent(progress -> statusObject.setDouble("progress", progress));
}
/** Returns the wire name of a reindexing state, or null for states this API does not expose. */
private static String toString(ApplicationReindexing.State state) {
    if (state == ApplicationReindexing.State.PENDING) return "pending";
    if (state == ApplicationReindexing.State.RUNNING) return "running";
    if (state == ApplicationReindexing.State.FAILED) return "failed";
    if (state == ApplicationReindexing.State.SUCCESSFUL) return "successful";
    return null;
}
/** Enables reindexing of an application in a zone. */
private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    controller.applications().enableReindexing(id, zone);
    return new MessageResponse("Enabled reindexing of " + id + " in " + zone);
}
/** Disables reindexing of an application in a zone. */
private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    controller.applications().disableReindexing(id, zone);
    return new MessageResponse("Disabled reindexing of " + id + " in " + zone);
}
/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), zone);
    // Each query parameter, when present, narrows the restart to matching nodes.
    Optional<HostName> hostName = Optional.ofNullable(request.getProperty("hostname")).map(HostName::from);
    Optional<ClusterSpec.Type> clusterType = Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from);
    Optional<ClusterSpec.Id> clusterId = Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from);
    controller.applications().restart(deploymentId, new RestartFilter().withHostName(hostName)
                                                                       .withClusterType(clusterType)
                                                                       .withClusterId(clusterId));
    return new MessageResponse("Requested restart of " + deploymentId);
}
/** Set suspension status of the given deployment. */
private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
    ZoneId zone = requireZone(environment, region);
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), zone);
    controller.applications().setSuspension(deploymentId, suspend);
    String action = suspend ? "Suspended" : "Resumed";
    return new MessageResponse(action + " orchestration of " + deploymentId);
}
/** Deploys the uploaded application package directly through the job controller, bypassing the pipeline. */
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
    // Direct deployments are restricted to manually deployed environments, except for operators.
    if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");
    Map<String, byte[]> dataParts = parseDataParts(request);
    // Consistency fix: use the shared constant for both the presence check and the lookup, so the
    // two cannot drift apart (the check previously used the literal "applicationZip").
    if ( ! dataParts.containsKey(EnvironmentResource.APPLICATION_ZIP))
        throw new IllegalArgumentException("Missing required form part '" + EnvironmentResource.APPLICATION_ZIP + "'");
    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                     Optional.of(id.instance()),
                                                                     Optional.of(type.zone(controller.system())),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));
    // An explicit Vespa version may be supplied in the optional "deployOptions" JSON part.
    Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions"))
                                        .map(json -> SlimeUtils.jsonToSlime(json).get())
                                        .flatMap(options -> optional("vespaVersion", options))
                                        .map(Version::fromString);
    controller.jobController().deploy(id, type, version, applicationPackage);
    RunId runId = controller.jobController().last(id, type).get().id();
    Slime slime = new Slime();
    Cursor rootObject = slime.setObject();
    rootObject.setString("message", "Deployment started in " + runId +
                                    ". This may take about 15 minutes the first time.");
    rootObject.setLong("run", runId.number());
    return new SlimeJsonResponse(slime);
}
/** Deploys a system application with an application package to the given zone; regular applications are rejected. */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( ! dataParts.containsKey("deployOptions"))
        return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
    Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
    // Only system applications which carry an application package can be deployed through this API.
    Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId);
    if (systemApplication.isEmpty() || !systemApplication.get().hasApplicationPackage()) {
        return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API");
    }
    // Callers may not choose a version; the system decides. "null" is tolerated as an explicit no-op.
    String vespaVersion = deployOptions.field("vespaVersion").asString();
    if (!vespaVersion.isEmpty() && !vespaVersion.equals("null")) {
        return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted");
    }
    // Refuse while the system is upgrading, and before a system version has been determined.
    VersionStatus versionStatus = controller.readVersionStatus();
    if (versionStatus.isUpgrading()) {
        throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
    }
    Optional<VespaVersion> systemVersion = versionStatus.systemVersion();
    if (systemVersion.isEmpty()) {
        throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
    }
    ActivateResult result = controller.applications()
                                      .deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.get().versionNumber());
    return new SlimeJsonResponse(toSlime(result));
}
/** Deletes the given tenant, returning its last known representation as confirmation. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    // Look the tenant up first so a missing tenant yields a 404 rather than a deletion error.
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");
    TenantName name = tenant.get().name();
    Credentials credentials = accessControlRequests.credentials(name,
                                                                toSlime(request.getData()).get(),
                                                                request.getJDiscRequest());
    controller.tenants().delete(name, credentials);
    return tenant(tenant.get(), request);
}
/** Deletes the given application, using credentials from the request body. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Inspector requestObject = toSlime(request.getData()).get();
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    controller.applications().deleteApplication(id, credentials);
    return new MessageResponse("Deleted application " + id);
}
/** Deletes the given instance; deleting the last instance also deletes the application itself. */
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    controller.applications().deleteInstance(id.instance(instanceName));
    boolean noInstancesLeft = controller.applications().requireApplication(id).instances().isEmpty();
    if (noInstancesLeft) {
        // Application deletion requires credentials from the request body.
        Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
        controller.applications().deleteApplication(id, credentials);
    }
    return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString());
}
/** Deactivates (removes) the given deployment. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), zone);
    controller.applications().deactivate(id.applicationId(), id.zoneId());
    return new MessageResponse("Deactivated " + id);
}
/** Returns test config for indicated job, with production deployments of the default instance. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
    ApplicationId defaultInstanceId = TenantAndApplicationId.from(id).defaultInstance();
    // Start from all production deployments of the default instance ...
    HashSet<DeploymentId> deployments = new HashSet<>();
    controller.applications().getInstance(defaultInstanceId)
              .ifPresent(instance -> instance.productionDeployments().keySet()
                                             .forEach(zone -> deployments.add(new DeploymentId(defaultInstanceId, zone))));
    // ... and include the zone under test, unless it is itself a production zone.
    var testedZone = type.zone(controller.system());
    if ( ! type.isProduction())
        deployments.add(new DeploymentId(id, testedZone));
    return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
                                                                 type,
                                                                 false,
                                                                 controller.routing().zoneEndpointsOf(deployments),
                                                                 controller.applications().reachableContentClustersByZone(deployments)));
}
/** Parses a source revision from a JSON object requiring "repository", "branch" and "commit" fields. */
private static SourceRevision toSourceRevision(Inspector object) {
    Inspector repository = object.field("repository");
    Inspector branch = object.field("branch");
    Inspector commit = object.field("commit");
    if ( ! repository.valid() || ! branch.valid() || ! commit.valid())
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    return new SourceRevision(repository.asString(), branch.asString(), commit.asString());
}
/** Returns the tenant with the given name, or throws NotExistsException. */
private Tenant getTenantOrThrow(String tenantName) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty())
        throw new NotExistsException(new TenantId(tenantName));
    return tenant.get();
}
/** Serializes a tenant, including its applications and metadata, to the given object. */
private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) {
    object.setString("tenant", tenant.name().value());
    object.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            object.setString("athensDomain", athenzTenant.domain().getName());
            object.setString("property", athenzTenant.property().id());
            athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
            // Contact info is optional; when present it carries URLs plus a nested list of contact persons.
            athenzTenant.contact().ifPresent(c -> {
                object.setString("propertyUrl", c.propertyUrl().toString());
                object.setString("contactsUrl", c.url().toString());
                object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
                Cursor contactsArray = object.setArray("contacts");
                c.persons().forEach(persons -> {
                    Cursor personArray = contactsArray.addArray();
                    persons.forEach(personArray::addString);
                });
            });
            break;
        case cloud: {
            CloudTenant cloudTenant = (CloudTenant) tenant;
            cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName()));
            // Developer keys are serialized in PEM form together with the user who registered each.
            Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
            cloudTenant.developerKeys().forEach((key, user) -> {
                Cursor keyObject = pemDeveloperKeysArray.addObject();
                keyObject.setString("key", KeyUtils.toPem(key));
                keyObject.setString("user", user.getName());
            });
            toSlime(object, cloudTenant.tenantSecretStores());
            toSlime(object.setObject("integrations").setObject("aws"),
                    controller.serviceRegistry().roleService().getTenantRole(tenant.name()),
                    cloudTenant.tenantSecretStores());
            // Quota: the tenant's total, plus usage summed over all its applications.
            var tenantQuota = controller.serviceRegistry().billingController().getQuota(tenant.name());
            var usedQuota = applications.stream()
                                        .map(Application::quotaUsage)
                                        .reduce(QuotaUsage.none, QuotaUsage::add);
            toSlime(tenantQuota, usedQuota, object.setObject("quota"));
            cloudTenant.archiveAccessRole().ifPresent(role -> object.setString("archiveAccessRole", role));
            break;
        }
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    // Applications: full deployment status when recursing, otherwise just identifiers/links.
    Cursor applicationArray = object.setArray("applications");
    for (Application application : applications) {
        DeploymentStatus status = null;
        for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                      : application.instances().values())
            if (recurseOverApplications(request)) {
                // Deployment status is computed at most once per application, shared across its instances.
                if (status == null) status = controller.jobController().deploymentStatus(application);
                toSlime(applicationArray.addObject(), instance, status, request);
            }
            else {
                toSlime(instance.id(), applicationArray.addObject(), request);
            }
    }
    tenantMetaDataToSlime(tenant, applications, object.setObject("metaData"));
}
/** Serializes a quota and its current usage. */
private void toSlime(Quota quota, QuotaUsage usage, Cursor object) {
    // A missing budget is serialized as an explicit nix, distinguishing "no budget" from a budget of 0.
    if (quota.budget().isPresent())
        object.setDouble("budget", quota.budget().get().doubleValue());
    else
        object.setNix("budget");
    object.setDouble("budgetUsed", usage.rate());
    quota.maxClusterSize().ifPresent(size -> object.setLong("clusterSize", size));
}
/** Serializes the total resources of a cluster, including its computed cost. */
private void toSlime(ClusterResources resources, Cursor object) {
    object.setLong("nodes", resources.nodes());
    object.setLong("groups", resources.groups());
    toSlime(resources.nodeResources(), object.setObject("nodeResources"));
    // Cost depends on which system this controller serves.
    object.setDouble("cost", ResourceMeterMaintainer.cost(resources, controller.serviceRegistry().zoneRegistry().system()));
}
/** Serializes actual, ideal and current utilization for cpu, memory and disk. */
private void utilizationToSlime(Cluster.Utilization utilization, Cursor utilizationObject) {
    // cpu
    utilizationObject.setDouble("cpu", utilization.cpu());
    utilizationObject.setDouble("idealCpu", utilization.idealCpu());
    utilizationObject.setDouble("currentCpu", utilization.currentCpu());
    // memory
    utilizationObject.setDouble("memory", utilization.memory());
    utilizationObject.setDouble("idealMemory", utilization.idealMemory());
    utilizationObject.setDouble("currentMemory", utilization.currentMemory());
    // disk
    utilizationObject.setDouble("disk", utilization.disk());
    utilizationObject.setDouble("idealDisk", utilization.idealDisk());
    utilizationObject.setDouble("currentDisk", utilization.currentDisk());
}
/** Serializes scaling events: resources before/after, start time, and completion time when finished. */
private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) {
    scalingEvents.forEach(event -> {
        Cursor eventObject = scalingEventsArray.addObject();
        toSlime(event.from(), eventObject.setObject("from"));
        toSlime(event.to(), eventObject.setObject("to"));
        eventObject.setLong("at", event.at().toEpochMilli());
        event.completion().ifPresent(completion -> eventObject.setLong("completion", completion.toEpochMilli()));
    });
}
/** Serializes the resources of a single node. */
private void toSlime(NodeResources resources, Cursor object) {
    object.setDouble("vcpu", resources.vcpu());
    object.setDouble("memoryGb", resources.memoryGb());
    object.setDouble("diskGb", resources.diskGb());
    object.setDouble("bandwidthGbps", resources.bandwidthGbps());
    // NOTE(review): valueOf appears to be a helper elsewhere in this class mapping these
    // enums to their wire names — not visible in this chunk; confirm before relying on it.
    object.setString("diskSpeed", valueOf(resources.diskSpeed()));
    object.setString("storageType", valueOf(resources.storageType()));
}
/** Serializes one entry of the tenant list: tenant name, type-specific metadata, and its API url. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
object.setString("tenant", tenant.name().value());
Cursor metaData = object.setObject("metaData");
metaData.setString("type", tenantType(tenant));
switch (tenant.type()) {
case athenz:
// Athenz tenants additionally expose their domain and property id
AthenzTenant athenzTenant = (AthenzTenant) tenant;
metaData.setString("athensDomain", athenzTenant.domain().getName());
metaData.setString("property", athenzTenant.property().id());
break;
case cloud: break; // no extra metadata for cloud tenants
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/**
 * Serializes tenant activity metadata: creation time, the most recent dev activity,
 * the most recent submission, and last login per user level. Absent values are omitted.
 */
private void tenantMetaDataToSlime(Tenant tenant, List<Application> applications, Cursor object) {
// Latest dev deployment across all instances; if none is currently deployed,
// fall back to the start time of the most recent dev job run for any instance.
Optional<Instant> lastDev = applications.stream()
.flatMap(application -> application.instances().values().stream())
.flatMap(instance -> instance.deployments().values().stream())
.filter(deployment -> deployment.zone().environment() == Environment.dev)
.map(Deployment::at)
.max(Comparator.naturalOrder())
.or(() -> applications.stream()
.flatMap(application -> application.instances().values().stream())
.flatMap(instance -> JobType.allIn(controller.system()).stream()
.filter(job -> job.environment() == Environment.dev)
.flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream()))
.map(Run::start)
.max(Comparator.naturalOrder()));
// Latest submitted application version, identified by its build time
Optional<Instant> lastSubmission = applications.stream()
.flatMap(app -> app.latestVersion().flatMap(ApplicationVersion::buildTime).stream())
.max(Comparator.naturalOrder());
object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli());
lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli()));
lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli()));
tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user)
.ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli()));
tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer)
.ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli()));
tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator)
.ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli()));
}
/**
 * Returns a copy of the given URI with the path replaced by newPath and the query by newQuery
 * (null clears the query). Scheme, user info, host and port are preserved; any fragment is dropped.
 */
private URI withPathAndQuery(String newPath, String newQuery, URI uri) {
try {
return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null);
}
catch (URISyntaxException e) {
// Components come from an already-valid URI, so this cannot trigger in practice
throw new RuntimeException("Will not happen", e);
}
}
/** Returns a copy of the given URI with the path set to the given path and the query cleared. */
private URI withPath(String newPath, URI uri) {
return withPathAndQuery(newPath, null, uri);
}
/** Builds the canonical application/v4 REST path for the given deployment. */
private String toPath(DeploymentId id) {
    ApplicationId app = id.applicationId();
    return path("/application", "v4",
                "tenant", app.tenant(),
                "application", app.application(),
                "instance", app.instance(),
                "environment", id.zoneId().environment(),
                "region", id.zoneId().region());
}
/**
 * Parses the given string as a long, returning the given default when the string is null.
 *
 * @throws IllegalArgumentException if the string is non-null but not a valid long
 */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null)
        return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    } catch (NumberFormatException ignored) {
        // Translated to an IllegalArgumentException so the outer handler maps it to a 400
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'");
    }
}
/** Serializes a job run: run number, target platform, target revision (if known), and timestamp. */
private void toSlime(Run run, Cursor object) {
object.setLong("id", run.id().number());
object.setString("version", run.versions().targetPlatform().toFullString());
if ( ! run.versions().targetApplication().isUnknown())
toSlime(run.versions().targetApplication(), object.setObject("revision"));
// NOTE(review): "reason" is a hard-coded placeholder — the real trigger reason is not wired through here
object.setString("reason", "unknown reason");
// End time when finished, otherwise the start time
object.setLong("at", run.end().orElse(run.start()).toEpochMilli());
}
/**
 * Reads the given stream (at most 1 MB) and parses it as JSON.
 *
 * @throws RuntimeException wrapping the IOException if reading the stream fails
 */
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        // Previously threw a bare RuntimeException with no message and no cause, which
        // surfaced as an empty 500 with nothing in the log; keep the cause attached.
        throw new RuntimeException("Failed to read request JSON", e);
    }
}
/** Returns the authenticated user principal of the request, failing with a 500 if none is present. */
private static Principal requireUserPrincipal(HttpRequest request) {
    Principal principal = request.getJDiscRequest().getUserPrincipal();
    if (principal != null)
        return principal;
    // A missing principal means the security filter chain is misconfigured — a server error, not a client error
    throw new InternalServerErrorException("Expected a user principal");
}
/** Returns the named field of the given object, failing with a 400 if it is absent. */
private Inspector mandatory(String key, Inspector object) {
    Inspector field = object.field(key);
    if ( ! field.valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return field;
}
/** Returns the named string field of the given object, or empty if it is absent. */
private Optional<String> optional(String key, Inspector object) {
return SlimeUtils.optionalString(object.field(key));
}
/** Joins the string forms of the given elements with '/' into a path (no leading/trailing separator added). */
private static String path(Object... elements) {
return Joiner.on("/").join(elements);
}
/** Serializes an application id reference: tenant, application, and its API url on this host. */
private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    String selfPath = "/application/v4/tenant/" + id.tenant().value()
                      + "/application/" + id.application().value();
    object.setString("url", withPath(selfPath, request.getUri()).toString());
}
/** Serializes an instance id reference: tenant, application, instance, and its API url on this host. */
private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    object.setString("instance", id.instance().value());
    String selfPath = "/application/v4/tenant/" + id.tenant().value()
                      + "/application/" + id.application().value()
                      + "/instance/" + id.instance().value();
    object.setString("url", withPath(selfPath, request.getUri()).toString());
}
/**
 * Serializes the result of activating a deployment: revision id, package size, the config
 * server's prepare log, and any required config change actions (restarts and re-feeds).
 */
private Slime toSlime(ActivateResult result) {
Slime slime = new Slime();
Cursor object = slime.setObject();
object.setString("revisionId", result.revisionId().id());
object.setLong("applicationZipSize", result.applicationZipSizeBytes());
// Prepare log messages, if the config server returned any
Cursor logArray = object.setArray("prepareMessages");
if (result.prepareResponse().log != null) {
for (Log logMessage : result.prepareResponse().log) {
Cursor logObject = logArray.addObject();
logObject.setLong("time", logMessage.time);
logObject.setString("level", logMessage.level);
logObject.setString("message", logMessage.message);
}
}
Cursor changeObject = object.setObject("configChangeActions");
// Services which must be restarted for the new config to take effect
Cursor restartActionsArray = changeObject.setArray("restart");
for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
Cursor restartActionObject = restartActionsArray.addObject();
restartActionObject.setString("clusterName", restartAction.clusterName);
restartActionObject.setString("clusterType", restartAction.clusterType);
restartActionObject.setString("serviceType", restartAction.serviceType);
serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
}
// Document types which must be re-fed due to incompatible schema changes
Cursor refeedActionsArray = changeObject.setArray("refeed");
for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
Cursor refeedActionObject = refeedActionsArray.addObject();
refeedActionObject.setString("name", refeedAction.name);
refeedActionObject.setString("documentType", refeedAction.documentType);
refeedActionObject.setString("clusterName", refeedAction.clusterName);
serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
}
return slime;
}
/** Serializes each service's name, type, config id and host into the given array, in list order. */
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
    serviceInfoList.forEach(info -> {
        Cursor infoObject = array.addObject();
        infoObject.setString("serviceName", info.serviceName);
        infoObject.setString("serviceType", info.serviceType);
        infoObject.setString("configId", info.configId);
        infoObject.setString("hostName", info.hostName);
    });
}
/** Appends each string to the given array, in list order. */
private void stringsToSlime(List<String> strings, Cursor array) {
    strings.forEach(array::addString);
}
/** Serializes the tenant's secret stores under a "secretStores" array, in list order. */
private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) {
    Cursor secretStoreArray = object.setArray("secretStores");
    for (TenantSecretStore store : tenantSecretStores)
        toSlime(secretStoreArray.addObject(), store);
}
/** Serializes the tenant's container role and its secret-store accounts, in list order. */
private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) {
    object.setString("tenantRole", tenantRoles.containerRole());
    Cursor accountsArray = object.setArray("accounts");
    for (TenantSecretStore secretStore : tenantSecretStores)
        toSlime(accountsArray.addObject(), secretStore);
}
/** Serializes a single secret store: its name, AWS account id, and IAM role. */
private void toSlime(Cursor object, TenantSecretStore secretStore) {
object.setString("name", secretStore.getName());
object.setString("awsId", secretStore.getAwsId());
object.setString("role", secretStore.getRole());
}
/**
 * Reads the entire stream into a single string, or returns null if the stream is empty.
 * NOTE(review): Scanner here uses the platform default charset — confirm callers only pass
 * charset-safe content. The Scanner (and hence the stream) is left open; presumably the
 * caller owns the stream's lifecycle — verify.
 */
private String readToString(InputStream stream) {
Scanner scanner = new Scanner(stream).useDelimiter("\\A");
if ( ! scanner.hasNext()) return null;
return scanner.next();
}
/** True if the request asks for recursion into tenants (or any deeper level). */
private static boolean recurseOverTenants(HttpRequest request) {
    return "tenant".equals(request.getProperty("recursive")) || recurseOverApplications(request);
}
/** True if the request asks for recursion into applications (or any deeper level). */
private static boolean recurseOverApplications(HttpRequest request) {
    return "application".equals(request.getProperty("recursive")) || recurseOverDeployments(request);
}
/** True if the request asks for recursion all the way into deployments ("all", "true" or "deployment"). */
private static boolean recurseOverDeployments(HttpRequest request) {
    String recursive = request.getProperty("recursive"); // may be null, which matches nothing
    return "all".equals(recursive) || "true".equals(recursive) || "deployment".equals(recursive);
}
/** True if the request asks to filter the response down to production instances only. */
private static boolean showOnlyProductionInstances(HttpRequest request) {
return "true".equals(request.getProperty("production"));
}
/** Returns the API string for the given tenant's type. */
private static String tenantType(Tenant tenant) {
    switch (tenant.type()) {
        case athenz: return "ATHENS";
        case cloud: return "CLOUD";
        // Report the unhandled type value itself, consistent with the message at the
        // tenant-list serializer; the previous message printed the implementation class name instead.
        default: throw new IllegalArgumentException("Unknown tenant type: " + tenant.type());
    }
}
/** Extracts the application id from the {tenant}/{application}/{instance} path segments. */
private static ApplicationId appIdFromPath(Path path) {
return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
}
/** Extracts the job type from the {jobtype} path segment. */
private static JobType jobTypeFromPath(Path path) {
return JobType.fromJobName(path.get("jobtype"));
}
/** Extracts a full run id — application, job type and run number — from the request path. */
private static RunId runIdFromPath(Path path) {
    return new RunId(appIdFromPath(path), jobTypeFromPath(path), Long.parseLong(path.get("number")));
}
/**
 * Handles submission of a new application revision: parses the multipart request
 * (options JSON, application zip, test zip), validates the options, verifies the
 * package's identity configuration, and registers the submission with the job controller.
 */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
Map<String, byte[]> dataParts = parseDataParts(request);
Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
// Project id is clamped to at least 1
long projectId = Math.max(1, submitOptions.field("projectId").asLong());
Optional<String> repository = optional("repository", submitOptions);
Optional<String> branch = optional("branch", submitOptions);
Optional<String> commit = optional("commit", submitOptions);
// A source revision is only recorded when repository, branch and commit are all given
Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
: Optional.empty();
Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
Optional<String> authorEmail = optional("authorEmail", submitOptions);
// If a source URL is given it must at least be absolute (scheme + host)
sourceUrl.map(URI::create).ifPresent(url -> {
if (url.getHost() == null || url.getScheme() == null)
throw new IllegalArgumentException("Source URL must include scheme and host");
});
ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
Optional.empty(),
Optional.empty(),
applicationPackage,
Optional.of(requireUserPrincipal(request)));
return JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
tenant,
application,
sourceRevision,
authorEmail,
sourceUrl,
projectId,
applicationPackage,
dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP));
}
/**
 * Removes all production deployments of the given application by submitting a special
 * deployment-removal package with no source information and an empty test package.
 */
private HttpResponse removeAllProdDeployments(String tenant, String application) {
JobControllerApiHandlerHelper.submitResponse(controller.jobController(), tenant, application,
Optional.empty(), Optional.empty(), Optional.empty(), 1,
ApplicationPackage.deploymentRemoval(), new byte[0]);
return new MessageResponse("All deployments removed");
}
/**
 * Parses the environment/region pair into a zone and verifies it exists in this system.
 * The pseudo zone prod.controller is always accepted even though it is not in the registry.
 */
private ZoneId requireZone(String environment, String region) {
    ZoneId zone = ZoneId.from(environment, region);
    boolean isControllerPseudoZone = zone.environment() == Environment.prod
                                     && zone.region().value().equals("controller");
    if ( ! isControllerPseudoZone && ! controller.zoneRegistry().hasZone(zone))
        throw new IllegalArgumentException("Zone " + zone + " does not exist in this system");
    return zone;
}
/**
 * Parses a multipart request into its named parts. When an X-Content-Hash header is present,
 * the SHA-256 digest of the raw body is accumulated while parsing and must equal the
 * base64-decoded header value, or the request is rejected.
 * NOTE(review): Arrays.equals is not a constant-time comparison; presumably acceptable since
 * this verifies integrity of caller-supplied content rather than a secret — confirm.
 */
private static Map<String, byte[]> parseDataParts(HttpRequest request) {
String contentHash = request.getHeader("X-Content-Hash");
if (contentHash == null)
return new MultipartParser().parse(request);
DigestInputStream digester = Signatures.sha256Digester(request.getData());
// Parsing pulls the whole body through the digesting stream, so the digest is complete afterwards
var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
return dataParts;
}
/**
 * Resolves which global rotation a request refers to:
 * no rotations assigned — not found; endpointId given — the rotation for that endpoint,
 * or not found; no endpointId and several rotations — bad request (caller must disambiguate);
 * otherwise the single assigned rotation.
 */
private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
if (instance.rotations().isEmpty()) {
throw new NotExistsException("global rotation does not exist for " + instance);
}
if (endpointId.isPresent()) {
return instance.rotations().stream()
.filter(r -> r.endpointId().id().equals(endpointId.get()))
.map(AssignedRotation::rotationId)
.findFirst()
.orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() +
" does not exist for " + instance));
} else if (instance.rotations().size() > 1) {
throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
}
return instance.rotations().get(0).rotationId();
}
/** Maps a rotation state to its API string; unrecognized states serialize as "UNKNOWN". */
private static String rotationStateString(RotationState state) {
    if (state == RotationState.in) return "IN";
    if (state == RotationState.out) return "OUT";
    return "UNKNOWN";
}
/** Maps an endpoint scope to its API string. */
private static String endpointScopeString(Endpoint.Scope scope) {
    if (scope == Endpoint.Scope.region) return "region";
    if (scope == Endpoint.Scope.global) return "global";
    if (scope == Endpoint.Scope.zone) return "zone";
    throw new IllegalArgumentException("Unknown endpoint scope " + scope);
}
/** Maps a routing method to its API string. */
private static String routingMethodString(RoutingMethod method) {
    if (method == RoutingMethod.exclusive) return "exclusive";
    if (method == RoutingMethod.shared) return "shared";
    if (method == RoutingMethod.sharedLayer4) return "sharedLayer4";
    throw new IllegalArgumentException("Unknown routing method " + method);
}
/**
 * Returns the request-context attribute with the given name, cast to the given type.
 * Fails with a 400 if the attribute is absent or of a different type.
 */
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
    Object value = request.getJDiscRequest().context().get(attributeName);
    if (cls.isInstance(value))
        return cls.cast(value);
    // isInstance(null) is false, so a missing attribute also lands here
    throw new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request");
}
/** Returns whether the given request was made by a hosted operator, based on its security context roles. */
private static boolean isOperator(HttpRequest request) {
    return getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class)
            .roles().stream()
            .anyMatch(role -> role.definition() == RoleDefinition.hostedOperator);
}
} |
NVM, I see this is run below, in a different API. | private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
Principal principal = requireUserPrincipal(request);
SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName());
controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment);
return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant()));
} | controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment); | private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
Principal principal = requireUserPrincipal(request);
SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName());
controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment);
return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant()));
} | class ApplicationApiHandler extends AuditLoggingRequestHandler {
// Shared JSON mapper for request/response bodies
private static final ObjectMapper jsonMapper = new ObjectMapper();
private final Controller controller;
private final AccessControlRequests accessControlRequests;
// Serializes test configuration for this controller's system
private final TestConfigSerializer testConfigSerializer;
@Inject
public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx,
Controller controller,
AccessControlRequests accessControlRequests) {
// Audit logging of mutating requests is handled by the superclass
super(parentCtx, controller.auditLogger());
this.controller = controller;
this.accessControlRequests = accessControlRequests;
this.testConfigSerializer = new TestConfigSerializer(controller.system());
}
@Override
public Duration getTimeout() {
// 20 minutes — presumably to accommodate long-running operations such as deploys
// and large package uploads; confirm before lowering
return Duration.ofMinutes(20);
}
/**
 * Entry point for all requests: dispatches on HTTP method, and translates thrown
 * exceptions into the corresponding HTTP error responses. Catch order matters:
 * specific exception types must be handled before the RuntimeException fallback.
 */
@Override
public HttpResponse auditAndHandle(HttpRequest request) {
try {
Path path = new Path(request.getUri());
switch (request.getMethod()) {
case GET: return handleGET(path, request);
case PUT: return handlePUT(path, request);
case POST: return handlePOST(path, request);
case PATCH: return handlePATCH(path, request);
case DELETE: return handleDELETE(path, request);
case OPTIONS: return handleOPTIONS();
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (ForbiddenException e) {
return ErrorResponse.forbidden(Exceptions.toMessageString(e)); // 403
}
catch (NotAuthorizedException e) {
return ErrorResponse.unauthorized(Exceptions.toMessageString(e)); // 401
}
catch (NotExistsException e) {
return ErrorResponse.notFoundError(Exceptions.toMessageString(e)); // 404
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e)); // 400
}
catch (ConfigServerException e) {
// Map config server error codes onto client-meaningful HTTP statuses
switch (e.code()) {
case NOT_FOUND:
return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
case ACTIVATION_CONFLICT:
return new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e));
case INTERNAL_SERVER_ERROR:
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
default:
return new ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e));
}
}
catch (RuntimeException e) {
// Last resort: log with stack trace and return 500, so unexpected errors are never silently dropped
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
/**
 * Routes GET requests: the first matching path pattern wins. Both path orderings
 * (.../instance/{instance}/environment/... and the legacy .../environment/.../instance/{instance})
 * are supported for deployment-scoped resources.
 */
private HttpResponse handleGET(Path path, HttpRequest request) {
    if (path.matches("/application/v4/")) return root(request);
    if (path.matches("/application/v4/tenant")) return tenants(request);
    if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/metering")) return metering(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return metrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    // An exact duplicate of the following match was removed here: it could never be reached.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes PUT requests: the first matching path pattern wins. */
private HttpResponse handlePUT(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return allowArchiveAccess(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
// Dispatches POST requests to the handler matching the request path.
// NOTE: patterns are tried in order, so more specific paths must stay above less specific ones.
// Application-level deploy routes use the "default" instance; instance-level routes pass the instance from the path.
private HttpResponse handlePOST(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
// Dispatches PATCH requests to the handler matching the request path.
// Both the application-level and the instance-level route patch the application itself;
// the {instance} path segment is accepted but intentionally not passed on.
private HttpResponse handlePATCH(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
// Dispatches DELETE requests to the handler matching the request path.
// NOTE: patterns are tried in order, so more specific paths must stay above less specific ones.
private HttpResponse handleDELETE(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return removeArchiveAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Responds to OPTIONS requests by advertising the HTTP methods this handler accepts. */
private HttpResponse handleOPTIONS() {
    EmptyResponse allowedMethods = new EmptyResponse();
    allowedMethods.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
    return allowedMethods;
}
/** Lists every tenant, with each tenant's applications inlined, as a JSON array. */
private HttpResponse recursiveRoot(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantArray = slime.setArray();
    List<Application> applications = controller.applications().asList();
    for (Tenant tenant : controller.tenants().asList()) {
        // Only include the applications belonging to this tenant.
        List<Application> tenantApplications = applications.stream()
                                                           .filter(application -> application.id().tenant().equals(tenant.name()))
                                                           .collect(toList());
        toSlime(tenantArray.addObject(), tenant, tenantApplications, request);
    }
    return new SlimeJsonResponse(slime);
}
/** Root of the API: a full recursive listing when requested, otherwise just the resource links. */
private HttpResponse root(HttpRequest request) {
    if (recurseOverTenants(request))
        return recursiveRoot(request);
    return new ResourceResponse(request, "tenant");
}
/** Lists all tenants in short form as a JSON array. */
private HttpResponse tenants(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantArray = slime.setArray();
    controller.tenants().asList()
              .forEach(tenant -> tenantInTenantsListToSlime(tenant, request.getUri(), tenantArray.addObject()));
    return new SlimeJsonResponse(slime);
}
/** Returns the named tenant, or 404 if it does not exist. */
private HttpResponse tenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    return tenant(tenant.get(), request);
}
/** Serializes the given tenant, with all its applications, to a JSON response. */
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    toSlime(root, tenant, controller.applications().asList(tenant.name()), request);
    return new SlimeJsonResponse(slime);
}
/** Returns the tenant info of the named tenant; 404 unless the tenant exists and is a cloud tenant. */
private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isPresent() && tenant.get().type() == Tenant.Type.cloud)
        return tenantInfo(((CloudTenant) tenant.get()).info(), request);
    return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
}
/** Serializes tenant info to JSON; an empty info yields an empty object. */
private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    if ( ! info.isEmpty()) {
        root.setString("name", info.name());
        root.setString("email", info.email());
        root.setString("website", info.website());
        root.setString("invoiceEmail", info.invoiceEmail());
        root.setString("contactName", info.contactName());
        root.setString("contactEmail", info.contactEmail());
        toSlime(info.address(), root);
        toSlime(info.billingContact(), root);
    }
    return new SlimeJsonResponse(slime);
}
/** Writes the given address under an "address" object in the parent; writes nothing if the address is empty. */
private void toSlime(TenantInfoAddress address, Cursor parentCursor) {
    if (address.isEmpty()) return;
    Cursor cursor = parentCursor.setObject("address");
    cursor.setString("addressLines", address.addressLines());
    cursor.setString("postalCodeOrZip", address.postalCodeOrZip());
    cursor.setString("city", address.city());
    cursor.setString("stateRegionProvince", address.stateRegionProvince());
    cursor.setString("country", address.country());
}
/** Writes the billing contact under a "billingContact" object in the parent; writes nothing if it is empty. */
private void toSlime(TenantInfoBillingContact billingContact, Cursor parentCursor) {
    if (billingContact.isEmpty()) return;
    Cursor billingCursor = parentCursor.setObject("billingContact");
    billingCursor.setString("name", billingContact.name());
    billingCursor.setString("email", billingContact.email());
    billingCursor.setString("phone", billingContact.phone());
    toSlime(billingContact.address(), billingCursor);
}
/** Updates the tenant info of the named tenant; 404 unless the tenant exists and is a cloud tenant. */
private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isPresent() && tenant.get().type() == Tenant.Type.cloud)
        return updateTenantInfo((CloudTenant) tenant.get(), request);
    return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
}
/** Returns the field's string value if the field is valid, and the given default otherwise. */
private String getString(Inspector field, String defaultValue) { // fixed typo: was "defaultVale"
    return field.valid() ? field.asString() : defaultValue;
}
/**
 * Merges the tenant-info fields present in the request body into the tenant's existing info —
 * any field absent from the request keeps its old value — and stores the result under the tenant lock.
 */
private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) {
    TenantInfo oldInfo = tenant.info();
    Inspector insp = toSlime(request.getData()).get();
    TenantInfo mergedInfo = TenantInfo.EMPTY
            .withName(getString(insp.field("name"), oldInfo.name()))
            .withEmail(getString(insp.field("email"), oldInfo.email()))
            // BUGFIX: previously defaulted to oldInfo.email(), clobbering the stored website
            .withWebsite(getString(insp.field("website"), oldInfo.website()))
            .withInvoiceEmail(getString(insp.field("invoiceEmail"), oldInfo.invoiceEmail()))
            .withContactName(getString(insp.field("contactName"), oldInfo.contactName()))
            // BUGFIX: previously defaulted to oldInfo.contactName(), clobbering the stored contact email
            .withContactEmail(getString(insp.field("contactEmail"), oldInfo.contactEmail()))
            .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address()))
            .withBillingContact(updateTenantInfoBillingContact(insp.field("billingContact"), oldInfo.billingContact()));
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}
/** Merges the address fields present in the given object into the old address; returns the old address unchanged when the object is absent. */
private TenantInfoAddress updateTenantInfoAddress(Inspector addressObject, TenantInfoAddress oldAddress) {
    if ( ! addressObject.valid()) return oldAddress;
    return TenantInfoAddress.EMPTY
            .withCountry(getString(addressObject.field("country"), oldAddress.country()))
            .withStateRegionProvince(getString(addressObject.field("stateRegionProvince"), oldAddress.stateRegionProvince()))
            .withCity(getString(addressObject.field("city"), oldAddress.city()))
            .withPostalCodeOrZip(getString(addressObject.field("postalCodeOrZip"), oldAddress.postalCodeOrZip()))
            .withAddressLines(getString(addressObject.field("addressLines"), oldAddress.addressLines()));
}
/** Merges the billing-contact fields present in the given object into the old contact; returns the old contact unchanged when the object is absent. */
private TenantInfoBillingContact updateTenantInfoBillingContact(Inspector contactObject, TenantInfoBillingContact oldContact) {
    if ( ! contactObject.valid()) return oldContact;
    return TenantInfoBillingContact.EMPTY
            .withName(getString(contactObject.field("name"), oldContact.name()))
            .withEmail(getString(contactObject.field("email"), oldContact.email()))
            .withPhone(getString(contactObject.field("phone"), oldContact.phone()))
            .withAddress(updateTenantInfoAddress(contactObject.field("address"), oldContact.address()));
}
// Lists notifications for the given tenant, optionally narrowed by the "application" and
// "instance" request properties, as a JSON array under "notifications".
private HttpResponse notifications(String tenantName, HttpRequest request) {
// Source filter: tenant is required; application/instance come from query parameters when present;
// zone, cluster, job and run are left unconstrained.
NotificationSource notificationSource = new NotificationSource(TenantName.from(tenantName),
Optional.ofNullable(request.getProperty("application")).map(ApplicationName::from),
Optional.ofNullable(request.getProperty("instance")).map(InstanceName::from),
Optional.empty(), Optional.empty(), Optional.empty(), OptionalLong.empty());
Slime slime = new Slime();
Cursor notificationsArray = slime.setObject().setArray("notifications");
controller.notificationsDb().listNotifications(notificationSource, showOnlyProductionInstances(request))
.forEach(notification -> toSlime(notificationsArray.addObject(), notification));
return new SlimeJsonResponse(slime);
}
// Serializes one notification: timestamp, level, type and messages are always written;
// the source fields (application, instance, zone, cluster, job, run) are written only when present.
private static void toSlime(Cursor cursor, Notification notification) {
cursor.setLong("at", notification.at().toEpochMilli());
cursor.setString("level", notificationLevelAsString(notification.level()));
cursor.setString("type", notificationTypeAsString(notification.type()));
Cursor messagesArray = cursor.setArray("messages");
notification.messages().forEach(messagesArray::addString);
notification.source().application().ifPresent(application -> cursor.setString("application", application.value()));
notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value()));
notification.source().zoneId().ifPresent(zoneId -> {
cursor.setString("environment", zoneId.environment().value());
cursor.setString("region", zoneId.region().value());
});
notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value()));
notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName()));
notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber));
}
/** Returns the wire name for the given notification type; throws for unknown types so new enum values cannot be silently dropped. */
private static String notificationTypeAsString(Notification.Type type) {
    if (type == Notification.Type.applicationPackage) return "applicationPackage";
    if (type == Notification.Type.deployment) return "deployment";
    if (type == Notification.Type.feedBlock) return "feedBlock";
    throw new IllegalArgumentException("No serialization defined for notification type " + type);
}
/** Returns the wire name for the given notification level; throws for unknown levels so new enum values cannot be silently dropped. */
private static String notificationLevelAsString(Notification.Level level) {
    if (level == Notification.Level.warning) return "warning";
    if (level == Notification.Level.error) return "error";
    throw new IllegalArgumentException("No serialization defined for notification level " + level);
}
// Lists the applications of a tenant as a JSON array, each with its instances and API URLs.
// When applicationName is present, only that application is included.
// 404 when the tenant does not exist.
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
TenantName tenant = TenantName.from(tenantName);
if (controller.tenants().get(tenantName).isEmpty())
return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
Slime slime = new Slime();
Cursor applicationArray = slime.setArray();
for (Application application : controller.applications().asList(tenant)) {
// Include all applications when no name filter was given.
if (applicationName.map(application.id().application().value()::equals).orElse(true)) {
Cursor applicationObject = applicationArray.addObject();
applicationObject.setString("tenant", application.id().tenant().value());
applicationObject.setString("application", application.id().application().value());
applicationObject.setString("url", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value(),
request.getUri()).toString());
Cursor instanceArray = applicationObject.setArray("instances");
// Optionally restrict the instance list to production instances only, based on a request property.
for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
: application.instances().keySet()) {
Cursor instanceObject = instanceArray.addObject();
instanceObject.setString("instance", instance.value());
instanceObject.setString("url", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value() +
"/instance/" + instance.value(),
request.getUri()).toString());
}
}
}
return new SlimeJsonResponse(slime);
}
/** Returns the dev application package for the given application and job, as a zip; only manually deployed zones have these. */
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
    if ( ! type.environment().isManuallyDeployed())
        throw new IllegalArgumentException("Only manually deployed zones have dev packages");
    ZoneId zone = type.zone(controller.system());
    String filename = id.toFullString() + "." + zone.value() + ".zip";
    return new ZipResponse(filename, controller.applications().applicationStore().getDev(id, zone));
}
/**
 * Returns the submitted application package of the given application as a zip file.
 * The build is taken from the "build" request property when given, otherwise the latest
 * submitted build is used. Throws NotExistsException when no matching package exists.
 */
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
    var tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
    // Removed an unused local: an ApplicationId for the default instance was built here but never read.
    long buildNumber;
    var requestedBuild = Optional.ofNullable(request.getProperty("build")).map(build -> {
        try {
            return Long.parseLong(build);
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("Invalid build number", e);
        }
    });
    if (requestedBuild.isEmpty()) { // Fall back to the latest submitted build.
        var application = controller.applications().requireApplication(tenantAndApplication);
        var latestBuild = application.latestVersion().map(ApplicationVersion::buildNumber).orElse(OptionalLong.empty());
        if (latestBuild.isEmpty()) {
            throw new NotExistsException("No application package has been submitted for '" + tenantAndApplication + "'");
        }
        buildNumber = latestBuild.getAsLong();
    } else {
        buildNumber = requestedBuild.get();
    }
    var applicationPackage = controller.applications().applicationStore().find(tenantAndApplication.tenant(), tenantAndApplication.application(), buildNumber);
    var filename = tenantAndApplication + "-build" + buildNumber + ".zip";
    if (applicationPackage.isEmpty()) {
        throw new NotExistsException("No application package found for '" +
                                     tenantAndApplication +
                                     "' with build number " + buildNumber);
    }
    return new ZipResponse(filename, applicationPackage.get());
}
/** Serializes the named application to a JSON response. */
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    toSlime(root, getApplication(tenantName, applicationName), request);
    return new SlimeJsonResponse(slime);
}
/** Returns the compile version of the named application as a single-field JSON object. */
private HttpResponse compileVersion(String tenantName, String applicationName) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setString("compileVersion",
                   compileVersion(TenantAndApplicationId.from(tenantName, applicationName)).toFullString());
    return new SlimeJsonResponse(slime);
}
/** Serializes the named instance, together with the application's deployment status, to a JSON response. */
private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Slime slime = new Slime();
    var deploymentStatus = controller.jobController().deploymentStatus(getApplication(tenantName, applicationName));
    toSlime(slime.setObject(), getInstance(tenantName, applicationName, instanceName), deploymentStatus, request);
    return new SlimeJsonResponse(slime);
}
// Adds the PEM-encoded developer key from the request body to the tenant, attributed to the
// requesting user, and returns the tenant's full key list. Only cloud tenants support this.
private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
Principal user = request.getJDiscRequest().getUserPrincipal();
String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
Slime root = new Slime();
// Mutation and response serialization both happen under the tenant lock, so the
// returned key list reflects exactly the state that was stored.
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
tenant = tenant.withDeveloperKey(developerKey, user);
toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
controller.tenants().store(tenant);
});
return new SlimeJsonResponse(root);
}
// Validates that the named secret store works for the given deployment, by asking the config
// server to read the given parameter in the given AWS region. Returns the config server's
// result wrapped with the deployment id, or the raw response on JSON parse failure.
private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) {
var awsRegion = request.getProperty("aws-region");
var parameterName = request.getProperty("parameter-name");
var applicationId = ApplicationId.fromFullString(request.getProperty("application-id"));
var zoneId = ZoneId.from(request.getProperty("zone"));
var deploymentId = new DeploymentId(applicationId, zoneId);
// NOTE(review): the tenant is resolved from the "application-id" property, not from the
// tenantName path segment — confirm these are meant to be allowed to differ.
var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class);
var tenantSecretStore = tenant.tenantSecretStores()
.stream()
.filter(secretStore -> secretStore.getName().equals(secretStoreName))
.findFirst();
if (tenantSecretStore.isEmpty())
return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'");
var response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName);
try {
var responseRoot = new Slime();
var responseCursor = responseRoot.setObject();
responseCursor.setString("target", deploymentId.toString());
var responseResultCursor = responseCursor.setObject("result");
// The config server's response is passed through verbatim under "result".
var responseSlime = SlimeUtils.jsonToSlime(response);
SlimeUtils.copyObject(responseSlime.get(), responseResultCursor);
return new SlimeJsonResponse(responseRoot);
} catch (JsonParseException e) {
// Not valid JSON from the config server: surface the raw response as a 500.
return ErrorResponse.internalServerError(response);
}
}
/**
 * Removes the PEM-encoded developer key in the request body from the tenant and returns the
 * remaining key list. Only cloud tenants support this.
 */
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    // Removed an unused local: the key's owning Principal was looked up here but never used.
    Slime root = new Slime();
    // Mutation and response serialization both happen under the tenant lock, so the
    // returned key list reflects exactly the state that was stored.
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withoutDeveloperKey(developerKey);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}
/** Serializes each key/owner pair as an object with "key" (PEM) and "user" fields. */
private void toSlime(Cursor keysArray, Map<PublicKey, Principal> keys) {
    for (Map.Entry<PublicKey, Principal> entry : keys.entrySet()) {
        Cursor keyObject = keysArray.addObject();
        keyObject.setString("key", KeyUtils.toPem(entry.getKey()));
        keyObject.setString("user", entry.getValue().getName());
    }
}
/** Adds the PEM-encoded deploy key in the request body to the application and returns the full key list. */
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
    String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
    Slime root = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withDeployKey(deployKey);
        // Serialize the resulting key set before storing, all under the application lock.
        Cursor keysArray = root.setObject().setArray("keys");
        for (PublicKey key : application.get().deployKeys())
            keysArray.addString(KeyUtils.toPem(key));
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}
/** Removes the PEM-encoded deploy key in the request body from the application and returns the remaining key list. */
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
    String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
    Slime root = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withoutDeployKey(deployKey);
        // Serialize the resulting key set before storing, all under the application lock.
        Cursor keysArray = root.setObject().setArray("keys");
        for (PublicKey key : application.get().deployKeys())
            keysArray.addString(KeyUtils.toPem(key));
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}
// Registers a new secret store for the tenant: creates the IAM tenant policy, registers the
// store with the secret service, then persists it on the tenant. Only cloud tenants support this.
// Side effects happen in order: role policy -> secret service -> tenant store; the external
// calls are deliberately made before taking the tenant lock.
private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
var data = toSlime(request.getData()).get();
var awsId = mandatory("awsId", data).asString();
var externalId = mandatory("externalId", data).asString();
var role = mandatory("role", data).asString();
var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var tenantSecretStore = new TenantSecretStore(name, awsId, role);
if (!tenantSecretStore.isValid()) {
return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid");
}
if (tenant.tenantSecretStores().contains(tenantSecretStore)) {
return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured");
}
controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role);
controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId);
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withSecretStore(tenantSecretStore);
controller.tenants().store(lockedTenant);
});
// Re-read the tenant so the response reflects the state after the store.
tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var slime = new Slime();
toSlime(slime.setObject(), tenant.tenantSecretStores());
return new SlimeJsonResponse(slime);
}
// Deletes the named secret store from the tenant: removes it from the secret service, deletes
// the IAM tenant policy, then removes it from the tenant's stored configuration.
// 404 if the tenant has no store with that name.
private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) {
var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var optionalSecretStore = tenant.tenantSecretStores().stream()
.filter(secretStore -> secretStore.getName().equals(name))
.findFirst();
if (optionalSecretStore.isEmpty())
return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found");
var tenantSecretStore = optionalSecretStore.get();
// External cleanup first, then the tenant record, mirroring the order of addSecretStore in reverse.
controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore);
controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole());
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withoutSecretStore(tenantSecretStore);
controller.tenants().store(lockedTenant);
});
// Re-read the tenant so the response reflects the state after the store.
tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var slime = new Slime();
toSlime(slime.setObject(), tenant.tenantSecretStores());
return new SlimeJsonResponse(slime);
}
/** Sets the archive access role of the tenant to the "role" value in the request body. Only cloud tenants support this. */
private HttpResponse allowArchiveAccess(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    String role = mandatory("role", toSlime(request.getData()).get()).asString();
    if (role.isBlank())
        return ErrorResponse.badRequest("Archive access role can't be whitespace only");
    controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant ->
            controller.tenants().store(lockedTenant.withArchiveAccessRole(Optional.of(role))));
    return new MessageResponse("Archive access role set to '" + role + "' for tenant " + tenantName + ".");
}
private HttpResponse removeArchiveAccess(String tenantName) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withArchiveAccessRole(Optional.empty());
controller.tenants().store(lockedTenant);
});
return new MessageResponse("Archive access role removed for tenant " + tenantName + ".");
}
    /**
     * Patches application-level settings under lock. Supported fields:
     * "majorVersion" (0 clears the pinned major version) and "pemDeployKey" (adds a deploy key).
     * Returns a message listing the changes applied, or "No applicable changes."
     */
    private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
        Inspector requestObject = toSlime(request.getData()).get();
        StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
            Inspector majorVersionField = requestObject.field("majorVersion");
            if (majorVersionField.valid()) {
                // A major version of 0 means "unpin": store the empty value.
                Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
                application = application.withMajorVersion(majorVersion);
                messageBuilder.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
            }
            Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
            if (pemDeployKeyField.valid()) {
                // Keys are added, not replaced; parsing validates the PEM before storing.
                String pemDeployKey = pemDeployKeyField.asString();
                PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
                application = application.withDeployKey(deployKey);
                messageBuilder.add("Added deploy key " + pemDeployKey);
            }
            controller.applications().store(application);
        });
        return new MessageResponse(messageBuilder.toString());
    }
private Application getApplication(String tenantName, String applicationName) {
TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
return controller.applications().getApplication(applicationId)
.orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
return controller.applications().getInstance(applicationId)
.orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}
    /** Lists the nodes of the given deployment: state, orchestration, versions, resources and cluster membership. */
    private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, id);
        Slime slime = new Slime();
        Cursor nodesArray = slime.setObject().setArray("nodes");
        for (Node node : nodes) {
            Cursor nodeObject = nodesArray.addObject();
            nodeObject.setString("hostname", node.hostname().value());
            nodeObject.setString("state", valueOf(node.state()));
            node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
            nodeObject.setString("orchestration", valueOf(node.serviceState()));
            nodeObject.setString("version", node.currentVersion().toString());
            nodeObject.setString("flavor", node.flavor());
            toSlime(node.resources(), nodeObject);
            nodeObject.setString("clusterId", node.clusterId());
            nodeObject.setString("clusterType", valueOf(node.clusterType()));
            // Reported down if any history event is a "down" event.
            nodeObject.setBool("down", node.history().stream().anyMatch(event -> "down".equals(event.getEvent())));
            nodeObject.setBool("retired", node.retired() || node.wantToRetire());
            // Restart/reboot is pending when the wanted generation is ahead of the current one.
            nodeObject.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration());
            nodeObject.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration());
        }
        return new SlimeJsonResponse(slime);
    }
    /** Lists the clusters of the given deployment: resource limits, current/target/suggested resources and scaling status. */
    private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);
        Slime slime = new Slime();
        Cursor clustersObject = slime.setObject().setObject("clusters");
        for (Cluster cluster : application.clusters().values()) {
            Cursor clusterObject = clustersObject.setObject(cluster.id().value());
            clusterObject.setString("type", cluster.type().name());
            toSlime(cluster.min(), clusterObject.setObject("min"));
            toSlime(cluster.max(), clusterObject.setObject("max"));
            toSlime(cluster.current(), clusterObject.setObject("current"));
            // Include the target only when it differs from current resources (compared via justNumbers()).
            if (cluster.target().isPresent()
                && ! cluster.target().get().justNumbers().equals(cluster.current().justNumbers()))
                toSlime(cluster.target().get(), clusterObject.setObject("target"));
            cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested")));
            utilizationToSlime(cluster.utilization(), clusterObject.setObject("utilization"));
            scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
            clusterObject.setString("autoscalingStatusCode", cluster.autoscalingStatusCode());
            clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus());
            clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis());
            clusterObject.setDouble("maxQueryGrowthRate", cluster.maxQueryGrowthRate());
            clusterObject.setDouble("currentQueryFractionOfMax", cluster.currentQueryFractionOfMax());
        }
        return new SlimeJsonResponse(slime);
    }
private static String valueOf(Node.State state) {
switch (state) {
case failed: return "failed";
case parked: return "parked";
case dirty: return "dirty";
case ready: return "ready";
case active: return "active";
case inactive: return "inactive";
case reserved: return "reserved";
case provisioned: return "provisioned";
default: throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
}
}
static String valueOf(Node.ServiceState state) {
switch (state) {
case expectedUp: return "expectedUp";
case allowedDown: return "allowedDown";
case permanentlyDown: return "permanentlyDown";
case unorchestrated: return "unorchestrated";
case unknown: break;
}
return "unknown";
}
private static String valueOf(Node.ClusterType type) {
switch (type) {
case admin: return "admin";
case content: return "content";
case container: return "container";
case combined: return "combined";
default: throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
}
}
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
switch (diskSpeed) {
case fast : return "fast";
case slow : return "slow";
case any : return "any";
default: throw new IllegalArgumentException("Unknown disk speed '" + diskSpeed.name() + "'");
}
}
private static String valueOf(NodeResources.StorageType storageType) {
switch (storageType) {
case remote : return "remote";
case local : return "local";
case any : return "any";
default: throw new IllegalArgumentException("Unknown storage type '" + storageType.name() + "'");
}
}
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
DeploymentId deployment = new DeploymentId(application, zone);
InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
return new HttpResponse(200) {
@Override
public void render(OutputStream outputStream) throws IOException {
try (logStream) {
logStream.transferTo(outputStream);
}
}
};
}
private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
SupportAccess supportAccess = controller.supportAccess().forDeployment(deployment);
return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(supportAccess, controller.clock().instant()));
}
private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
Principal principal = requireUserPrincipal(request);
Instant now = controller.clock().instant();
SupportAccess allowed = controller.supportAccess().allow(deployment, now.plus(7, ChronoUnit.DAYS), principal.getName());
return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(allowed, now));
}
private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
DeploymentId deployment = new DeploymentId(application, zone);
List<ProtonMetrics> protonMetrics = controller.serviceRegistry().configServer().getProtonMetrics(deployment);
return buildResponseFromProtonMetrics(protonMetrics);
}
private JsonResponse buildResponseFromProtonMetrics(List<ProtonMetrics> protonMetrics) {
try {
var jsonObject = jsonMapper.createObjectNode();
var jsonArray = jsonMapper.createArrayNode();
for (ProtonMetrics metrics : protonMetrics) {
jsonArray.add(metrics.toJson());
}
jsonObject.set("metrics", jsonArray);
return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonObject));
} catch (JsonProcessingException e) {
log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
return new JsonResponse(500, "");
}
}
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
Inspector requestObject = toSlime(request.getData()).get();
boolean requireTests = ! requestObject.field("skipTests").asBool();
boolean reTrigger = requestObject.field("reTrigger").asBool();
String triggered = reTrigger
? controller.applications().deploymentTrigger()
.reTrigger(id, type).type().jobName()
: controller.applications().deploymentTrigger()
.forceTrigger(id, type, request.getJDiscRequest().getUserPrincipal().getName(), requireTests)
.stream().map(job -> job.type().jobName()).collect(joining(", "));
return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
: "Triggered " + triggered + " for " + id);
}
private HttpResponse pause(ApplicationId id, JobType type) {
Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause);
controller.applications().deploymentTrigger().pauseJob(id, type, until);
return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
private HttpResponse resume(ApplicationId id, JobType type) {
controller.applications().deploymentTrigger().resumeJob(id, type);
return new MessageResponse(type.jobName() + " for " + id + " resumed");
}
    /**
     * Serializes the given application to {@code object}: deployment-jobs link, latest version,
     * project id, the first instance's pending/outstanding change, all (or only production)
     * instances, deploy keys, service-quality metrics, activity and issue ids.
     */
    private void toSlime(Cursor object, Application application, HttpRequest request) {
        object.setString("tenant", application.id().tenant().value());
        object.setString("application", application.id().application().value());
        object.setString("deployments", withPath("/application/v4" +
                                                 "/tenant/" + application.id().tenant().value() +
                                                 "/application/" + application.id().application().value() +
                                                 "/job/",
                                                 request.getUri()).toString());
        DeploymentStatus status = controller.jobController().deploymentStatus(application);
        application.latestVersion().ifPresent(version -> toSlime(version, object.setObject("latestVersion")));
        application.projectId().ifPresent(id -> object.setLong("projectId", id));
        // Pending and outstanding changes are reported from the first instance only.
        application.instances().values().stream().findFirst().ifPresent(instance -> {
            if ( ! instance.change().isEmpty())
                toSlime(object.setObject("deploying"), instance.change());
            if ( ! status.outstandingChange(instance.name()).isEmpty())
                toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
        });
        application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
        Cursor instancesArray = object.setArray("instances");
        // Either all instances, or only production ones, depending on the request.
        for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                      : application.instances().values())
            toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);
        application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
        Cursor metricsObject = object.setObject("metrics");
        metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
        metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
        Cursor activity = object.setObject("activity");
        application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
        application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
        application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
        application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
        application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
        application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
        application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
    }
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
object.setString("instance", instance.name().value());
if (deploymentSpec.instance(instance.name()).isPresent()) {
List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
.steps(deploymentSpec.requireInstance(instance.name()))
.sortedJobs(status.instanceJobs(instance.name()).values());
if ( ! instance.change().isEmpty())
toSlime(object.setObject("deploying"), instance.change());
if ( ! status.outstandingChange(instance.name()).isEmpty())
toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
Cursor changeBlockers = object.setArray("changeBlockers");
deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
Cursor changeBlockerObject = changeBlockers.addObject();
changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
Cursor days = changeBlockerObject.setArray("days");
changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
Cursor hours = changeBlockerObject.setArray("hours");
changeBlocker.window().hours().forEach(hours::addLong);
}));
}
globalEndpointsToSlime(object, instance);
List<Deployment> deployments = deploymentSpec.instance(instance.name())
.map(spec -> new DeploymentSteps(spec, controller::system))
.map(steps -> steps.sortedDeployments(instance.deployments().values()))
.orElse(List.copyOf(instance.deployments().values()));
Cursor deploymentsArray = object.setArray("deployments");
for (Deployment deployment : deployments) {
Cursor deploymentObject = deploymentsArray.addObject();
if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
if (recurseOverDeployments(request))
toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
else {
deploymentObject.setString("environment", deployment.zone().environment().value());
deploymentObject.setString("region", deployment.zone().region().value());
deploymentObject.setString("url", withPath(request.getUri().getPath() +
"/instance/" + instance.name().value() +
"/environment/" + deployment.zone().environment().value() +
"/region/" + deployment.zone().region().value(),
request.getUri()).toString());
}
}
}
private void globalEndpointsToSlime(Cursor object, Instance instance) {
var globalEndpointUrls = new LinkedHashSet<String>();
controller.routing().endpointsOf(instance.id())
.requiresRotation()
.not().legacy()
.asList().stream()
.map(Endpoint::url)
.map(URI::toString)
.forEach(globalEndpointUrls::add);
var globalRotationsArray = object.setArray("globalRotations");
globalEndpointUrls.forEach(globalRotationsArray::addString);
instance.rotations().stream()
.map(AssignedRotation::rotationId)
.findFirst()
.ifPresent(rotation -> object.setString("rotationId", rotation.asString()));
}
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
Application application = status.application();
object.setString("tenant", instance.id().tenant().value());
object.setString("application", instance.id().application().value());
object.setString("instance", instance.id().instance().value());
object.setString("deployments", withPath("/application/v4" +
"/tenant/" + instance.id().tenant().value() +
"/application/" + instance.id().application().value() +
"/instance/" + instance.id().instance().value() + "/job/",
request.getUri()).toString());
application.latestVersion().ifPresent(version -> {
sourceRevisionToSlime(version.source(), object.setObject("source"));
version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
version.commit().ifPresent(commit -> object.setString("commit", commit));
});
application.projectId().ifPresent(id -> object.setLong("projectId", id));
if (application.deploymentSpec().instance(instance.name()).isPresent()) {
List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
.steps(application.deploymentSpec().requireInstance(instance.name()))
.sortedJobs(status.instanceJobs(instance.name()).values());
if ( ! instance.change().isEmpty())
toSlime(object.setObject("deploying"), instance.change());
if ( ! status.outstandingChange(instance.name()).isEmpty())
toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
Cursor changeBlockers = object.setArray("changeBlockers");
application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
Cursor changeBlockerObject = changeBlockers.addObject();
changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
Cursor days = changeBlockerObject.setArray("days");
changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
Cursor hours = changeBlockerObject.setArray("hours");
changeBlocker.window().hours().forEach(hours::addLong);
}));
}
application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
globalEndpointsToSlime(object, instance);
List<Deployment> deployments =
application.deploymentSpec().instance(instance.name())
.map(spec -> new DeploymentSteps(spec, controller::system))
.map(steps -> steps.sortedDeployments(instance.deployments().values()))
.orElse(List.copyOf(instance.deployments().values()));
Cursor instancesArray = object.setArray("instances");
for (Deployment deployment : deployments) {
Cursor deploymentObject = instancesArray.addObject();
if (deployment.zone().environment() == Environment.prod) {
if (instance.rotations().size() == 1) {
toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment),
deploymentObject);
}
if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
}
}
if (recurseOverDeployments(request))
toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
else {
deploymentObject.setString("environment", deployment.zone().environment().value());
deploymentObject.setString("region", deployment.zone().region().value());
deploymentObject.setString("instance", instance.id().instance().value());
deploymentObject.setString("url", withPath(request.getUri().getPath() +
"/environment/" + deployment.zone().environment().value() +
"/region/" + deployment.zone().region().value(),
request.getUri()).toString());
}
}
status.jobSteps().keySet().stream()
.filter(job -> job.application().instance().equals(instance.name()))
.filter(job -> job.type().isProduction() && job.type().isDeployment())
.map(job -> job.type().zone(controller.system()))
.filter(zone -> ! instance.deployments().containsKey(zone))
.forEach(zone -> {
Cursor deploymentObject = instancesArray.addObject();
deploymentObject.setString("environment", zone.environment().value());
deploymentObject.setString("region", zone.region().value());
});
application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
Cursor metricsObject = object.setObject("metrics");
metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
Cursor activity = object.setObject("activity");
application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment,
String region, HttpRequest request) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
Instance instance = controller.applications().getInstance(id)
.orElseThrow(() -> new NotExistsException(id + " not found"));
DeploymentId deploymentId = new DeploymentId(instance.id(),
requireZone(environment, region));
Deployment deployment = instance.deployments().get(deploymentId.zoneId());
if (deployment == null)
throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId());
Slime slime = new Slime();
toSlime(slime.setObject(), deploymentId, deployment, request);
return new SlimeJsonResponse(slime);
}
private void toSlime(Cursor object, Change change) {
change.platform().ifPresent(version -> object.setString("version", version.toString()));
change.application()
.filter(version -> !version.isUnknown())
.ifPresent(version -> toSlime(version, object.setObject("revision")));
}
    /** Serializes a single endpoint: cluster, TLS flag, URL, scope, routing method and legacy flag. */
    private void toSlime(Endpoint endpoint, Cursor object) {
        object.setString("cluster", endpoint.cluster().value());
        object.setBool("tls", endpoint.tls());
        object.setString("url", endpoint.url().toString());
        object.setString("scope", endpointScopeString(endpoint.scope()));
        object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
        object.setBool("legacy", endpoint.legacy());
    }
    /**
     * Serializes full details of one deployment to {@code response}: identity, zone and global
     * endpoints, cluster/node/monitoring links, versions, rotation status, job status, quota,
     * cost, archive URI, activity and deployment metrics.
     */
    private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
        response.setString("tenant", deploymentId.applicationId().tenant().value());
        response.setString("application", deploymentId.applicationId().application().value());
        response.setString("instance", deploymentId.applicationId().instance().value());
        response.setString("environment", deploymentId.zoneId().environment().value());
        response.setString("region", deploymentId.zoneId().region().value());
        var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));
        // Legacy endpoints are excluded unless the request explicitly asks for them.
        boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints");
        var endpointArray = response.setArray("endpoints");
        // Zone-scoped endpoints of this deployment.
        EndpointList zoneEndpoints = controller.routing().endpointsOf(deploymentId)
                                               .scope(Endpoint.Scope.zone);
        if (!legacyEndpoints) {
            zoneEndpoints = zoneEndpoints.not().legacy();
        }
        for (var endpoint : controller.routing().directEndpoints(zoneEndpoints, deploymentId.applicationId())) {
            toSlime(endpoint, endpointArray.addObject());
        }
        // Global endpoints which target this deployment's zone.
        EndpointList globalEndpoints = controller.routing().endpointsOf(application, deploymentId.applicationId().instance())
                                                 .targets(deploymentId.zoneId());
        if (!legacyEndpoints) {
            globalEndpoints = globalEndpoints.not().legacy();
        }
        for (var endpoint : controller.routing().directEndpoints(globalEndpoints, deploymentId.applicationId())) {
            toSlime(endpoint, endpointArray.addObject());
        }
        response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
        response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
        response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
        response.setString("version", deployment.version().toFullString());
        response.setString("revision", deployment.applicationVersion().id());
        response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
        // Expiry is only present in zones configured with a deployment time-to-live.
        controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
                  .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));
        DeploymentStatus status = controller.jobController().deploymentStatus(application);
        application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
        sourceRevisionToSlime(deployment.applicationVersion().source(), response);
        var instance = application.instances().get(deploymentId.applicationId().instance());
        if (instance != null) {
            if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);
            // Job status for this zone: "complete", "pending" (waiting to become ready) or "running".
            JobType.from(controller.system(), deployment.zone())
                   .map(type -> new JobId(instance.id(), type))
                   .map(status.jobSteps()::get)
                   .ifPresent(stepStatus -> {
                       JobControllerApiHandlerHelper.applicationVersionToSlime(
                               response.setObject("applicationVersion"), deployment.applicationVersion());
                       if (!status.jobsToRun().containsKey(stepStatus.job().get()))
                           response.setString("status", "complete");
                       else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true))
                           response.setString("status", "pending");
                       else response.setString("status", "running");
                   });
        }
        response.setDouble("quota", deployment.quota().rate());
        deployment.cost().ifPresent(cost -> response.setDouble("cost", cost));
        controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant())
                  .ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString()));
        Cursor activity = response.setObject("activity");
        deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
                                                                                  instant.toEpochMilli()));
        deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
                                                                                  instant.toEpochMilli()));
        deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
        deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
        DeploymentMetrics metrics = deployment.metrics();
        Cursor metricsObject = response.setObject("metrics");
        metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
        metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
        metricsObject.setDouble("documentCount", metrics.documentCount());
        metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
        metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
        metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
    }
private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
if ( ! applicationVersion.isUnknown()) {
object.setLong("buildNumber", applicationVersion.buildNumber().getAsLong());
object.setString("hash", applicationVersion.id());
sourceRevisionToSlime(applicationVersion.source(), object.setObject("source"));
applicationVersion.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
applicationVersion.commit().ifPresent(commit -> object.setString("commit", commit));
}
}
private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
if (revision.isEmpty()) return;
object.setString("gitRepository", revision.get().repository());
object.setString("gitBranch", revision.get().branch());
object.setString("gitCommit", revision.get().commit());
}
private void toSlime(RotationState state, Cursor object) {
Cursor bcpStatus = object.setObject("bcpStatus");
bcpStatus.setString("rotationStatus", rotationStateString(state));
}
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
var array = object.setArray("endpointStatus");
for (var rotation : rotations) {
var statusObject = array.addObject();
var targets = status.of(rotation.rotationId());
statusObject.setString("endpointId", rotation.endpointId().id());
statusObject.setString("rotationId", rotation.rotationId().asString());
statusObject.setString("clusterId", rotation.clusterId().value());
statusObject.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment)));
statusObject.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
}
}
    /** Returns the monitoring system URI for the given deployment, as configured in the zone registry. */
    private URI monitoringSystemUri(DeploymentId deploymentId) {
        return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
    }
    /**
     * Returns a non-broken, released version at least as old as the oldest platform the given application is on.
     *
     * If no known version is applicable, the newest version at least as old as the oldest platform is selected,
     * among all versions released for this system. If no such versions exists, throws an IllegalStateException.
     */
    private Version compileVersion(TenantAndApplicationId id) {
        Version oldestPlatform = controller.applications().oldestInstalledPlatform(id);
        VersionStatus versionStatus = controller.readVersionStatus();
        // Preferred: the newest version known to this system with at least "low" confidence,
        // released, and not newer than the oldest platform this application runs on.
        return versionStatus.versions().stream()
                .filter(version -> version.confidence().equalOrHigherThan(VespaVersion.Confidence.low))
                .filter(VespaVersion::isReleased)
                .map(VespaVersion::versionNumber)
                .filter(version -> ! version.isAfter(oldestPlatform))
                .max(Comparator.naturalOrder())
                // Fallback: the newest maven-released version not newer than the oldest platform,
                // excluding versions this system already knows about.
                .orElseGet(() -> controller.mavenRepository().metadata().versions().stream()
                        .filter(version -> ! version.isAfter(oldestPlatform))
                        .filter(version -> ! versionStatus.versions().stream()
                                .map(VespaVersion::versionNumber)
                                .collect(Collectors.toSet()).contains(version))
                        .max(Comparator.naturalOrder())
                        .orElseThrow(() -> new IllegalStateException("No available releases of " +
                                controller.mavenRepository().artifactId())));
    }
/** Takes the given deployment in or out of service for all its global endpoints. */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().requireInstance(applicationId);
    ZoneId zone = requireZone(environment, region);
    if ( ! instance.deployments().containsKey(zone))
        throw new NotExistsException(instance + " has no deployment in " + zone);
    DeploymentId deploymentId = new DeploymentId(instance.id(), zone);
    // Both rotation-backed and routing-policy-backed endpoints are updated.
    setGlobalRotationStatus(deploymentId, inService, request);
    setGlobalEndpointStatus(deploymentId, inService, request);
    return new MessageResponse(String.format("Successfully set %s in %s %s service",
                                             instance.id().toShortString(), zone, inService ? "in" : "out of"));
}
/** Set the global endpoint status for given deployment. This only applies to global endpoints backed by a cloud service */
private void setGlobalEndpointStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
    GlobalRouting.Agent agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant;
    GlobalRouting.Status status = inService ? GlobalRouting.Status.in : GlobalRouting.Status.out;
    controller.routing().policies().setGlobalRoutingStatus(deployment, status, agent);
}
/** Set the global rotation status for given deployment. This only applies to global endpoints backed by a rotation */
private void setGlobalRotationStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
    // A human-readable reason is required in the request body.
    String reason = mandatory("reason", toSlime(request.getData()).get()).asString();
    GlobalRouting.Agent agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant;
    EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
    EndpointStatus endpointStatus = new EndpointStatus(status, reason, agent.name(),
                                                       controller.clock().instant().getEpochSecond());
    controller.routing().setGlobalRotationStatus(deployment, endpointStatus);
}
/** Returns the rotation-backed global routing status for the given deployment. */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime response = new Slime();
    Cursor overrideArray = response.setObject().setArray("globalrotationoverride");
    controller.routing().globalRotationStatus(deploymentId)
              .forEach((endpoint, status) -> {
                  overrideArray.addString(endpoint.upstreamIdOf(deploymentId));
                  Cursor entry = overrideArray.addObject();
                  entry.setString("status", status.getStatus().name());
                  // Reason and agent may be unset; serialize them as empty strings rather than nix.
                  entry.setString("reason", status.getReason() == null ? "" : status.getReason());
                  entry.setString("agent", status.getAgent() == null ? "" : status.getAgent());
                  entry.setLong("timestamp", status.getEpoch());
              });
    return new SlimeJsonResponse(response);
}
/** Returns the state of the given instance's rotation (or the one with the given endpoint ID) in a zone. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = requireZone(environment, region);
    RotationId rotationId = findRotationId(instance, endpointId);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " has no deployment in " + zone);
    Slime slime = new Slime();
    toSlime(instance.rotationStatus().of(rotationId, deployment), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/**
 * Returns metering data for the given application: the current resource allocation rate, the
 * aggregates for this and last month, and a per-instance history of resource snapshots.
 */
private HttpResponse metering(String tenant, String application, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    MeteringData meteringData = controller.serviceRegistry()
                                          .meteringService()
                                          .getMeteringData(TenantName.from(tenant), ApplicationName.from(application));
    // The three summary objects share the same shape; serialize them through a common helper.
    toSlime(meteringData.getCurrentSnapshot(), root.setObject("currentrate"));
    toSlime(meteringData.getThisMonth(), root.setObject("thismonth"));
    toSlime(meteringData.getLastMonth(), root.setObject("lastmonth"));
    Map<ApplicationId, List<ResourceSnapshot>> history = meteringData.getSnapshotHistory();
    Cursor details = root.setObject("details");
    Cursor detailsCpu = details.setObject("cpu");
    Cursor detailsMem = details.setObject("mem");
    Cursor detailsDisk = details.setObject("disk");
    history.forEach((applicationId, resources) -> {
        String instanceName = applicationId.instance().value();
        Cursor cpuData = detailsCpu.setObject(instanceName).setArray("data");
        Cursor memData = detailsMem.setObject(instanceName).setArray("data");
        Cursor diskData = detailsDisk.setObject(instanceName).setArray("data");
        resources.forEach(snapshot -> {
            addDataPoint(cpuData, snapshot.getTimestamp(), snapshot.getCpuCores());
            addDataPoint(memData, snapshot.getTimestamp(), snapshot.getMemoryGb());
            addDataPoint(diskData, snapshot.getTimestamp(), snapshot.getDiskGb());
        });
    });
    return new SlimeJsonResponse(slime);
}

/** Writes the cpu/mem/disk fields of the given resource allocation to the given object. */
private static void toSlime(ResourceAllocation allocation, Cursor object) {
    object.setDouble("cpu", allocation.getCpuCores());
    object.setDouble("mem", allocation.getMemoryGb());
    object.setDouble("disk", allocation.getDiskGb());
}

/** Adds a single { unixms, value } data point to the given array. */
private static void addDataPoint(Cursor dataArray, Instant timestamp, double value) {
    Cursor point = dataArray.addObject();
    point.setLong("unixms", timestamp.toEpochMilli());
    point.setDouble("value", value);
}
/** Returns the currently deploying change (platform and/or application) for the given instance, if any. */
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    Change change = instance.change();
    if ( ! change.isEmpty()) {
        change.platform().ifPresent(version -> root.setString("platform", version.toString()));
        change.application().ifPresent(version -> root.setString("application", version.id()));
        root.setBool("pinned", change.isPinned());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns whether orchestration has suspended the given deployment. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    slime.setObject().setBool("suspended", controller.applications().isSuspended(deploymentId));
    return new SlimeJsonResponse(slime);
}
/** Lists the services running in the given deployment. */
private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
    ZoneId zone = requireZone(environment, region);
    ApplicationId applicationId = new ApplicationId.Builder().tenant(tenantName)
                                                             .applicationName(applicationName)
                                                             .instanceName(instanceName)
                                                             .build();
    ServiceApiResponse response = new ServiceApiResponse(zone,
                                                         applicationId,
                                                         List.of(controller.zoneRegistry().getConfigServerVipUri(zone)),
                                                         request.getUri());
    response.setResponse(applicationView);
    return response;
}
/** Proxies a service API request, or a cluster controller status page, for the given deployment. */
private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    if ("container-clustercontroller".equals(serviceName) && restPath.contains("/status/")) {
        String[] parts = restPath.split("/status/");
        // Guard against a trailing "/status/" with no page after it, which previously threw
        // ArrayIndexOutOfBoundsException; such paths now fall through to the generic service API.
        if (parts.length >= 2) {
            String result = controller.serviceRegistry().configServer().getClusterControllerStatus(deploymentId, parts[0], parts[1]);
            return new HtmlResponse(result);
        }
    }
    Map<?, ?> result = controller.serviceRegistry().configServer().getServiceApiResponse(deploymentId, serviceName, restPath);
    ServiceApiResponse response = new ServiceApiResponse(deploymentId.zoneId(),
                                                         deploymentId.applicationId(),
                                                         List.of(controller.zoneRegistry().getConfigServerVipUri(deploymentId.zoneId())),
                                                         request.getUri());
    response.setResponse(result, serviceName, restPath);
    return response;
}
/** Returns content of the deployed application package at the given path. */
private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, String restPath, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deploymentId = new DeploymentId(applicationId, requireZone(environment, region));
    return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, "/" + restPath, request.getUri());
}
/** Updates an existing tenant from the request body, and returns the updated tenant. */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName); // 404 instead of implicit creation when the tenant does not exist.
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    return tenant(controller.tenants().require(tenant), request); // Reuse the parsed name rather than re-parsing it.
}
/** Creates a new tenant from the request body, and returns it. */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().create(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    return tenant(controller.tenants().require(tenant), request); // Reuse the parsed name rather than re-parsing it.
}
/** Creates a new application under the given tenant, and returns its serialized form. */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    // Called for its side effect; the returned Application was previously bound to an unused local.
    controller.applications().createApplication(id, credentials);
    Slime slime = new Slime();
    toSlime(id, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Creates a new instance of the given application, creating the application itself first if needed. */
private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    if ( ! controller.applications().getApplication(applicationId).isPresent())
        createApplication(tenantName, applicationName, request);
    ApplicationId instanceId = applicationId.instance(instanceName);
    controller.applications().createInstance(instanceId);
    Slime slime = new Slime();
    toSlime(instanceId, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
    String requestedVersion = readToString(request.getData());
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder message = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        VersionStatus versionStatus = controller.readVersionStatus();
        Version version = Version.fromString(requestedVersion);
        if (version.equals(Version.emptyVersion))
            version = controller.systemVersion(versionStatus); // Empty version means "current system version".
        if ( ! versionStatus.isActive(version)) {
            String activeVersions = versionStatus.versions()
                                                 .stream()
                                                 .map(VespaVersion::versionNumber)
                                                 .map(Version::toString)
                                                 .collect(joining(", "));
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + activeVersions);
        }
        Change change = pin ? Change.of(version).withPin() : Change.of(version);
        controller.applications().deploymentTrigger().forceChange(id, change);
        message.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(message.toString());
}
/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        // Fail with a helpful message, rather than a bare NoSuchElementException, when nothing has been submitted.
        Change change = Change.of(application.get().latestVersion()
                                             .orElseThrow(() -> new IllegalArgumentException(
                                                     "No application package has been submitted for " + id)));
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder message = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Change current = application.get().require(id.instance()).change();
        if (current.isEmpty()) {
            message.append("No deployment in progress for ").append(id).append(" at this time");
            return;
        }
        controller.applications().deploymentTrigger().cancelChange(id, ChangesToCancel.valueOf(choice.toUpperCase()));
        message.append("Changed deployment from '").append(current)
               .append("' to '").append(controller.applications().requireInstance(id).change())
               .append("' for ").append(id);
    });
    return new MessageResponse(message.toString());
}
/** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */
private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream()
                                        .flatMap(clusters -> Stream.of(clusters.split(",")))
                                        .filter(cluster -> ! cluster.isBlank())
                                        .collect(toUnmodifiableList());
    List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream()
                                         .flatMap(types -> Stream.of(types.split(",")))
                                         .filter(type -> ! type.isBlank())
                                         .collect(toUnmodifiableList());
    controller.applications().reindex(id, zone, clusterNames, documentTypes, request.getBooleanProperty("indexedOnly"));
    // Fix: the document-type clause was previously nested inside the cluster clause, so it was
    // silently omitted whenever no clusters were given. The two clauses are now appended independently.
    return new MessageResponse("Requested reindexing of " + id + " in " + zone +
                               (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames)) +
                               (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)));
}
/** Gets reindexing status of an application in a zone. */
private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setBool("enabled", reindexing.enabled());
    Cursor clustersArray = root.setArray("clusters");
    // Clusters, and the entries within each, are sorted by name for a stable response.
    reindexing.clusters().entrySet().stream().sorted(comparingByKey()).forEach(clusterEntry -> {
        Cursor clusterObject = clustersArray.addObject();
        clusterObject.setString("name", clusterEntry.getKey());
        Cursor pendingArray = clusterObject.setArray("pending");
        clusterEntry.getValue().pending().entrySet().stream().sorted(comparingByKey()).forEach(pendingEntry -> {
            Cursor pendingObject = pendingArray.addObject();
            pendingObject.setString("type", pendingEntry.getKey());
            pendingObject.setLong("requiredGeneration", pendingEntry.getValue());
        });
        Cursor readyArray = clusterObject.setArray("ready");
        clusterEntry.getValue().ready().entrySet().stream().sorted(comparingByKey()).forEach(readyEntry -> {
            Cursor readyObject = readyArray.addObject();
            readyObject.setString("type", readyEntry.getKey());
            setStatus(readyObject, readyEntry.getValue());
        });
    });
    return new SlimeJsonResponse(slime);
}
/** Serializes the given reindexing status to the given object; absent fields are simply omitted. */
void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
    status.readyAt().ifPresent(at -> statusObject.setLong("readyAtMillis", at.toEpochMilli()));
    status.startedAt().ifPresent(at -> statusObject.setLong("startedAtMillis", at.toEpochMilli()));
    status.endedAt().ifPresent(at -> statusObject.setLong("endedAtMillis", at.toEpochMilli()));
    status.state().map(ApplicationApiHandler::toString).ifPresent(value -> statusObject.setString("state", value));
    status.message().ifPresent(value -> statusObject.setString("message", value));
    status.progress().ifPresent(value -> statusObject.setDouble("progress", value));
}
/**
 * Returns the lower-case wire name used for the given reindexing state in API responses,
 * or null for states without a wire name — callers must tolerate null (the field is then omitted).
 * Note: a null argument would NPE in the switch; the only visible caller reaches this through
 * Optional.map, which never passes null.
 */
private static String toString(ApplicationReindexing.State state) {
switch (state) {
case PENDING: return "pending";
case RUNNING: return "running";
case FAILED: return "failed";
case SUCCESSFUL: return "successful";
default: return null;
}
}
/** Enables reindexing of an application in a zone. */
private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    controller.applications().enableReindexing(application, zone);
    return new MessageResponse("Enabled reindexing of " + application + " in " + zone);
}
/** Disables reindexing of an application in a zone. */
private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    controller.applications().disableReindexing(application, zone);
    return new MessageResponse("Disabled reindexing of " + application + " in " + zone);
}
/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    // Each filter dimension is optional; an absent request property leaves that dimension unrestricted.
    Optional<HostName> hostName = Optional.ofNullable(request.getProperty("hostname")).map(HostName::from);
    Optional<ClusterSpec.Type> clusterType = Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from);
    Optional<ClusterSpec.Id> clusterId = Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from);
    controller.applications().restart(deploymentId, new RestartFilter().withHostName(hostName)
                                                                       .withClusterType(clusterType)
                                                                       .withClusterId(clusterId));
    return new MessageResponse("Requested restart of " + deploymentId);
}
/** Set suspension status of the given deployment. */
private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deploymentId = new DeploymentId(application, requireZone(environment, region));
    controller.applications().setSuspension(deploymentId, suspend);
    String verb = suspend ? "Suspended" : "Resumed";
    return new MessageResponse(verb + " orchestration of " + deploymentId);
}
/** Deploys the posted application package directly to the given job's zone, and starts the job run. */
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
    if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");
    Map<String, byte[]> dataParts = parseDataParts(request);
    // Use the same key constant for both the presence check and the retrieval, so the two can never disagree
    // (the check previously used the literal "applicationZip" while the retrieval used the constant).
    if ( ! dataParts.containsKey(EnvironmentResource.APPLICATION_ZIP))
        throw new IllegalArgumentException("Missing required form part 'applicationZip'");
    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                     Optional.of(id.instance()),
                                                                     Optional.of(type.zone(controller.system())),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));
    // An explicit Vespa version may be given in the optional "deployOptions" JSON part.
    Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions"))
                                        .map(json -> SlimeUtils.jsonToSlime(json).get())
                                        .flatMap(options -> optional("vespaVersion", options))
                                        .map(Version::fromString);
    controller.jobController().deploy(id, type, version, applicationPackage);
    RunId runId = controller.jobController().last(id, type).get().id();
    Slime slime = new Slime();
    Cursor rootObject = slime.setObject();
    rootObject.setString("message", "Deployment started in " + runId +
                                    ". This may take about 15 minutes the first time.");
    rootObject.setLong("run", runId.number());
    return new SlimeJsonResponse(slime);
}
/** Deploys a system application to the given zone; regular applications are rejected here. */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( ! dataParts.containsKey("deployOptions"))
        return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
    Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
    Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId);
    if (systemApplication.isEmpty() || ! systemApplication.get().hasApplicationPackage())
        return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API");
    // System applications always deploy on the current system version; an explicit version is rejected.
    String vespaVersion = deployOptions.field("vespaVersion").asString();
    if ( ! vespaVersion.isEmpty() && ! vespaVersion.equals("null"))
        return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted");
    VersionStatus versionStatus = controller.readVersionStatus();
    if (versionStatus.isUpgrading())
        throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
    VespaVersion systemVersion = versionStatus.systemVersion()
            .orElseThrow(() -> new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined"));
    ActivateResult result = controller.applications()
                                      .deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.versionNumber());
    return new SlimeJsonResponse(toSlime(result));
}
/** Deletes the given tenant, if it exists, returning its last serialized form. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if ( ! tenant.isPresent())
        return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");
    TenantName name = tenant.get().name();
    Credentials credentials = accessControlRequests.credentials(name,
                                                                toSlime(request.getData()).get(),
                                                                request.getJDiscRequest());
    controller.tenants().delete(name, credentials);
    return tenant(tenant.get(), request);
}
/** Deletes the given application, using credentials from the request body. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Inspector requestObject = toSlime(request.getData()).get();
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    controller.applications().deleteApplication(id, credentials);
    return new MessageResponse("Deleted application " + id);
}
/** Deletes the given instance, and the enclosing application as well when this was its last instance. */
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    ApplicationId instanceId = id.instance(instanceName);
    controller.applications().deleteInstance(instanceId);
    if (controller.applications().requireApplication(id).instances().isEmpty()) {
        Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
        controller.applications().deleteApplication(id, credentials);
    }
    return new MessageResponse("Deleted instance " + instanceId.toFullString());
}
/** Deactivates the deployment of the given instance in the given zone. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId id = new DeploymentId(applicationId, requireZone(environment, region));
    controller.applications().deactivate(id.applicationId(), id.zoneId());
    return new MessageResponse("Deactivated " + id);
}
/** Returns test config for indicated job, with production deployments of the default instance. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
    ApplicationId defaultInstanceId = TenantAndApplicationId.from(id).defaultInstance();
    HashSet<DeploymentId> deployments = new HashSet<>();
    controller.applications().getInstance(defaultInstanceId)
              .ifPresent(instance -> instance.productionDeployments().keySet()
                                             .forEach(zone -> deployments.add(new DeploymentId(defaultInstanceId, zone))));
    var testedZone = type.zone(controller.system());
    // Non-production jobs additionally test against the zone they deploy to.
    if ( ! type.isProduction())
        deployments.add(new DeploymentId(id, testedZone));
    return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
                                                                  type,
                                                                  false,
                                                                  controller.routing().zoneEndpointsOf(deployments),
                                                                  controller.applications().reachableContentClustersByZone(deployments)));
}
/** Parses a source revision from the given object; all three of repository, branch and commit are required. */
private static SourceRevision toSourceRevision(Inspector object) {
    Inspector repository = object.field("repository");
    Inspector branch = object.field("branch");
    Inspector commit = object.field("commit");
    if ( ! (repository.valid() && branch.valid() && commit.valid()))
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    return new SourceRevision(repository.asString(), branch.asString(), commit.asString());
}
/** Returns the tenant with the given name, or throws NotExistsException. */
private Tenant getTenantOrThrow(String tenantName) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty())
        throw new NotExistsException(new TenantId(tenantName));
    return tenant.get();
}
/**
 * Serializes the given tenant — with type-specific metadata — and all its given applications,
 * followed by aggregate tenant metadata, to the given object.
 */
private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) {
object.setString("tenant", tenant.name().value());
object.setString("type", tenantType(tenant));
switch (tenant.type()) {
case athenz:
// Athenz tenants carry a domain, a property, and optional contact information.
AthenzTenant athenzTenant = (AthenzTenant) tenant;
object.setString("athensDomain", athenzTenant.domain().getName());
object.setString("property", athenzTenant.property().id());
athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
athenzTenant.contact().ifPresent(c -> {
object.setString("propertyUrl", c.propertyUrl().toString());
object.setString("contactsUrl", c.url().toString());
object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
Cursor contactsArray = object.setArray("contacts");
// Each contact is itself a list of person names, serialized as a nested array.
c.persons().forEach(persons -> {
Cursor personArray = contactsArray.addArray();
persons.forEach(personArray::addString);
});
});
break;
case cloud: {
// Cloud tenants carry developer keys, secret stores, cloud integrations, and quota.
CloudTenant cloudTenant = (CloudTenant) tenant;
cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName()));
Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
cloudTenant.developerKeys().forEach((key, user) -> {
Cursor keyObject = pemDeveloperKeysArray.addObject();
keyObject.setString("key", KeyUtils.toPem(key));
keyObject.setString("user", user.getName());
});
toSlime(object, cloudTenant.tenantSecretStores());
toSlime(object.setObject("integrations").setObject("aws"),
controller.serviceRegistry().roleService().getTenantRole(tenant.name()),
cloudTenant.tenantSecretStores());
// Quota: the tenant's total, and the usage summed across all its applications.
var tenantQuota = controller.serviceRegistry().billingController().getQuota(tenant.name());
var usedQuota = applications.stream()
.map(Application::quotaUsage)
.reduce(QuotaUsage.none, QuotaUsage::add);
toSlime(tenantQuota, usedQuota, object.setObject("quota"));
cloudTenant.archiveAccessRole().ifPresent(role -> object.setString("archiveAccessRole", role));
break;
}
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
Cursor applicationArray = object.setArray("applications");
for (Application application : applications) {
DeploymentStatus status = null;
// Optionally restrict to production instances; optionally recurse into full deployment status.
for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
: application.instances().values())
if (recurseOverApplications(request)) {
// Deployment status is computed lazily, at most once per application, and only when recursing.
if (status == null) status = controller.jobController().deploymentStatus(application);
toSlime(applicationArray.addObject(), instance, status, request);
}
else {
toSlime(instance.id(), applicationArray.addObject(), request);
}
}
tenantMetaDataToSlime(tenant, applications, object.setObject("metaData"));
}
/** Serializes the given quota and its current usage; a tenant without a budget gets a nix budget field. */
private void toSlime(Quota quota, QuotaUsage usage, Cursor object) {
    if (quota.budget().isPresent())
        object.setDouble("budget", quota.budget().get().doubleValue());
    else
        object.setNix("budget");
    object.setDouble("budgetUsed", usage.rate());
    quota.maxClusterSize().ifPresent(size -> object.setLong("clusterSize", size));
}
/** Serializes the given cluster resources, including an estimated cost for this system. */
private void toSlime(ClusterResources resources, Cursor object) {
    object.setLong("nodes", resources.nodes());
    object.setLong("groups", resources.groups());
    toSlime(resources.nodeResources(), object.setObject("nodeResources"));
    object.setDouble("cost", ResourceMeterMaintainer.cost(resources, controller.serviceRegistry().zoneRegistry().system()));
}
/**
 * Serializes cluster resource utilization: for each of cpu, memory and disk, the utilization,
 * the ideal (target) value, and the current value. Field order is part of the response format.
 */
private void utilizationToSlime(Cluster.Utilization utilization, Cursor utilizationObject) {
utilizationObject.setDouble("cpu", utilization.cpu());
utilizationObject.setDouble("idealCpu", utilization.idealCpu());
utilizationObject.setDouble("currentCpu", utilization.currentCpu());
utilizationObject.setDouble("memory", utilization.memory());
utilizationObject.setDouble("idealMemory", utilization.idealMemory());
utilizationObject.setDouble("currentMemory", utilization.currentMemory());
utilizationObject.setDouble("disk", utilization.disk());
utilizationObject.setDouble("idealDisk", utilization.idealDisk());
utilizationObject.setDouble("currentDisk", utilization.currentDisk());
}
/** Serializes the given scaling events; a missing completion timestamp means the event is still in progress. */
private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) {
    scalingEvents.forEach(event -> {
        Cursor eventObject = scalingEventsArray.addObject();
        toSlime(event.from(), eventObject.setObject("from"));
        toSlime(event.to(), eventObject.setObject("to"));
        eventObject.setLong("at", event.at().toEpochMilli());
        event.completion().ifPresent(at -> eventObject.setLong("completion", at.toEpochMilli()));
    });
}
/**
 * Serializes per-node resources. Disk speed and storage type are written via their
 * serialized string names. Field order is part of the response format.
 */
private void toSlime(NodeResources resources, Cursor object) {
object.setDouble("vcpu", resources.vcpu());
object.setDouble("memoryGb", resources.memoryGb());
object.setDouble("diskGb", resources.diskGb());
object.setDouble("bandwidthGbps", resources.bandwidthGbps());
object.setString("diskSpeed", valueOf(resources.diskSpeed()));
object.setString("storageType", valueOf(resources.storageType()));
}
/** Renders one entry of the tenant list response: name, type-specific metadata and a self URL. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
    object.setString("tenant", tenant.name().value());
    Cursor metaData = object.setObject("metaData");
    metaData.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz:
            // Athenz tenants additionally expose their domain and property id
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            metaData.setString("athensDomain", athenzTenant.domain().getName());
            metaData.setString("property", athenzTenant.property().id());
            break;
        case cloud: break; // cloud tenants have no extra metadata here
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/**
 * Writes tenant activity metadata: creation time, last dev deployment, last prod submission,
 * and last login times per user level. Timestamps are epoch millis; absent values are omitted.
 */
private void tenantMetaDataToSlime(Tenant tenant, List<Application> applications, Cursor object) {
    // Last dev activity: prefer actual dev deployments; fall back to the most recent dev job run,
    // since a dev deployment may already have been removed while its run history remains.
    Optional<Instant> lastDev = applications.stream()
                                            .flatMap(application -> application.instances().values().stream())
                                            .flatMap(instance -> instance.deployments().values().stream())
                                            .filter(deployment -> deployment.zone().environment() == Environment.dev)
                                            .map(Deployment::at)
                                            .max(Comparator.naturalOrder())
                                            .or(() -> applications.stream()
                                                                  .flatMap(application -> application.instances().values().stream())
                                                                  .flatMap(instance -> JobType.allIn(controller.system()).stream()
                                                                                              .filter(job -> job.environment() == Environment.dev)
                                                                                              .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream()))
                                                                  .map(Run::start)
                                                                  .max(Comparator.naturalOrder()));
    // Last submission to prod is approximated by the newest application package build time
    Optional<Instant> lastSubmission = applications.stream()
                                                   .flatMap(app -> app.latestVersion().flatMap(ApplicationVersion::buildTime).stream())
                                                   .max(Comparator.naturalOrder());
    object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli());
    lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli()));
    lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user)
          .ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer)
          .ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator)
          .ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli()));
}
/** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to given query*/
private URI withPathAndQuery(String newPath, String newQuery, URI uri) {
    try {
        return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null);
    }
    catch (URISyntaxException e) {
        // All components are taken from an already-valid URI, so this cannot occur
        throw new RuntimeException("Will not happen", e);
    }
}
/** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
private URI withPath(String newPath, URI uri) {
    return withPathAndQuery(newPath, null, uri);
}
/** Returns the canonical application/v4 API path for the given deployment. */
private String toPath(DeploymentId id) {
    return path("/application", "v4",
                "tenant", id.applicationId().tenant(),
                "application", id.applicationId().application(),
                "instance", id.applicationId().instance(),
                "environment", id.zoneId().environment(),
                "region", id.zoneId().region());
}
/**
 * Parses the given string as a long.
 *
 * @param valueOrNull the value to parse, possibly null
 * @param defaultWhenNull returned when the value is null
 * @throws IllegalArgumentException if the value is non-null but not a valid long
 */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null) return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    } catch (NumberFormatException e) {
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'");
    }
}
/** Serializes a job run: id, target platform version, revision (when known) and timestamp. */
private void toSlime(Run run, Cursor object) {
    object.setLong("id", run.id().number());
    object.setString("version", run.versions().targetPlatform().toFullString());
    // Skip the revision when the target application version is unknown
    if ( ! run.versions().targetApplication().isUnknown())
        toSlime(run.versions().targetApplication(), object.setObject("revision"));
    // NOTE(review): reason is hard-coded; presumably a placeholder until run reasons are tracked — confirm
    object.setString("reason", "unknown reason");
    // Use end time when finished, otherwise start time
    object.setLong("at", run.end().orElse(run.start()).toEpochMilli());
}
/**
 * Reads the given stream (capped at 1 MB) and parses it as JSON into a Slime tree.
 *
 * @throws RuntimeException wrapping the underlying IOException if reading fails
 */
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        // Chain the cause: the previous bare RuntimeException() discarded all diagnostics
        throw new RuntimeException("Failed to read JSON request body", e);
    }
}
/** Returns the authenticated user principal of the request, or fails with 500 if none is set. */
private static Principal requireUserPrincipal(HttpRequest request) {
    return Optional.ofNullable(request.getJDiscRequest().getUserPrincipal())
                   .orElseThrow(() -> new InternalServerErrorException("Expected a user principal"));
}
/** Returns the field with the given key, throwing if it is absent or invalid. */
private Inspector mandatory(String key, Inspector object) {
    Inspector field = object.field(key);
    if ( ! field.valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return field;
}
/** Returns the string value of the given field, or empty if the field is absent. */
private Optional<String> optional(String key, Inspector object) {
    return SlimeUtils.optionalString(object.field(key));
}
/** Joins the given elements with '/' to form a URL path. */
private static String path(Object... elements) {
    return Joiner.on("/").join(elements);
}
/** Writes tenant, application and a self URL for the given tenant-and-application id. */
private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    String selfPath = "/application/v4" +
                      "/tenant/" + id.tenant().value() +
                      "/application/" + id.application().value();
    object.setString("url", withPath(selfPath, request.getUri()).toString());
}
/** Writes tenant, application, instance and a self URL for the given application instance id. */
private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    object.setString("instance", id.instance().value());
    String selfPath = "/application/v4" +
                      "/tenant/" + id.tenant().value() +
                      "/application/" + id.application().value() +
                      "/instance/" + id.instance().value();
    object.setString("url", withPath(selfPath, request.getUri()).toString());
}
/**
 * Serializes a deployment activation result: revision id, package size, prepare log messages,
 * and any config change actions (service restarts and document refeeds) reported by the config server.
 */
private Slime toSlime(ActivateResult result) {
    Slime slime = new Slime();
    Cursor object = slime.setObject();
    object.setString("revisionId", result.revisionId().id());
    object.setLong("applicationZipSize", result.applicationZipSizeBytes());
    Cursor logArray = object.setArray("prepareMessages");
    if (result.prepareResponse().log != null) { // log may be absent from the config server response
        for (Log logMessage : result.prepareResponse().log) {
            Cursor logObject = logArray.addObject();
            logObject.setLong("time", logMessage.time);
            logObject.setString("level", logMessage.level);
            logObject.setString("message", logMessage.message);
        }
    }
    Cursor changeObject = object.setObject("configChangeActions");
    // Restart actions: clusters/services which must be restarted for the config change to take effect
    Cursor restartActionsArray = changeObject.setArray("restart");
    for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
        Cursor restartActionObject = restartActionsArray.addObject();
        restartActionObject.setString("clusterName", restartAction.clusterName);
        restartActionObject.setString("clusterType", restartAction.clusterType);
        restartActionObject.setString("serviceType", restartAction.serviceType);
        serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
        stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
    }
    // Refeed actions: document types which must be re-fed after the config change
    Cursor refeedActionsArray = changeObject.setArray("refeed");
    for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
        Cursor refeedActionObject = refeedActionsArray.addObject();
        refeedActionObject.setString("name", refeedAction.name);
        refeedActionObject.setString("documentType", refeedAction.documentType);
        refeedActionObject.setString("clusterName", refeedAction.clusterName);
        serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
        stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
    }
    return slime;
}
/** Serializes each service info (name, type, config id, host) as an object in the given array. */
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
    for (ServiceInfo service : serviceInfoList) {
        Cursor entry = array.addObject();
        entry.setString("serviceName", service.serviceName);
        entry.setString("serviceType", service.serviceType);
        entry.setString("configId", service.configId);
        entry.setString("hostName", service.hostName);
    }
}
/** Appends each of the given strings to the given array. */
private void stringsToSlime(List<String> strings, Cursor array) {
    strings.forEach(array::addString);
}
/** Serializes the given secret stores under a "secretStores" array on the given object. */
private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) {
    Cursor storeArray = object.setArray("secretStores");
    for (TenantSecretStore store : tenantSecretStores)
        toSlime(storeArray.addObject(), store);
}
/** Writes the tenant container role and serializes the secret stores under an "accounts" array. */
private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) {
    object.setString("tenantRole", tenantRoles.containerRole());
    Cursor accounts = object.setArray("accounts");
    for (TenantSecretStore store : tenantSecretStores)
        toSlime(accounts.addObject(), store);
}
/** Serializes one secret store: name, AWS account id and role. */
private void toSlime(Cursor object, TenantSecretStore secretStore) {
    object.setString("name", secretStore.getName());
    object.setString("awsId", secretStore.getAwsId());
    object.setString("role", secretStore.getRole());
}
/**
 * Reads the entire stream into a single string, or returns null if the stream is empty.
 * Decodes as UTF-8 explicitly: the previous Scanner constructor used the platform default
 * charset, which is not guaranteed to be UTF-8 before Java 18 and would corrupt non-ASCII
 * request bodies on such platforms.
 */
private String readToString(InputStream stream) {
    Scanner scanner = new Scanner(stream, "UTF-8").useDelimiter("\\A"); // \A = whole input as one token
    if ( ! scanner.hasNext()) return null;
    return scanner.next();
}
/** Returns whether the "recursive" property asks for tenant-level recursion (or deeper). */
private static boolean recurseOverTenants(HttpRequest request) {
    return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive"));
}
/** Returns whether the "recursive" property asks for application-level recursion (or deeper). */
private static boolean recurseOverApplications(HttpRequest request) {
    return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive"));
}
/** Returns whether the "recursive" property asks for deployment-level recursion. */
private static boolean recurseOverDeployments(HttpRequest request) {
    String recursive = request.getProperty("recursive"); // null-safe: equals(null) is false
    return "all".equals(recursive) || "true".equals(recursive) || "deployment".equals(recursive);
}
/** Returns whether the request asks to filter the response to production instances only. */
private static boolean showOnlyProductionInstances(HttpRequest request) {
    return "true".equals(request.getProperty("production"));
}
/** Returns the API name for the given tenant's type. */
private static String tenantType(Tenant tenant) {
    switch (tenant.type()) {
        case athenz: return "ATHENS";
        case cloud: return "CLOUD";
        // Report the type that was actually switched on, not the class name,
        // consistent with the message in tenantInTenantsListToSlime
        default: throw new IllegalArgumentException("Unknown tenant type: " + tenant.type());
    }
}
/** Builds an application id from the tenant, application and instance path segments. */
private static ApplicationId appIdFromPath(Path path) {
    return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
}
/** Resolves the job type from the "jobtype" path segment. */
private static JobType jobTypeFromPath(Path path) {
    return JobType.fromJobName(path.get("jobtype"));
}
/** Builds a run id from the application, job type and run number path segments. */
private static RunId runIdFromPath(Path path) {
    return new RunId(appIdFromPath(path), jobTypeFromPath(path), Long.parseLong(path.get("number")));
}
/**
 * Handles an application package submission: parses multipart data parts, extracts submit
 * options (project id, source revision, author, source URL), verifies the application's
 * identity configuration, and registers the package with the job controller.
 */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
    Map<String, byte[]> dataParts = parseDataParts(request);
    Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
    // Project ids below 1 are normalized to 1 (absent field reads as 0)
    long projectId = Math.max(1, submitOptions.field("projectId").asLong());
    Optional<String> repository = optional("repository", submitOptions);
    Optional<String> branch = optional("branch", submitOptions);
    Optional<String> commit = optional("commit", submitOptions);
    // A source revision is only recorded when all three parts are provided
    Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
            ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
            : Optional.empty();
    Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
    Optional<String> authorEmail = optional("authorEmail", submitOptions);
    // Reject relative or schemeless source URLs up front
    sourceUrl.map(URI::create).ifPresent(url -> {
        if (url.getHost() == null || url.getScheme() == null)
            throw new IllegalArgumentException("Source URL must include scheme and host");
    });
    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
    controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
                                                                     Optional.empty(),
                                                                     Optional.empty(),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));
    return JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
                                                        tenant,
                                                        application,
                                                        sourceRevision,
                                                        authorEmail,
                                                        sourceUrl,
                                                        projectId,
                                                        applicationPackage,
                                                        dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP));
}
/** Removes all production deployments by submitting a special deployment-removal package. */
private HttpResponse removeAllProdDeployments(String tenant, String application) {
    JobControllerApiHandlerHelper.submitResponse(controller.jobController(), tenant, application,
                                                 Optional.empty(), Optional.empty(), Optional.empty(), 1,
                                                 ApplicationPackage.deploymentRemoval(), new byte[0]);
    return new MessageResponse("All deployments removed");
}
/**
 * Parses and validates a zone from its environment and region path segments.
 *
 * @throws IllegalArgumentException if the zone does not exist in this system
 */
private ZoneId requireZone(String environment, String region) {
    ZoneId zone = ZoneId.from(environment, region);
    // The special prod.controller zone is always accepted even though it is not in the registry
    if (zone.environment() == Environment.prod && zone.region().value().equals("controller")) {
        return zone;
    }
    if (!controller.zoneRegistry().hasZone(zone)) {
        throw new IllegalArgumentException("Zone " + zone + " does not exist in this system");
    }
    return zone;
}
/**
 * Parses the multipart request body into named data parts. When an X-Content-Hash header is
 * present, the body is digested while parsing and the SHA-256 digest must match the
 * base64-decoded header value, rejecting tampered or truncated uploads.
 */
private static Map<String, byte[]> parseDataParts(HttpRequest request) {
    String contentHash = request.getHeader("X-Content-Hash");
    if (contentHash == null)
        return new MultipartParser().parse(request);
    DigestInputStream digester = Signatures.sha256Digester(request.getData());
    var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
    // NOTE(review): Arrays.equals is not constant-time; likely fine since the hash is integrity
    // (not secrecy) protection — confirm before relying on it for anything secret
    if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
        throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
    return dataParts;
}
/**
 * Finds the rotation for the given instance, optionally selected by endpoint id.
 *
 * @throws NotExistsException if the instance has no rotations, or none match the endpoint id
 * @throws IllegalArgumentException if the instance has several rotations and no endpoint id is given
 */
private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
    if (instance.rotations().isEmpty()) {
        throw new NotExistsException("global rotation does not exist for " + instance);
    }
    if (endpointId.isPresent()) {
        return instance.rotations().stream()
                       .filter(r -> r.endpointId().id().equals(endpointId.get()))
                       .map(AssignedRotation::rotationId)
                       .findFirst()
                       .orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() +
                                                                 " does not exist for " + instance));
    } else if (instance.rotations().size() > 1) {
        throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
    }
    // Exactly one rotation and no endpoint id: it is unambiguous
    return instance.rotations().get(0).rotationId();
}
/** Returns the API string for a rotation state; "UNKNOWN" for unrecognized states. */
private static String rotationStateString(RotationState state) {
    if (state == RotationState.in) return "IN";
    if (state == RotationState.out) return "OUT";
    return "UNKNOWN";
}
/** Returns the API string for an endpoint scope, throwing on unrecognized values. */
private static String endpointScopeString(Endpoint.Scope scope) {
    if (scope == Endpoint.Scope.region) return "region";
    if (scope == Endpoint.Scope.global) return "global";
    if (scope == Endpoint.Scope.zone) return "zone";
    throw new IllegalArgumentException("Unknown endpoint scope " + scope);
}
/** Returns the API string for a routing method, throwing on unrecognized values. */
private static String routingMethodString(RoutingMethod method) {
    if (method == RoutingMethod.exclusive) return "exclusive";
    if (method == RoutingMethod.shared) return "shared";
    if (method == RoutingMethod.sharedLayer4) return "sharedLayer4";
    throw new IllegalArgumentException("Unknown routing method " + method);
}
/**
 * Returns the request context attribute with the given name, cast to the given type.
 *
 * @throws IllegalArgumentException if the attribute is absent or of the wrong type
 */
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
    return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName))
                   .filter(cls::isInstance)
                   .map(cls::cast)
                   .orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request"));
}
/** Returns whether given request is by an operator */
private static boolean isOperator(HttpRequest request) {
    SecurityContext securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
    return securityContext.roles().stream()
                          .anyMatch(role -> role.definition() == RoleDefinition.hostedOperator);
}
} | class ApplicationApiHandler extends AuditLoggingRequestHandler {
// Shared JSON mapper; Jackson ObjectMapper is thread-safe once configured
private static final ObjectMapper jsonMapper = new ObjectMapper();
private final Controller controller;
private final AccessControlRequests accessControlRequests;
private final TestConfigSerializer testConfigSerializer;
/** Injected by the container; audit logging is delegated to the controller's audit logger. */
@Inject
public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx,
                             Controller controller,
                             AccessControlRequests accessControlRequests) {
    super(parentCtx, controller.auditLogger());
    this.controller = controller;
    this.accessControlRequests = accessControlRequests;
    this.testConfigSerializer = new TestConfigSerializer(controller.system());
}
/** Request timeout — generous (20 min) since some operations here are long-running; presumably deploy/submit — confirm. */
@Override
public Duration getTimeout() {
    return Duration.ofMinutes(20);
}
/**
 * Dispatches the request by HTTP method and translates known exception types into the
 * corresponding HTTP error responses; unexpected runtime exceptions become 500s and are logged.
 */
@Override
public HttpResponse auditAndHandle(HttpRequest request) {
    try {
        Path path = new Path(request.getUri());
        switch (request.getMethod()) {
            case GET: return handleGET(path, request);
            case PUT: return handlePUT(path, request);
            case POST: return handlePOST(path, request);
            case PATCH: return handlePATCH(path, request);
            case DELETE: return handleDELETE(path, request);
            case OPTIONS: return handleOPTIONS();
            default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
        }
    }
    catch (ForbiddenException e) {
        return ErrorResponse.forbidden(Exceptions.toMessageString(e));
    }
    catch (NotAuthorizedException e) {
        return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
    }
    catch (NotExistsException e) {
        return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
    }
    catch (IllegalArgumentException e) {
        return ErrorResponse.badRequest(Exceptions.toMessageString(e));
    }
    catch (ConfigServerException e) {
        // Config server errors are mapped by their error code; others become 400 with the code name
        switch (e.code()) {
            case NOT_FOUND:
                return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
            case ACTIVATION_CONFLICT:
                return new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e));
            case INTERNAL_SERVER_ERROR:
                return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
            default:
                return new ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e));
        }
    }
    catch (RuntimeException e) {
        // Last resort: log with stack trace, return only the message to the client
        log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
        return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
    }
}
/**
 * Routes GET requests. Patterns are matched top-down; the first match wins, so more specific
 * paths must precede more general ones. Returns 404 for unmatched paths.
 * Fix: removed an exact duplicate of the ".../environment/{environment}/region/{region}/instance/{instance}"
 * route — the second occurrence was unreachable dead code.
 */
private HttpResponse handleGET(Path path, HttpRequest request) {
    if (path.matches("/application/v4/")) return root(request);
    if (path.matches("/application/v4/tenant")) return tenants(request);
    if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/metering")) return metering(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return metrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    // Legacy path order (".../environment/.../region/.../instance/...") kept for backwards compatibility
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes PUT requests (tenant/info updates, archive access, secret stores, rotation overrides); 404 otherwise. */
private HttpResponse handlePUT(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return allowArchiveAccess(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
// Routes POST requests to their handler. First matching pattern wins.
// Application-level deploying/* routes pass the literal instance name "default";
// instance-level variants pass the {instance} path segment instead.
// NOTE(review): the trailing /environment/{environment}/region/{region}/instance/{instance}
// routes duplicate earlier /instance/{instance}/environment/... routes with the
// segments reordered — presumably a legacy path layout; confirm before removing.
private HttpResponse handlePOST(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
// Routes PATCH requests. Both the application-level and the instance-level
// path forward to patchApplication with only tenant and application — the
// {instance} segment is accepted but not used (patching applies to the whole
// application, not a single instance).
private HttpResponse handlePATCH(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
// Routes DELETE requests. First matching pattern wins. DELETE on a
// global-rotation/override route *sets* the override with inService=true
// (i.e. removes the out-of-service override), mirroring the PUT routes
// which pass false. The trailing routes with environment/region before
// instance are the alternate (presumably legacy) path layout.
private HttpResponse handleDELETE(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return removeArchiveAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Answers preflight/OPTIONS requests: empty body, Allow header listing the supported verbs. */
private HttpResponse handleOPTIONS() {
    EmptyResponse emptyResponse = new EmptyResponse();
    emptyResponse.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
    return emptyResponse;
}
/** Renders every tenant, each together with the applications that belong to it, as a JSON array. */
private HttpResponse recursiveRoot(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantArray = slime.setArray();
    List<Application> allApplications = controller.applications().asList();
    for (Tenant tenant : controller.tenants().asList()) {
        List<Application> ownedByTenant = allApplications.stream()
                                                         .filter(application -> application.id().tenant().equals(tenant.name()))
                                                         .collect(toList());
        toSlime(tenantArray.addObject(), tenant, ownedByTenant, request);
    }
    return new SlimeJsonResponse(slime);
}
/** Root of the API: expands all tenants inline when recursion is requested, otherwise links to them. */
private HttpResponse root(HttpRequest request) {
    if (recurseOverTenants(request))
        return recursiveRoot(request);
    return new ResourceResponse(request, "tenant");
}
/** Lists all tenants in short form, one JSON object per tenant. */
private HttpResponse tenants(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantArray = slime.setArray();
    controller.tenants().asList()
              .forEach(tenant -> tenantInTenantsListToSlime(tenant, request.getUri(), tenantArray.addObject()));
    return new SlimeJsonResponse(slime);
}
/** Looks up the named tenant and renders it, or 404 when it does not exist. */
private HttpResponse tenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    return tenant(tenant.get(), request);
}
/** Renders the full view of one tenant: the tenant itself plus all of its applications. */
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    toSlime(root, tenant, controller.applications().asList(tenant.name()), request);
    return new SlimeJsonResponse(slime);
}
/** Returns the stored tenant info; only cloud tenants carry such info, others get 404. */
private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isPresent() && tenant.get().type() == Tenant.Type.cloud)
        return tenantInfo(((CloudTenant) tenant.get()).info(), request);
    return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
}
/**
 * Serializes tenant info to JSON. An empty info yields an empty object;
 * address and billing contact are nested sub-objects (omitted when empty).
 */
private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    if ( ! info.isEmpty()) {
        root.setString("name", info.name());
        root.setString("email", info.email());
        root.setString("website", info.website());
        root.setString("invoiceEmail", info.invoiceEmail());
        root.setString("contactName", info.contactName());
        root.setString("contactEmail", info.contactEmail());
        toSlime(info.address(), root);
        toSlime(info.billingContact(), root);
    }
    return new SlimeJsonResponse(slime);
}
/** Writes the address as a nested "address" object; writes nothing when the address is empty. */
private void toSlime(TenantInfoAddress address, Cursor parentCursor) {
    if (address.isEmpty()) return;
    Cursor cursor = parentCursor.setObject("address");
    cursor.setString("addressLines", address.addressLines());
    cursor.setString("postalCodeOrZip", address.postalCodeOrZip());
    cursor.setString("city", address.city());
    cursor.setString("stateRegionProvince", address.stateRegionProvince());
    cursor.setString("country", address.country());
}
/** Writes the billing contact as a nested "billingContact" object; writes nothing when empty. */
private void toSlime(TenantInfoBillingContact billingContact, Cursor parentCursor) {
    if (billingContact.isEmpty()) return;
    Cursor billingCursor = parentCursor.setObject("billingContact");
    billingCursor.setString("name", billingContact.name());
    billingCursor.setString("email", billingContact.email());
    billingCursor.setString("phone", billingContact.phone());
    toSlime(billingContact.address(), billingCursor);
}
/** Updates the stored tenant info; only cloud tenants support this, others get 404. */
private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isPresent() && tenant.get().type() == Tenant.Type.cloud)
        return updateTenantInfo((CloudTenant) tenant.get(), request);
    return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
}
/**
 * Returns the string value of the given field when it is present in the
 * request JSON, and the given default otherwise.
 */
private String getString(Inspector field, String defaultValue) { // fixed typo: was 'defaultVale'
    return field.valid() ? field.asString() : defaultValue;
}
/**
 * Merges the tenant-info fields present in the request body into the tenant's
 * existing info and stores the result under the tenant lock. Fields absent
 * from the request keep their previous value.
 */
private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) {
    TenantInfo oldInfo = tenant.info();
    Inspector insp = toSlime(request.getData()).get();
    TenantInfo mergedInfo = TenantInfo.EMPTY
            .withName(getString(insp.field("name"), oldInfo.name()))
            .withEmail(getString(insp.field("email"), oldInfo.email()))
            .withWebsite(getString(insp.field("website"), oldInfo.website()))        // fixed: fell back to oldInfo.email()
            .withInvoiceEmail(getString(insp.field("invoiceEmail"), oldInfo.invoiceEmail()))
            .withContactName(getString(insp.field("contactName"), oldInfo.contactName()))
            .withContactEmail(getString(insp.field("contactEmail"), oldInfo.contactEmail())) // fixed: fell back to oldInfo.contactName()
            .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address()))
            .withBillingContact(updateTenantInfoBillingContact(insp.field("billingContact"), oldInfo.billingContact()));
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}
/**
 * Merges the address fields present in the request into the old address.
 * Returns the old address unchanged when no "address" object was sent.
 */
private TenantInfoAddress updateTenantInfoAddress(Inspector insp, TenantInfoAddress oldAddress) {
    if ( ! insp.valid()) return oldAddress;
    TenantInfoAddress merged = TenantInfoAddress.EMPTY;
    merged = merged.withCountry(getString(insp.field("country"), oldAddress.country()));
    merged = merged.withStateRegionProvince(getString(insp.field("stateRegionProvince"), oldAddress.stateRegionProvince()));
    merged = merged.withCity(getString(insp.field("city"), oldAddress.city()));
    merged = merged.withPostalCodeOrZip(getString(insp.field("postalCodeOrZip"), oldAddress.postalCodeOrZip()));
    merged = merged.withAddressLines(getString(insp.field("addressLines"), oldAddress.addressLines()));
    return merged;
}
/**
 * Merges the billing-contact fields present in the request into the old contact.
 * Returns the old contact unchanged when no "billingContact" object was sent.
 */
private TenantInfoBillingContact updateTenantInfoBillingContact(Inspector insp, TenantInfoBillingContact oldContact) {
    if ( ! insp.valid()) return oldContact;
    TenantInfoBillingContact merged = TenantInfoBillingContact.EMPTY;
    merged = merged.withName(getString(insp.field("name"), oldContact.name()));
    merged = merged.withEmail(getString(insp.field("email"), oldContact.email()));
    merged = merged.withPhone(getString(insp.field("phone"), oldContact.phone()));
    merged = merged.withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address()));
    return merged;
}
// Lists notifications for a tenant, optionally narrowed to an application and
// instance via query parameters. The remaining source dimensions (zone,
// cluster, job, run) are left unconstrained.
private HttpResponse notifications(String tenantName, HttpRequest request) {
NotificationSource notificationSource = new NotificationSource(TenantName.from(tenantName),
Optional.ofNullable(request.getProperty("application")).map(ApplicationName::from),
Optional.ofNullable(request.getProperty("instance")).map(InstanceName::from),
Optional.empty(), Optional.empty(), Optional.empty(), OptionalLong.empty());
Slime slime = new Slime();
Cursor notificationsArray = slime.setObject().setArray("notifications");
controller.notificationsDb().listNotifications(notificationSource, showOnlyProductionInstances(request))
.forEach(notification -> toSlime(notificationsArray.addObject(), notification));
return new SlimeJsonResponse(slime);
}
// Serializes one notification. Mandatory fields (at/level/type/messages) are
// always written; source dimensions are written only when present, so clients
// must treat application/instance/environment/region/clusterId/jobName/runNumber
// as optional.
private static void toSlime(Cursor cursor, Notification notification) {
cursor.setLong("at", notification.at().toEpochMilli());
cursor.setString("level", notificationLevelAsString(notification.level()));
cursor.setString("type", notificationTypeAsString(notification.type()));
Cursor messagesArray = cursor.setArray("messages");
notification.messages().forEach(messagesArray::addString);
notification.source().application().ifPresent(application -> cursor.setString("application", application.value()));
notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value()));
// Zone expands into two separate fields.
notification.source().zoneId().ifPresent(zoneId -> {
cursor.setString("environment", zoneId.environment().value());
cursor.setString("region", zoneId.region().value());
});
notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value()));
notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName()));
notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber));
}
/**
 * Maps a notification type to its wire-format name. The mapping is spelled
 * out explicitly (rather than using type.name()) so that renaming an enum
 * constant cannot silently change the serialized form.
 */
private static String notificationTypeAsString(Notification.Type type) {
    switch (type) {
        case applicationPackage:
            return "applicationPackage";
        case deployment:
            return "deployment";
        case feedBlock:
            return "feedBlock";
        default:
            throw new IllegalArgumentException("No serialization defined for notification type " + type);
    }
}
/**
 * Maps a notification level to its wire-format name; throws for levels
 * that have no serialization defined.
 */
private static String notificationLevelAsString(Notification.Level level) {
    switch (level) {
        case warning:
            return "warning";
        case error:
            return "error";
        default:
            throw new IllegalArgumentException("No serialization defined for notification level " + level);
    }
}
// Lists a tenant's applications (optionally filtered to one application name),
// each with its instances and self-links. With no applicationName filter, all
// of the tenant's applications are returned.
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
TenantName tenant = TenantName.from(tenantName);
if (controller.tenants().get(tenantName).isEmpty())
return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
Slime slime = new Slime();
Cursor applicationArray = slime.setArray();
for (Application application : controller.applications().asList(tenant)) {
// Empty filter matches everything; otherwise require an exact name match.
if (applicationName.map(application.id().application().value()::equals).orElse(true)) {
Cursor applicationObject = applicationArray.addObject();
applicationObject.setString("tenant", application.id().tenant().value());
applicationObject.setString("application", application.id().application().value());
applicationObject.setString("url", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value(),
request.getUri()).toString());
Cursor instanceArray = applicationObject.setArray("instances");
// Optionally restrict to instances with production deployments.
for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
: application.instances().keySet()) {
Cursor instanceObject = instanceArray.addObject();
instanceObject.setString("instance", instance.value());
instanceObject.setString("url", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value() +
"/instance/" + instance.value(),
request.getUri()).toString());
}
}
}
return new SlimeJsonResponse(slime);
}
/**
 * Serves the dev application package for the given application and job type
 * as a zip. Only manually deployed environments keep dev packages.
 */
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
    if ( ! type.environment().isManuallyDeployed())
        throw new IllegalArgumentException("Only manually deployed zones have dev packages");
    ZoneId zone = type.zone(controller.system());
    byte[] content = controller.applications().applicationStore().getDev(id, zone);
    String filename = id.toFullString() + "." + zone.value() + ".zip";
    return new ZipResponse(filename, content);
}
/**
 * Serves the submitted application package for the given application as a zip.
 * The build is taken from the "build" query parameter when present, otherwise
 * the latest submitted build is used. Throws NotExistsException when nothing
 * has been submitted or the requested build has no stored package.
 */
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
    var tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
    // Removed an unused 'applicationId' local that was never referenced.
    var requestedBuild = Optional.ofNullable(request.getProperty("build")).map(build -> {
        try {
            return Long.parseLong(build);
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("Invalid build number", e);
        }
    });
    long buildNumber;
    if (requestedBuild.isEmpty()) { // Default to the latest submitted build.
        var application = controller.applications().requireApplication(tenantAndApplication);
        var latestBuild = application.latestVersion().map(ApplicationVersion::buildNumber).orElse(OptionalLong.empty());
        if (latestBuild.isEmpty())
            throw new NotExistsException("No application package has been submitted for '" + tenantAndApplication + "'");
        buildNumber = latestBuild.getAsLong();
    } else {
        buildNumber = requestedBuild.get();
    }
    var applicationPackage = controller.applications().applicationStore().find(tenantAndApplication.tenant(), tenantAndApplication.application(), buildNumber);
    if (applicationPackage.isEmpty()) // Check before building the filename.
        throw new NotExistsException("No application package found for '" +
                                     tenantAndApplication +
                                     "' with build number " + buildNumber);
    var filename = tenantAndApplication + "-build" + buildNumber + ".zip";
    return new ZipResponse(filename, applicationPackage.get());
}
/** Renders one application as JSON; throws NotExistsException via getApplication when absent. */
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
    Application application = getApplication(tenantName, applicationName);
    Slime slime = new Slime();
    toSlime(slime.setObject(), application, request);
    return new SlimeJsonResponse(slime);
}
/** Returns the Vespa version this application should compile against, as { "compileVersion": ... }. */
private HttpResponse compileVersion(String tenantName, String applicationName) {
    String version = compileVersion(TenantAndApplicationId.from(tenantName, applicationName)).toFullString();
    Slime slime = new Slime();
    slime.setObject().setString("compileVersion", version);
    return new SlimeJsonResponse(slime);
}
/** Renders one instance of an application, including its current deployment status. */
private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Instance instance = getInstance(tenantName, applicationName, instanceName);
    DeploymentStatus status = controller.jobController().deploymentStatus(getApplication(tenantName, applicationName));
    Slime slime = new Slime();
    toSlime(slime.setObject(), instance, status, request);
    return new SlimeJsonResponse(slime);
}
// Registers the authenticated user's public developer key (PEM, in the "key"
// field of the request body) on a cloud tenant, and returns the tenant's full
// key list. The response slime is populated inside the tenant lock so it
// reflects the stored state.
private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
Principal user = request.getJDiscRequest().getUserPrincipal();
String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
Slime root = new Slime();
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
tenant = tenant.withDeveloperKey(developerKey, user);
toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
controller.tenants().store(tenant);
});
return new SlimeJsonResponse(root);
}
// Validates a tenant secret store against a concrete deployment by asking the
// config server to resolve the given AWS parameter through the store's role.
// The config server's JSON reply is embedded under "result"; a non-JSON reply
// is surfaced as a 500 with the raw response text.
// NOTE(review): the tenant is looked up from the "application-id" query
// parameter's tenant, not from the tenantName path segment — if the two
// disagree, the store is searched in the wrong tenant while the error message
// still names tenantName. Presumably callers always pass a matching
// application-id; confirm and consider validating the two agree.
private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) {
var awsRegion = request.getProperty("aws-region");
var parameterName = request.getProperty("parameter-name");
var applicationId = ApplicationId.fromFullString(request.getProperty("application-id"));
var zoneId = ZoneId.from(request.getProperty("zone"));
var deploymentId = new DeploymentId(applicationId, zoneId);
var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class);
var tenantSecretStore = tenant.tenantSecretStores()
.stream()
.filter(secretStore -> secretStore.getName().equals(secretStoreName))
.findFirst();
if (tenantSecretStore.isEmpty())
return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'");
var response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName);
try {
var responseRoot = new Slime();
var responseCursor = responseRoot.setObject();
responseCursor.setString("target", deploymentId.toString());
var responseResultCursor = responseCursor.setObject("result");
// Re-parse the config server's reply so it nests as JSON, not as a string.
var responseSlime = SlimeUtils.jsonToSlime(response);
SlimeUtils.copyObject(responseSlime.get(), responseResultCursor);
return new SlimeJsonResponse(responseRoot);
} catch (JsonParseException e) {
return ErrorResponse.internalServerError(response);
}
}
// Removes the developer key given (PEM, in the "key" field of the request
// body) from a cloud tenant and returns the remaining key list. The 'user'
// owning the key is resolved only for reference; removal is keyed on the
// public key itself.
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
Principal user = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class).developerKeys().get(developerKey);
Slime root = new Slime();
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
tenant = tenant.withoutDeveloperKey(developerKey);
toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
controller.tenants().store(tenant);
});
return new SlimeJsonResponse(root);
}
/** Serializes a key-to-owner map as an array of { key: PEM, user: name } objects. */
private void toSlime(Cursor keysArray, Map<PublicKey, Principal> keys) {
    for (Map.Entry<PublicKey, Principal> entry : keys.entrySet()) {
        Cursor keyObject = keysArray.addObject();
        keyObject.setString("key", KeyUtils.toPem(entry.getKey()));
        keyObject.setString("user", entry.getValue().getName());
    }
}
/**
 * Adds the PEM deploy key from the request body's "key" field to the
 * application, and returns the application's full deploy-key list.
 */
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
    String pemKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey keyToAdd = KeyUtils.fromPemEncodedPublicKey(pemKey);
    Slime responseSlime = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withDeployKey(keyToAdd);
        // Serialize the stored key set while still holding the lock.
        Cursor keysArray = responseSlime.setObject().setArray("keys");
        application.get().deployKeys().stream()
                   .map(KeyUtils::toPem)
                   .forEach(keysArray::addString);
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(responseSlime);
}
/**
 * Removes the PEM deploy key given in the request body's "key" field from the
 * application, and returns the application's remaining deploy-key list.
 */
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
    String pemKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey keyToRemove = KeyUtils.fromPemEncodedPublicKey(pemKey);
    Slime responseSlime = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withoutDeployKey(keyToRemove);
        // Serialize the stored key set while still holding the lock.
        Cursor keysArray = responseSlime.setObject().setArray("keys");
        application.get().deployKeys().stream()
                   .map(KeyUtils::toPem)
                   .forEach(keysArray::addString);
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(responseSlime);
}
// Adds a tenant secret store (AWS account id, role, external id from the
// request body) to a cloud tenant: creates the IAM tenant policy, registers
// the store with the tenant secret service, then persists it on the tenant.
// Returns the tenant's resulting secret-store list, re-read after the store.
private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
var data = toSlime(request.getData()).get();
var awsId = mandatory("awsId", data).asString();
var externalId = mandatory("externalId", data).asString();
var role = mandatory("role", data).asString();
var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var tenantSecretStore = new TenantSecretStore(name, awsId, role);
if (!tenantSecretStore.isValid()) {
return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid");
}
if (tenant.tenantSecretStores().contains(tenantSecretStore)) {
return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured");
}
// External side effects happen before the tenant is updated and stored.
controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role);
controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId);
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withSecretStore(tenantSecretStore);
controller.tenants().store(lockedTenant);
});
// Re-read so the response reflects the persisted state.
tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var slime = new Slime();
toSlime(slime.setObject(), tenant.tenantSecretStores());
return new SlimeJsonResponse(slime);
}
// Deletes the named secret store from a cloud tenant: removes it from the
// tenant secret service and deletes the IAM tenant policy before removing it
// from the persisted tenant. Returns the tenant's remaining secret-store list.
private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) {
var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var optionalSecretStore = tenant.tenantSecretStores().stream()
.filter(secretStore -> secretStore.getName().equals(name))
.findFirst();
if (optionalSecretStore.isEmpty())
return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found");
var tenantSecretStore = optionalSecretStore.get();
// External side effects happen before the tenant is updated and stored.
controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore);
controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole());
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withoutSecretStore(tenantSecretStore);
controller.tenants().store(lockedTenant);
});
// Re-read so the response reflects the persisted state.
tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var slime = new Slime();
toSlime(slime.setObject(), tenant.tenantSecretStores());
return new SlimeJsonResponse(slime);
}
/**
 * Grants an IAM role access to the tenant's archive by storing the role
 * (mandatory, non-blank "role" field of the request body) on the cloud tenant.
 */
private HttpResponse allowArchiveAccess(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    String role = mandatory("role", toSlime(request.getData()).get()).asString();
    if (role.isBlank())
        return ErrorResponse.badRequest("Archive access role can't be whitespace only");
    controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withArchiveAccessRole(Optional.of(role));
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Archive access role set to '" + role + "' for tenant " + tenantName + ".");
}
/** Clears the archive access role for the given (cloud) tenant. */
private HttpResponse removeArchiveAccess(String tenantName) {
    // Archive access is only defined for cloud tenants.
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

    controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, locked ->
            controller.tenants().store(locked.withArchiveAccessRole(Optional.empty())));
    return new MessageResponse("Archive access role removed for tenant " + tenantName + ".");
}
/** Applies the PATCH body ("majorVersion" and/or "pemDeployKey") to the given application. */
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector patch = toSlime(request.getData()).get();
    StringJoiner messages = new StringJoiner("\n").setEmptyValue("No applicable changes.");
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), app -> {
        // "majorVersion": 0 clears the stored major version; any other value sets it.
        Inspector majorVersionField = patch.field("majorVersion");
        if (majorVersionField.valid()) {
            Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
            app = app.withMajorVersion(majorVersion);
            messages.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
        }
        // "pemDeployKey": add the given PEM-encoded public key to the application's deploy keys.
        Inspector pemDeployKeyField = patch.field("pemDeployKey");
        if (pemDeployKeyField.valid()) {
            String pemDeployKey = pemDeployKeyField.asString();
            app = app.withDeployKey(KeyUtils.fromPemEncodedPublicKey(pemDeployKey));
            messages.add("Added deploy key " + pemDeployKey);
        }
        controller.applications().store(app);
    });
    return new MessageResponse(messages.toString());
}
/** Returns the application with the given tenant and application name, or throws NotExistsException. */
private Application getApplication(String tenantName, String applicationName) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    return controller.applications()
                     .getApplication(id)
                     .orElseThrow(() -> new NotExistsException(id + " not found"));
}
/** Returns the instance with the given ids, or throws NotExistsException. */
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    return controller.applications()
                     .getInstance(id)
                     .orElseThrow(() -> new NotExistsException(id + " not found"));
}
/** Lists the nodes allocated to the given deployment, with state, version and status details. */
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, id);

    Slime slime = new Slime();
    Cursor nodesArray = slime.setObject().setArray("nodes");
    for (Node node : nodes) {
        Cursor nodeObject = nodesArray.addObject();
        nodeObject.setString("hostname", node.hostname().value());
        nodeObject.setString("state", valueOf(node.state()));
        node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
        nodeObject.setString("orchestration", valueOf(node.serviceState()));
        nodeObject.setString("version", node.currentVersion().toString());
        nodeObject.setString("flavor", node.flavor());
        toSlime(node.resources(), nodeObject);
        nodeObject.setString("clusterId", node.clusterId());
        nodeObject.setString("clusterType", valueOf(node.clusterType()));
        // A node is reported down if its history contains any "down" event.
        nodeObject.setBool("down", node.history().stream().anyMatch(event -> "down".equals(event.getEvent())));
        nodeObject.setBool("retired", node.retired() || node.wantToRetire());
        // Restart/reboot is pending while the wanted generation is ahead of the current one.
        nodeObject.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration());
        nodeObject.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns the clusters of the given deployment, with resource limits and autoscaling details. */
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);

    Slime slime = new Slime();
    Cursor clustersObject = slime.setObject().setObject("clusters");
    for (Cluster cluster : application.clusters().values()) {
        Cursor clusterObject = clustersObject.setObject(cluster.id().value());
        clusterObject.setString("type", cluster.type().name());
        // Configured resource limits and the currently allocated resources.
        toSlime(cluster.min(), clusterObject.setObject("min"));
        toSlime(cluster.max(), clusterObject.setObject("max"));
        toSlime(cluster.current(), clusterObject.setObject("current"));
        // Only report a target when it differs from the current allocation.
        if (cluster.target().isPresent()
            && ! cluster.target().get().justNumbers().equals(cluster.current().justNumbers()))
            toSlime(cluster.target().get(), clusterObject.setObject("target"));
        cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested")));
        utilizationToSlime(cluster.utilization(), clusterObject.setObject("utilization"));
        scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
        clusterObject.setString("autoscalingStatusCode", cluster.autoscalingStatusCode());
        clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus());
        clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis());
        clusterObject.setDouble("maxQueryGrowthRate", cluster.maxQueryGrowthRate());
        clusterObject.setDouble("currentQueryFractionOfMax", cluster.currentQueryFractionOfMax());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns the serialized name of a node state; names mirror the enum constants. */
private static String valueOf(Node.State state) {
    if (state == Node.State.failed) return "failed";
    if (state == Node.State.parked) return "parked";
    if (state == Node.State.dirty) return "dirty";
    if (state == Node.State.ready) return "ready";
    if (state == Node.State.active) return "active";
    if (state == Node.State.inactive) return "inactive";
    if (state == Node.State.reserved) return "reserved";
    if (state == Node.State.provisioned) return "provisioned";
    throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
}
/** Returns the serialized name of a node service state; unmatched states map to "unknown". */
static String valueOf(Node.ServiceState state) {
    if (state == Node.ServiceState.expectedUp) return "expectedUp";
    if (state == Node.ServiceState.allowedDown) return "allowedDown";
    if (state == Node.ServiceState.permanentlyDown) return "permanentlyDown";
    if (state == Node.ServiceState.unorchestrated) return "unorchestrated";
    return "unknown";
}
/** Returns the serialized name of a node cluster type; names mirror the enum constants. */
private static String valueOf(Node.ClusterType type) {
    if (type == Node.ClusterType.admin) return "admin";
    if (type == Node.ClusterType.content) return "content";
    if (type == Node.ClusterType.container) return "container";
    if (type == Node.ClusterType.combined) return "combined";
    throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
}
/** Returns the serialized name of a disk speed; names mirror the enum constants. */
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
    if (diskSpeed == NodeResources.DiskSpeed.fast) return "fast";
    if (diskSpeed == NodeResources.DiskSpeed.slow) return "slow";
    if (diskSpeed == NodeResources.DiskSpeed.any) return "any";
    throw new IllegalArgumentException("Unknown disk speed '" + diskSpeed.name() + "'");
}
/** Returns the serialized name of a storage type; names mirror the enum constants. */
private static String valueOf(NodeResources.StorageType storageType) {
    if (storageType == NodeResources.StorageType.remote) return "remote";
    if (storageType == NodeResources.StorageType.local) return "local";
    if (storageType == NodeResources.StorageType.any) return "any";
    throw new IllegalArgumentException("Unknown storage type '" + storageType.name() + "'");
}
/** Streams logs for the given deployment, filtered by the given query parameters, through to the client. */
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    DeploymentId deployment = new DeploymentId(application, zone);
    InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
    return new HttpResponse(200) {
        @Override
        public void render(OutputStream outputStream) throws IOException {
            // Copy the config server's log stream to the client, closing it when done.
            try (logStream) {
                logStream.transferTo(outputStream);
            }
        }
    };
}
/** Returns the current support access state for the given deployment. */
private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deployment = new DeploymentId(application, requireZone(environment, region));
    SupportAccess supportAccess = controller.supportAccess().forDeployment(deployment);
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(supportAccess,
                                                                               controller.clock().instant()));
}
/** Grants support access to the given deployment for 7 days, recorded against the requesting user. */
private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    Principal principal = requireUserPrincipal(request);
    Instant now = controller.clock().instant();
    Instant until = now.plus(7, ChronoUnit.DAYS);
    SupportAccess granted = controller.supportAccess().allow(deployment, until, principal.getName());
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(granted, now));
}
/** Fetches proton metrics for the given deployment and renders them as JSON. */
private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    return buildResponseFromProtonMetrics(controller.serviceRegistry().configServer().getProtonMetrics(deployment));
}
/** Wraps the given proton metrics in a {"metrics": [...]} JSON response; 500 on serialization failure. */
private JsonResponse buildResponseFromProtonMetrics(List<ProtonMetrics> protonMetrics) {
    try {
        var root = jsonMapper.createObjectNode();
        var metricsArray = jsonMapper.createArrayNode();
        protonMetrics.forEach(metrics -> metricsArray.add(metrics.toJson()));
        root.set("metrics", metricsArray);
        return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(root));
    } catch (JsonProcessingException e) {
        log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
        return new JsonResponse(500, "");
    }
}
/** Triggers (or re-triggers) the given job for the given instance, per flags in the request body. */
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    boolean requireTests = ! requestObject.field("skipTests").asBool();
    boolean reTrigger = requestObject.field("reTrigger").asBool();

    String triggered;
    if (reTrigger) {
        triggered = controller.applications().deploymentTrigger().reTrigger(id, type).type().jobName();
    }
    else {
        // Force-triggering may start several jobs; report them all.
        triggered = controller.applications().deploymentTrigger()
                              .forceTrigger(id, type, request.getJDiscRequest().getUserPrincipal().getName(), requireTests)
                              .stream().map(job -> job.type().jobName()).collect(joining(", "));
    }

    if (triggered.isEmpty())
        return new MessageResponse("Job " + type.jobName() + " for " + id + " not triggered");
    return new MessageResponse("Triggered " + triggered + " for " + id);
}
/** Pauses triggering of the given job for the maximum allowed pause duration. */
private HttpResponse pause(ApplicationId id, JobType type) {
    var trigger = controller.applications().deploymentTrigger();
    trigger.pauseJob(id, type, controller.clock().instant().plus(DeploymentTrigger.maxPause));
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
/** Resumes triggering of the given job. */
private HttpResponse resume(ApplicationId id, JobType type) {
    var trigger = controller.applications().deploymentTrigger();
    trigger.resumeJob(id, type);
    return new MessageResponse(type.jobName() + " for " + id + " resumed");
}
/** Serializes the given application, including its instances, metrics and activity, to the given object. */
private void toSlime(Cursor object, Application application, HttpRequest request) {
    object.setString("tenant", application.id().tenant().value());
    object.setString("application", application.id().application().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + application.id().tenant().value() +
                                             "/application/" + application.id().application().value() +
                                             "/job/",
                                             request.getUri()).toString());

    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.latestVersion().ifPresent(version -> toSlime(version, object.setObject("latestVersion")));
    application.projectId().ifPresent(id -> object.setLong("projectId", id));

    // Ongoing and outstanding changes, taken from the first instance.
    application.instances().values().stream().findFirst().ifPresent(instance -> {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
    });

    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));

    // Instances — optionally restricted to production instances via request parameter.
    Cursor instancesArray = object.setArray("instances");
    for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                  : application.instances().values())
        toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);

    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);

    // Service quality metrics.
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());

    // Recent read/write activity.
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/**
 * Serializes the given instance — its changes, change blockers and deployments — to the given object.
 * Removed a dead local: the sorted job-status list was computed here but never used.
 */
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
    object.setString("instance", instance.name().value());
    if (deploymentSpec.instance(instance.name()).isPresent()) {
        // Ongoing and outstanding changes for this instance.
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));

        // Configured change blockers (blocked versions/revisions during the given windows).
        Cursor changeBlockers = object.setArray("changeBlockers");
        deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    globalEndpointsToSlime(object, instance);

    // Deployments, in deployment-spec order when the spec declares this instance.
    List<Deployment> deployments = deploymentSpec.instance(instance.name())
                                                 .map(spec -> new DeploymentSteps(spec, controller::system))
                                                 .map(steps -> steps.sortedDeployments(instance.deployments().values()))
                                                 .orElse(List.copyOf(instance.deployments().values()));
    Cursor deploymentsArray = object.setArray("deployments");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = deploymentsArray.addObject();
        if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
        if (recurseOverDeployments(request)) // Include full deployment information when recursive.
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/instance/" + instance.name().value() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
}
/** Serializes the instance's non-legacy rotation-backed endpoint URLs and first rotation id. */
private void globalEndpointsToSlime(Cursor object, Instance instance) {
    // Collect distinct global endpoint URLs, preserving first-seen order.
    var globalEndpointUrls = controller.routing().endpointsOf(instance.id())
                                       .requiresRotation()
                                       .not().legacy()
                                       .asList().stream()
                                       .map(Endpoint::url)
                                       .map(URI::toString)
                                       .collect(Collectors.toCollection(LinkedHashSet::new));
    var globalRotationsArray = object.setArray("globalRotations");
    globalEndpointUrls.forEach(globalRotationsArray::addString);

    // Expose the first assigned rotation id, if any.
    instance.rotations().stream()
            .map(AssignedRotation::rotationId)
            .findFirst()
            .ifPresent(rotation -> object.setString("rotationId", rotation.asString()));
}
/**
 * Serializes the given instance, with its application's status, changes, deployments, keys,
 * metrics and activity, to the given object.
 * Removed a dead local: the sorted job-status list was computed here but never used.
 */
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
    Application application = status.application();
    object.setString("tenant", instance.id().tenant().value());
    object.setString("application", instance.id().application().value());
    object.setString("instance", instance.id().instance().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + instance.id().tenant().value() +
                                             "/application/" + instance.id().application().value() +
                                             "/instance/" + instance.id().instance().value() + "/job/",
                                             request.getUri()).toString());

    // Latest build: source revision, URL and commit, when known.
    application.latestVersion().ifPresent(version -> {
        sourceRevisionToSlime(version.source(), object.setObject("source"));
        version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
        version.commit().ifPresent(commit -> object.setString("commit", commit));
    });

    application.projectId().ifPresent(id -> object.setLong("projectId", id));

    if (application.deploymentSpec().instance(instance.name()).isPresent()) {
        // Ongoing and outstanding changes for this instance.
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));

        // Configured change blockers (blocked versions/revisions during the given windows).
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }

    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));

    globalEndpointsToSlime(object, instance);

    // Deployments, in deployment-spec order when the spec declares this instance.
    List<Deployment> deployments =
            application.deploymentSpec().instance(instance.name())
                       .map(spec -> new DeploymentSteps(spec, controller::system))
                       .map(steps -> steps.sortedDeployments(instance.deployments().values()))
                       .orElse(List.copyOf(instance.deployments().values()));

    Cursor instancesArray = object.setArray("instances");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = instancesArray.addObject();
        if (deployment.zone().environment() == Environment.prod) {
            if (instance.rotations().size() == 1) {
                toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment),
                        deploymentObject);
            }
            if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
            }
        }
        if (recurseOverDeployments(request)) // Include full deployment information when recursive.
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", instance.id().instance().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }

    // Add entries for declared production deployment zones which have no deployment yet.
    status.jobSteps().keySet().stream()
          .filter(job -> job.application().instance().equals(instance.name()))
          .filter(job -> job.type().isProduction() && job.type().isDeployment())
          .map(job -> job.type().zone(controller.system()))
          .filter(zone -> ! instance.deployments().containsKey(zone))
          .forEach(zone -> {
              Cursor deploymentObject = instancesArray.addObject();
              deploymentObject.setString("environment", zone.environment().value());
              deploymentObject.setString("region", zone.region().value());
          });

    application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);

    // Service quality metrics.
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());

    // Recent read/write activity.
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/** Serializes the deployment of the given instance in the given zone; 404 when absent. */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment,
                                String region, HttpRequest request) {
    // getInstance performs the same lookup and throws the same NotExistsException.
    Instance instance = getInstance(tenantName, applicationName, instanceName);
    DeploymentId deploymentId = new DeploymentId(instance.id(), requireZone(environment, region));
    Deployment deployment = instance.deployments().get(deploymentId.zoneId());
    if (deployment == null)
        throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId());

    Slime slime = new Slime();
    toSlime(slime.setObject(), deploymentId, deployment, request);
    return new SlimeJsonResponse(slime);
}
/** Serializes the platform version and (known) application revision of the given change. */
private void toSlime(Cursor object, Change change) {
    if (change.platform().isPresent())
        object.setString("version", change.platform().get().toString());
    var revision = change.application().filter(version -> ! version.isUnknown());
    revision.ifPresent(version -> toSlime(version, object.setObject("revision")));
}
/** Serializes the given endpoint's cluster, TLS flag, URL, scope, routing method and legacy flag. */
private void toSlime(Endpoint endpoint, Cursor object) {
    object.setString("cluster", endpoint.cluster().value());
    object.setBool("tls", endpoint.tls());
    object.setString("url", endpoint.url().toString());
    object.setString("scope", endpointScopeString(endpoint.scope()));
    object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
    object.setBool("legacy", endpoint.legacy());
}
/** Serializes the given deployment — endpoints, links, status, quota, activity and metrics — to the response object. */
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
    response.setString("tenant", deploymentId.applicationId().tenant().value());
    response.setString("application", deploymentId.applicationId().application().value());
    response.setString("instance", deploymentId.applicationId().instance().value());
    response.setString("environment", deploymentId.zoneId().environment().value());
    response.setString("region", deploymentId.zoneId().region().value());
    var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));

    // Endpoints: zone-scoped plus global endpoints targeting this zone.
    // Legacy endpoints are excluded unless "includeLegacyEndpoints" is set on the request.
    boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints");
    var endpointArray = response.setArray("endpoints");
    EndpointList zoneEndpoints = controller.routing().endpointsOf(deploymentId)
                                           .scope(Endpoint.Scope.zone);
    if (!legacyEndpoints) {
        zoneEndpoints = zoneEndpoints.not().legacy();
    }
    for (var endpoint : controller.routing().directEndpoints(zoneEndpoints, deploymentId.applicationId())) {
        toSlime(endpoint, endpointArray.addObject());
    }
    EndpointList globalEndpoints = controller.routing().endpointsOf(application, deploymentId.applicationId().instance())
                                             .targets(deploymentId.zoneId());
    if (!legacyEndpoints) {
        globalEndpoints = globalEndpoints.not().legacy();
    }
    for (var endpoint : controller.routing().directEndpoints(globalEndpoints, deploymentId.applicationId())) {
        toSlime(endpoint, endpointArray.addObject());
    }

    // Links to related resources.
    response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
    response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
    response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());

    response.setString("version", deployment.version().toFullString());
    response.setString("revision", deployment.applicationVersion().id());
    response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
    // Expiry is only set for zones with a configured deployment time-to-live.
    controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
              .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));

    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
    sourceRevisionToSlime(deployment.applicationVersion().source(), response);

    var instance = application.instances().get(deploymentId.applicationId().instance());
    if (instance != null) {
        // Rotation status, only for production deployments with assigned rotations.
        if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);
        // Status of the job deploying to this zone: "complete", "pending" or "running".
        JobType.from(controller.system(), deployment.zone())
               .map(type -> new JobId(instance.id(), type))
               .map(status.jobSteps()::get)
               .ifPresent(stepStatus -> {
                   JobControllerApiHandlerHelper.applicationVersionToSlime(
                           response.setObject("applicationVersion"), deployment.applicationVersion());
                   if (!status.jobsToRun().containsKey(stepStatus.job().get()))
                       response.setString("status", "complete");
                   else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true))
                       response.setString("status", "pending");
                   else response.setString("status", "running");
               });
    }

    response.setDouble("quota", deployment.quota().rate());
    deployment.cost().ifPresent(cost -> response.setDouble("cost", cost));
    controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant())
              .ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString()));

    // Recent read/write activity.
    Cursor activity = response.setObject("activity");
    deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

    // Deployment metrics.
    DeploymentMetrics metrics = deployment.metrics();
    Cursor metricsObject = response.setObject("metrics");
    metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
    metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
    metricsObject.setDouble("documentCount", metrics.documentCount());
    metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
    metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
    metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}
/** Serializes the given application version; unknown versions produce no fields. */
private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
    if (applicationVersion.isUnknown()) return;

    object.setLong("buildNumber", applicationVersion.buildNumber().getAsLong());
    object.setString("hash", applicationVersion.id());
    sourceRevisionToSlime(applicationVersion.source(), object.setObject("source"));
    applicationVersion.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
    applicationVersion.commit().ifPresent(commit -> object.setString("commit", commit));
}
/** Serializes the given source revision's repository, branch and commit; writes nothing when absent. */
private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
    revision.ifPresent(source -> {
        object.setString("gitRepository", source.repository());
        object.setString("gitBranch", source.branch());
        object.setString("gitCommit", source.commit());
    });
}
/** Serializes the given rotation state under a nested "bcpStatus" object. */
private void toSlime(RotationState state, Cursor object) {
    object.setObject("bcpStatus").setString("rotationStatus", rotationStateString(state));
}
/** Serializes one "endpointStatus" entry per assigned rotation for the given deployment. */
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
    Cursor endpointStatusArray = object.setArray("endpointStatus");
    for (AssignedRotation rotation : rotations) {
        Cursor entry = endpointStatusArray.addObject();
        var targets = status.of(rotation.rotationId());
        entry.setString("endpointId", rotation.endpointId().id());
        entry.setString("rotationId", rotation.rotationId().asString());
        entry.setString("clusterId", rotation.clusterId().value());
        entry.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment)));
        entry.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
    }
}
/** Returns the monitoring system URI for the given deployment, as provided by the zone registry. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
    return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}
/**
 * Returns a non-broken, released version at least as old as the oldest platform the given application is on.
 *
 * If no known version is applicable, the newest version at least as old as the oldest platform is selected,
 * among all versions released for this system. If no such versions exists, throws an IllegalStateException.
 */
private Version compileVersion(TenantAndApplicationId id) {
    Version oldestPlatform = controller.applications().oldestInstalledPlatform(id);
    VersionStatus versionStatus = controller.readVersionStatus();
    return versionStatus.versions().stream()
                        .filter(version -> version.confidence().equalOrHigherThan(VespaVersion.Confidence.low))
                        .filter(VespaVersion::isReleased)
                        .map(VespaVersion::versionNumber)
                        .filter(version -> ! version.isAfter(oldestPlatform))
                        .max(Comparator.naturalOrder())
                        .orElseGet(() -> {
                            // Fall back to the newest applicable maven release not already known to the
                            // version status. Compute the known-version set once up front, instead of
                            // rebuilding it inside the filter for every candidate (was O(n^2)).
                            var knownVersions = versionStatus.versions().stream()
                                                             .map(VespaVersion::versionNumber)
                                                             .collect(Collectors.toSet());
                            return controller.mavenRepository().metadata().versions().stream()
                                             .filter(version -> ! version.isAfter(oldestPlatform))
                                             .filter(version -> ! knownVersions.contains(version))
                                             .max(Comparator.naturalOrder())
                                             .orElseThrow(() -> new IllegalStateException("No available releases of " +
                                                                                          controller.mavenRepository().artifactId()));
                        });
}
/** Moves a deployment's global endpoints in or out of service, for both rotation- and policy-backed endpoints. */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    var instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    var zone = requireZone(environment, region);
    if ( ! instance.deployments().containsKey(zone))
        throw new NotExistsException(instance + " has no deployment in " + zone);
    var deploymentId = new DeploymentId(instance.id(), zone);
    setGlobalRotationStatus(deploymentId, inService, request);
    setGlobalEndpointStatus(deploymentId, inService, request);
    return new MessageResponse(String.format("Successfully set %s in %s %s service",
                                             instance.id().toShortString(), zone, inService ? "in" : "out of"));
}
/** Set the global endpoint status for given deployment. This only applies to global endpoints backed by a cloud service */
private void setGlobalEndpointStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
    // Operators and tenants are recorded as distinct agents for auditing.
    GlobalRouting.Agent agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant;
    controller.routing().policies().setGlobalRoutingStatus(deployment,
                                                           inService ? GlobalRouting.Status.in : GlobalRouting.Status.out,
                                                           agent);
}
/** Set the global rotation status for given deployment. This only applies to global endpoints backed by a rotation */
private void setGlobalRotationStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
    var body = toSlime(request.getData()).get();
    String reason = mandatory("reason", body).asString(); // a reason is required for rotation overrides
    String agentName = (isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant).name();
    long epochSeconds = controller.clock().instant().getEpochSecond();
    EndpointStatus.Status newStatus = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
    controller.routing().setGlobalRotationStatus(deployment, new EndpointStatus(newStatus, reason, agentName, epochSeconds));
}
/** Returns the global rotation override status of each endpoint of the given deployment. */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    Cursor array = slime.setObject().setArray("globalrotationoverride");
    controller.routing().globalRotationStatus(deploymentId)
              .forEach((endpoint, status) -> {
                  // NOTE(review): each endpoint appends both a plain string (the upstream id) and an
                  // object to the same array — presumably a legacy response shape; confirm with consumers.
                  array.addString(endpoint.upstreamIdOf(deploymentId));
                  Cursor statusObject = array.addObject();
                  statusObject.setString("status", status.getStatus().name());
                  statusObject.setString("reason", status.getReason() == null ? "" : status.getReason());
                  statusObject.setString("agent", status.getAgent() == null ? "" : status.getAgent());
                  statusObject.setLong("timestamp", status.getEpoch());
              });
    return new SlimeJsonResponse(slime);
}
/** Returns the rotation state of the given deployment, for the rotation selected by the optional endpoint id. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
    var instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    var zone = requireZone(environment, region);
    var rotation = findRotationId(instance, endpointId); // resolved before the deployment check, as before
    var deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " has no deployment in " + zone);
    var slime = new Slime();
    toSlime(instance.rotationStatus().of(rotation, deployment), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/**
 * Returns resource metering data for the given tenant and application: the current allocation rate,
 * aggregates for this and last month, and a per-instance time series of snapshots.
 */
private HttpResponse metering(String tenant, String application, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    MeteringData meteringData = controller.serviceRegistry()
            .meteringService()
            .getMeteringData(TenantName.from(tenant), ApplicationName.from(application));
    // Current resource allocation rate.
    ResourceAllocation currentSnapshot = meteringData.getCurrentSnapshot();
    Cursor currentRate = root.setObject("currentrate");
    currentRate.setDouble("cpu", currentSnapshot.getCpuCores());
    currentRate.setDouble("mem", currentSnapshot.getMemoryGb());
    currentRate.setDouble("disk", currentSnapshot.getDiskGb());
    // Aggregated usage for the current month.
    ResourceAllocation thisMonth = meteringData.getThisMonth();
    Cursor thismonth = root.setObject("thismonth");
    thismonth.setDouble("cpu", thisMonth.getCpuCores());
    thismonth.setDouble("mem", thisMonth.getMemoryGb());
    thismonth.setDouble("disk", thisMonth.getDiskGb());
    // Aggregated usage for the previous month.
    ResourceAllocation lastMonth = meteringData.getLastMonth();
    Cursor lastmonth = root.setObject("lastmonth");
    lastmonth.setDouble("cpu", lastMonth.getCpuCores());
    lastmonth.setDouble("mem", lastMonth.getMemoryGb());
    lastmonth.setDouble("disk", lastMonth.getDiskGb());
    // Per-instance time series, one {unixms, value} array per resource dimension.
    Map<ApplicationId, List<ResourceSnapshot>> history = meteringData.getSnapshotHistory();
    Cursor details = root.setObject("details");
    Cursor detailsCpu = details.setObject("cpu");
    Cursor detailsMem = details.setObject("mem");
    Cursor detailsDisk = details.setObject("disk");
    history.forEach((applicationId, resources) -> {
        String instanceName = applicationId.instance().value();
        Cursor detailsCpuApp = detailsCpu.setObject(instanceName);
        Cursor detailsMemApp = detailsMem.setObject(instanceName);
        Cursor detailsDiskApp = detailsDisk.setObject(instanceName);
        Cursor detailsCpuData = detailsCpuApp.setArray("data");
        Cursor detailsMemData = detailsMemApp.setArray("data");
        Cursor detailsDiskData = detailsDiskApp.setArray("data");
        resources.forEach(resourceSnapshot -> {
            Cursor cpu = detailsCpuData.addObject();
            cpu.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
            cpu.setDouble("value", resourceSnapshot.getCpuCores());
            Cursor mem = detailsMemData.addObject();
            mem.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
            mem.setDouble("value", resourceSnapshot.getMemoryGb());
            Cursor disk = detailsDiskData.addObject();
            disk.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
            disk.setDouble("value", resourceSnapshot.getDiskGb());
        });
    });
    return new SlimeJsonResponse(slime);
}
/** Returns the change (platform and/or application version) currently rolling out for the given instance, if any. */
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    Change change = instance.change();
    if ( ! change.isEmpty()) {
        change.platform().ifPresent(platform -> root.setString("platform", platform.toString()));
        change.application().ifPresent(application -> root.setString("application", application.id()));
        root.setBool("pinned", change.isPinned());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns whether orchestration of the given deployment is currently suspended. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    var deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                        requireZone(environment, region));
    var slime = new Slime();
    slime.setObject().setBool("suspended", controller.applications().isSuspended(deploymentId));
    return new SlimeJsonResponse(slime);
}
/** Lists the services running in the given deployment. */
private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
    ZoneId zone = requireZone(environment, region);
    ApplicationId id = new ApplicationId.Builder().tenant(tenantName)
                                                  .applicationName(applicationName)
                                                  .instanceName(instanceName)
                                                  .build();
    ServiceApiResponse response = new ServiceApiResponse(zone, id,
                                                         List.of(controller.zoneRegistry().getConfigServerVipUri(zone)),
                                                         request.getUri());
    response.setResponse(applicationView);
    return response;
}
/** Proxies a service API request to the given service of a deployment; cluster controller status pages return HTML. */
private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
    var deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    if ("container-clustercontroller".equals(serviceName) && restPath.contains("/status/")) {
        String[] parts = restPath.split("/status/");
        return new HtmlResponse(controller.serviceRegistry().configServer()
                                          .getClusterControllerStatus(deploymentId, parts[0], parts[1]));
    }
    Map<?,?> apiResult = controller.serviceRegistry().configServer().getServiceApiResponse(deploymentId, serviceName, restPath);
    var response = new ServiceApiResponse(deploymentId.zoneId(),
                                          deploymentId.applicationId(),
                                          List.of(controller.zoneRegistry().getConfigServerVipUri(deploymentId.zoneId())),
                                          request.getUri());
    response.setResponse(apiResult, serviceName, restPath);
    return response;
}
/** Proxies a request for application-package content of the given deployment; restPath is relative to the package root. */
private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, String restPath, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, "/" + restPath, request.getUri());
}
/** Updates an existing tenant from the request body, returning its new state. */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName); // 404 rather than implicitly creating the tenant
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    // Reuse the already-parsed name instead of re-parsing it for the final lookup.
    return tenant(controller.tenants().require(tenant), request);
}
/** Creates a new tenant from the request body, returning its state. */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().create(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    // Reuse the already-parsed name instead of re-parsing it for the final lookup.
    return tenant(controller.tenants().require(tenant), request);
}
/** Creates a new application under the given tenant and returns it. */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(),
                                                                toSlime(request.getData()).get(),
                                                                request.getJDiscRequest());
    controller.applications().createApplication(id, credentials); // result not needed for the response
    Slime slime = new Slime();
    toSlime(id, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Creates a new instance of an application, first creating the application itself if it does not exist. */
private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    if (controller.applications().getApplication(applicationId).isEmpty())
        createApplication(tenantName, applicationName, request);
    ApplicationId instanceId = applicationId.instance(instanceName);
    controller.applications().createInstance(instanceId);
    Slime slime = new Slime();
    toSlime(instanceId, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
    String versionString = readToString(request.getData());
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    // Mutates the response builder from inside the locked callback; the lock serializes changes to the application.
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Version version = Version.fromString(versionString);
        VersionStatus versionStatus = controller.readVersionStatus();
        // An empty version means "deploy the current system version".
        if (version.equals(Version.emptyVersion))
            version = controller.systemVersion(versionStatus);
        if (!versionStatus.isActive(version))
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + versionStatus.versions()
                                                                                  .stream()
                                                                                  .map(VespaVersion::versionNumber)
                                                                                  .map(Version::toString)
                                                                                  .collect(joining(", ")));
        Change change = Change.of(version);
        if (pin)
            change = change.withPin();
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        // Fail with a clear message instead of a bare NoSuchElementException when nothing has been submitted yet.
        Change change = Change.of(application.get().latestVersion()
                                             .orElseThrow(() -> new IllegalArgumentException(
                                                     "No application package has been submitted for " + id)));
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder message = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Change current = application.get().require(id.instance()).change();
        if (current.isEmpty()) {
            message.append("No deployment in progress for ").append(id).append(" at this time");
            return;
        }
        controller.applications().deploymentTrigger().cancelChange(id, ChangesToCancel.valueOf(choice.toUpperCase()));
        message.append("Changed deployment from '").append(current)
               .append("' to '").append(controller.applications().requireInstance(id).change())
               .append("' for ").append(id);
    });
    return new MessageResponse(message.toString());
}
/** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */
private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    // Comma-separated, blank entries ignored.
    List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream()
                                        .flatMap(clusters -> Stream.of(clusters.split(",")))
                                        .filter(cluster -> ! cluster.isBlank())
                                        .collect(toUnmodifiableList());
    List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream()
                                         .flatMap(types -> Stream.of(types.split(",")))
                                         .filter(type -> ! type.isBlank())
                                         .collect(toUnmodifiableList());
    controller.applications().reindex(id, zone, clusterNames, documentTypes, request.getBooleanProperty("indexedOnly"));
    // Bug fix: the "for types" clause was previously nested inside the clusters clause, so it was
    // silently dropped whenever no clusters were specified. The two clauses are now independent.
    return new MessageResponse("Requested reindexing of " + id + " in " + zone +
                               (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames)) +
                               (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)));
}
/** Gets reindexing status of an application in a zone. */
private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setBool("enabled", reindexing.enabled());
    // One entry per cluster, each with its pending (type -> required generation) and ready statuses,
    // both sorted by key for a stable response.
    Cursor clustersArray = root.setArray("clusters");
    reindexing.clusters().entrySet().stream().sorted(comparingByKey())
              .forEach(cluster -> {
                  Cursor clusterObject = clustersArray.addObject();
                  clusterObject.setString("name", cluster.getKey());
                  Cursor pendingArray = clusterObject.setArray("pending");
                  cluster.getValue().pending().entrySet().stream().sorted(comparingByKey())
                         .forEach(pending -> {
                             Cursor pendingObject = pendingArray.addObject();
                             pendingObject.setString("type", pending.getKey());
                             pendingObject.setLong("requiredGeneration", pending.getValue());
                         });
                  Cursor readyArray = clusterObject.setArray("ready");
                  cluster.getValue().ready().entrySet().stream().sorted(comparingByKey())
                         .forEach(ready -> {
                             Cursor readyObject = readyArray.addObject();
                             readyObject.setString("type", ready.getKey());
                             setStatus(readyObject, ready.getValue());
                         });
              });
    return new SlimeJsonResponse(slime);
}
/** Fills the given object with the timestamps, state, message and progress of a reindexing status; absent fields are omitted. */
void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
    status.readyAt().ifPresent(at -> statusObject.setLong("readyAtMillis", at.toEpochMilli()));
    status.startedAt().ifPresent(at -> statusObject.setLong("startedAtMillis", at.toEpochMilli()));
    status.endedAt().ifPresent(at -> statusObject.setLong("endedAtMillis", at.toEpochMilli()));
    status.state().map(ApplicationApiHandler::toString).ifPresent(name -> statusObject.setString("state", name));
    status.message().ifPresent(text -> statusObject.setString("message", text));
    status.progress().ifPresent(fraction -> statusObject.setDouble("progress", fraction));
}
/** Returns the wire name of the given reindexing state, or null for unknown states (which are then omitted upstream). */
private static String toString(ApplicationReindexing.State state) {
    if (state == ApplicationReindexing.State.PENDING) return "pending";
    if (state == ApplicationReindexing.State.RUNNING) return "running";
    if (state == ApplicationReindexing.State.FAILED) return "failed";
    if (state == ApplicationReindexing.State.SUCCESSFUL) return "successful";
    return null;
}
/** Enables reindexing of an application in a zone. */
private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    var id = ApplicationId.from(tenantName, applicationName, instanceName);
    var zone = requireZone(environment, region);
    controller.applications().enableReindexing(id, zone);
    return new MessageResponse("Enabled reindexing of " + id + " in " + zone);
}
/** Disables reindexing of an application in a zone. */
private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    var id = ApplicationId.from(tenantName, applicationName, instanceName);
    var zone = requireZone(environment, region);
    controller.applications().disableReindexing(id, zone);
    return new MessageResponse("Disabled reindexing of " + id + " in " + zone);
}
/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    var deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                        requireZone(environment, region));
    // Each filter dimension is optional; an absent property leaves that dimension unrestricted.
    var filter = new RestartFilter()
            .withHostName(Optional.ofNullable(request.getProperty("hostname")).map(HostName::from))
            .withClusterType(Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from))
            .withClusterId(Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from));
    controller.applications().restart(deploymentId, filter);
    return new MessageResponse("Requested restart of " + deploymentId);
}
/** Set suspension status of the given deployment. */
private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
    var deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                        requireZone(environment, region));
    controller.applications().setSuspension(deploymentId, suspend);
    String verb = suspend ? "Suspended" : "Resumed";
    return new MessageResponse(verb + " orchestration of " + deploymentId);
}
/** Deploys the posted application package directly to the given job's zone; manual environments only, unless operator. */
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
    if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");
    Map<String, byte[]> dataParts = parseDataParts(request);
    // Consistency fix: the presence check used the literal "applicationZip" while the lookup used
    // EnvironmentResource.APPLICATION_ZIP — use the constant for both so they cannot diverge.
    if ( ! dataParts.containsKey(EnvironmentResource.APPLICATION_ZIP))
        throw new IllegalArgumentException("Missing required form part 'applicationZip'");
    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                     Optional.of(id.instance()),
                                                                     Optional.of(type.zone(controller.system())),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));
    // The optional "deployOptions" part may pin a specific Vespa version.
    Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions"))
                                        .map(json -> SlimeUtils.jsonToSlime(json).get())
                                        .flatMap(options -> optional("vespaVersion", options))
                                        .map(Version::fromString);
    controller.jobController().deploy(id, type, version, applicationPackage);
    RunId runId = controller.jobController().last(id, type).get().id();
    Slime slime = new Slime();
    Cursor rootObject = slime.setObject();
    rootObject.setString("message", "Deployment started in " + runId +
                                    ". This may take about 15 minutes the first time.");
    rootObject.setLong("run", runId.number());
    return new SlimeJsonResponse(slime);
}
/**
 * Deploys a system application package to the given zone. Only system applications with an
 * application package may be deployed this way, only at the current system version, and never
 * while a system upgrade is in progress.
 */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( ! dataParts.containsKey("deployOptions"))
        return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
    Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
    // Only system applications with an application package can be deployed through this API.
    Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId);
    if (systemApplication.isEmpty() || !systemApplication.get().hasApplicationPackage()) {
        return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API");
    }
    // The client may not choose a version; system applications always follow the system version.
    String vespaVersion = deployOptions.field("vespaVersion").asString();
    if (!vespaVersion.isEmpty() && !vespaVersion.equals("null")) {
        return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted");
    }
    VersionStatus versionStatus = controller.readVersionStatus();
    if (versionStatus.isUpgrading()) {
        throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
    }
    Optional<VespaVersion> systemVersion = versionStatus.systemVersion();
    if (systemVersion.isEmpty()) {
        throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
    }
    ActivateResult result = controller.applications()
            .deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.get().versionNumber());
    return new SlimeJsonResponse(toSlime(result));
}
/** Deletes the given tenant, if it exists, returning its last state. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");
    Tenant existing = tenant.get();
    Credentials credentials = accessControlRequests.credentials(existing.name(),
                                                                toSlime(request.getData()).get(),
                                                                request.getJDiscRequest());
    controller.tenants().delete(existing.name(), credentials);
    return tenant(existing, request);
}
/** Deletes the given application, authorized by the credentials in the request body. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Inspector requestObject = toSlime(request.getData()).get();
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    controller.applications().deleteApplication(id, credentials);
    return new MessageResponse("Deleted application " + id);
}
/** Deletes an instance, and the containing application as well when this was its last instance. */
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    controller.applications().deleteInstance(id.instance(instanceName));
    // Deleting the last instance cascades to the application itself, which requires credentials.
    if (controller.applications().requireApplication(id).instances().isEmpty()) {
        Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
        controller.applications().deleteApplication(id, credentials);
    }
    return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString());
}
/** Deactivates the given deployment. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    var id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                              requireZone(environment, region));
    controller.applications().deactivate(id.applicationId(), id.zoneId());
    return new MessageResponse("Deactivated " + id);
}
/** Returns test config for indicated job, with production deployments of the default instance. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
    // Endpoints are resolved against the default instance's production deployments …
    ApplicationId defaultInstanceId = TenantAndApplicationId.from(id).defaultInstance();
    HashSet<DeploymentId> deployments = controller.applications()
                                                  .getInstance(defaultInstanceId).stream()
                                                  .flatMap(instance -> instance.productionDeployments().keySet().stream())
                                                  .map(zone -> new DeploymentId(defaultInstanceId, zone))
                                                  .collect(Collectors.toCollection(HashSet::new));
    // … plus, for non-production jobs, the zone under test for the requested instance.
    var testedZone = type.zone(controller.system());
    if ( ! type.isProduction())
        deployments.add(new DeploymentId(id, testedZone));
    return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
                                                                 type,
                                                                 false,
                                                                 controller.routing().zoneEndpointsOf(deployments),
                                                                 controller.applications().reachableContentClustersByZone(deployments)));
}
/** Parses a source revision from the given object, which must have "repository", "branch" and "commit" fields. */
private static SourceRevision toSourceRevision(Inspector object) {
    Inspector repository = object.field("repository");
    Inspector branch = object.field("branch");
    Inspector commit = object.field("commit");
    if ( ! (repository.valid() && branch.valid() && commit.valid()))
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    return new SourceRevision(repository.asString(), branch.asString(), commit.asString());
}
/** Returns the tenant with the given name, or throws NotExistsException if no such tenant exists. */
private Tenant getTenantOrThrow(String tenantName) {
    return controller.tenants().get(tenantName)
                     .orElseThrow(() -> new NotExistsException(new TenantId(tenantName)));
}
/**
 * Serializes the given tenant and its applications into the given object: type-specific tenant
 * metadata (Athenz or cloud), then the application (or instance) list, then tenant meta data.
 */
private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) {
    object.setString("tenant", tenant.name().value());
    object.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            object.setString("athensDomain", athenzTenant.domain().getName());
            object.setString("property", athenzTenant.property().id());
            athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
            athenzTenant.contact().ifPresent(c -> {
                object.setString("propertyUrl", c.propertyUrl().toString());
                object.setString("contactsUrl", c.url().toString());
                object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
                // Contacts are grouped: each group becomes a nested array of names.
                Cursor contactsArray = object.setArray("contacts");
                c.persons().forEach(persons -> {
                    Cursor personArray = contactsArray.addArray();
                    persons.forEach(personArray::addString);
                });
            });
            break;
        case cloud: {
            CloudTenant cloudTenant = (CloudTenant) tenant;
            cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName()));
            Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
            cloudTenant.developerKeys().forEach((key, user) -> {
                Cursor keyObject = pemDeveloperKeysArray.addObject();
                keyObject.setString("key", KeyUtils.toPem(key));
                keyObject.setString("user", user.getName());
            });
            toSlime(object, cloudTenant.tenantSecretStores());
            toSlime(object.setObject("integrations").setObject("aws"),
                    controller.serviceRegistry().roleService().getTenantRole(tenant.name()),
                    cloudTenant.tenantSecretStores());
            // Quota usage is summed over all of the tenant's applications.
            var tenantQuota = controller.serviceRegistry().billingController().getQuota(tenant.name());
            var usedQuota = applications.stream()
                                        .map(Application::quotaUsage)
                                        .reduce(QuotaUsage.none, QuotaUsage::add);
            toSlime(tenantQuota, usedQuota, object.setObject("quota"));
            cloudTenant.archiveAccessRole().ifPresent(role -> object.setString("archiveAccessRole", role));
            break;
        }
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    // Applications are expanded with full deployment status only when recursion is requested;
    // the deployment status is computed lazily, once per application.
    Cursor applicationArray = object.setArray("applications");
    for (Application application : applications) {
        DeploymentStatus status = null;
        for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                      : application.instances().values())
            if (recurseOverApplications(request)) {
                if (status == null) status = controller.jobController().deploymentStatus(application);
                toSlime(applicationArray.addObject(), instance, status, request);
            }
            else {
                toSlime(instance.id(), applicationArray.addObject(), request);
            }
    }
    tenantMetaDataToSlime(tenant, applications, object.setObject("metaData"));
}
/** Writes the quota budget (nix when unlimited), the used rate, and the optional max cluster size. */
private void toSlime(Quota quota, QuotaUsage usage, Cursor object) {
    quota.budget().ifPresentOrElse(amount -> object.setDouble("budget", amount.doubleValue()),
                                   () -> object.setNix("budget"));
    object.setDouble("budgetUsed", usage.rate());
    quota.maxClusterSize().ifPresent(size -> object.setLong("clusterSize", size));
}
/** Writes node count, group count, per-node resources and computed cost of the given cluster resources. */
private void toSlime(ClusterResources resources, Cursor object) {
    object.setLong("nodes", resources.nodes());
    object.setLong("groups", resources.groups());
    toSlime(resources.nodeResources(), object.setObject("nodeResources"));
    object.setDouble("cost", ResourceMeterMaintainer.cost(resources, controller.serviceRegistry().zoneRegistry().system()));
}
/** Writes actual, ideal and current utilization for cpu, memory and disk into the given object. */
private void utilizationToSlime(Cluster.Utilization utilization, Cursor utilizationObject) {
    utilizationObject.setDouble("cpu", utilization.cpu());
    utilizationObject.setDouble("idealCpu", utilization.idealCpu());
    utilizationObject.setDouble("currentCpu", utilization.currentCpu());
    utilizationObject.setDouble("memory", utilization.memory());
    utilizationObject.setDouble("idealMemory", utilization.idealMemory());
    utilizationObject.setDouble("currentMemory", utilization.currentMemory());
    utilizationObject.setDouble("disk", utilization.disk());
    utilizationObject.setDouble("idealDisk", utilization.idealDisk());
    utilizationObject.setDouble("currentDisk", utilization.currentDisk());
}
/** Writes each scaling event — its from/to resources, start time and optional completion time — into the given array. */
private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) {
    scalingEvents.forEach(event -> {
        Cursor eventObject = scalingEventsArray.addObject();
        toSlime(event.from(), eventObject.setObject("from"));
        toSlime(event.to(), eventObject.setObject("to"));
        eventObject.setLong("at", event.at().toEpochMilli());
        event.completion().ifPresent(at -> eventObject.setLong("completion", at.toEpochMilli()));
    });
}
/** Writes the per-node resource dimensions into the given object. */
private void toSlime(NodeResources resources, Cursor object) {
    object.setDouble("vcpu", resources.vcpu());
    object.setDouble("memoryGb", resources.memoryGb());
    object.setDouble("diskGb", resources.diskGb());
    object.setDouble("bandwidthGbps", resources.bandwidthGbps());
    object.setString("diskSpeed", valueOf(resources.diskSpeed()));
    object.setString("storageType", valueOf(resources.storageType()));
}
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
object.setString("tenant", tenant.name().value());
Cursor metaData = object.setObject("metaData");
metaData.setString("type", tenantType(tenant));
switch (tenant.type()) {
case athenz:
AthenzTenant athenzTenant = (AthenzTenant) tenant;
metaData.setString("athensDomain", athenzTenant.domain().getName());
metaData.setString("property", athenzTenant.property().id());
break;
case cloud: break;
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
private void tenantMetaDataToSlime(Tenant tenant, List<Application> applications, Cursor object) {
Optional<Instant> lastDev = applications.stream()
.flatMap(application -> application.instances().values().stream())
.flatMap(instance -> instance.deployments().values().stream())
.filter(deployment -> deployment.zone().environment() == Environment.dev)
.map(Deployment::at)
.max(Comparator.naturalOrder())
.or(() -> applications.stream()
.flatMap(application -> application.instances().values().stream())
.flatMap(instance -> JobType.allIn(controller.system()).stream()
.filter(job -> job.environment() == Environment.dev)
.flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream()))
.map(Run::start)
.max(Comparator.naturalOrder()));
Optional<Instant> lastSubmission = applications.stream()
.flatMap(app -> app.latestVersion().flatMap(ApplicationVersion::buildTime).stream())
.max(Comparator.naturalOrder());
object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli());
lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli()));
lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli()));
tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user)
.ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli()));
tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer)
.ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli()));
tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator)
.ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli()));
}
/** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to given query*/
private URI withPathAndQuery(String newPath, String newQuery, URI uri) {
try {
return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null);
}
catch (URISyntaxException e) {
throw new RuntimeException("Will not happen", e);
}
}
/** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
private URI withPath(String newPath, URI uri) {
return withPathAndQuery(newPath, null, uri);
}
private String toPath(DeploymentId id) {
return path("/application", "v4",
"tenant", id.applicationId().tenant(),
"application", id.applicationId().application(),
"instance", id.applicationId().instance(),
"environment", id.zoneId().environment(),
"region", id.zoneId().region());
}
private long asLong(String valueOrNull, long defaultWhenNull) {
if (valueOrNull == null) return defaultWhenNull;
try {
return Long.parseLong(valueOrNull);
}
catch (NumberFormatException e) {
throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'");
}
}
private void toSlime(Run run, Cursor object) {
object.setLong("id", run.id().number());
object.setString("version", run.versions().targetPlatform().toFullString());
if ( ! run.versions().targetApplication().isUnknown())
toSlime(run.versions().targetApplication(), object.setObject("revision"));
object.setString("reason", "unknown reason");
object.setLong("at", run.end().orElse(run.start()).toEpochMilli());
}
private Slime toSlime(InputStream jsonStream) {
try {
byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
return SlimeUtils.jsonToSlime(jsonBytes);
} catch (IOException e) {
throw new RuntimeException();
}
}
private static Principal requireUserPrincipal(HttpRequest request) {
Principal principal = request.getJDiscRequest().getUserPrincipal();
if (principal == null) throw new InternalServerErrorException("Expected a user principal");
return principal;
}
private Inspector mandatory(String key, Inspector object) {
if ( ! object.field(key).valid())
throw new IllegalArgumentException("'" + key + "' is missing");
return object.field(key);
}
private Optional<String> optional(String key, Inspector object) {
return SlimeUtils.optionalString(object.field(key));
}
private static String path(Object... elements) {
return Joiner.on("/").join(elements);
}
private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
object.setString("tenant", id.tenant().value());
object.setString("application", id.application().value());
object.setString("url", withPath("/application/v4" +
"/tenant/" + id.tenant().value() +
"/application/" + id.application().value(),
request.getUri()).toString());
}
private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
object.setString("tenant", id.tenant().value());
object.setString("application", id.application().value());
object.setString("instance", id.instance().value());
object.setString("url", withPath("/application/v4" +
"/tenant/" + id.tenant().value() +
"/application/" + id.application().value() +
"/instance/" + id.instance().value(),
request.getUri()).toString());
}
private Slime toSlime(ActivateResult result) {
Slime slime = new Slime();
Cursor object = slime.setObject();
object.setString("revisionId", result.revisionId().id());
object.setLong("applicationZipSize", result.applicationZipSizeBytes());
Cursor logArray = object.setArray("prepareMessages");
if (result.prepareResponse().log != null) {
for (Log logMessage : result.prepareResponse().log) {
Cursor logObject = logArray.addObject();
logObject.setLong("time", logMessage.time);
logObject.setString("level", logMessage.level);
logObject.setString("message", logMessage.message);
}
}
Cursor changeObject = object.setObject("configChangeActions");
Cursor restartActionsArray = changeObject.setArray("restart");
for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
Cursor restartActionObject = restartActionsArray.addObject();
restartActionObject.setString("clusterName", restartAction.clusterName);
restartActionObject.setString("clusterType", restartAction.clusterType);
restartActionObject.setString("serviceType", restartAction.serviceType);
serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
}
Cursor refeedActionsArray = changeObject.setArray("refeed");
for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
Cursor refeedActionObject = refeedActionsArray.addObject();
refeedActionObject.setString("name", refeedAction.name);
refeedActionObject.setString("documentType", refeedAction.documentType);
refeedActionObject.setString("clusterName", refeedAction.clusterName);
serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
}
return slime;
}
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
for (ServiceInfo serviceInfo : serviceInfoList) {
Cursor serviceInfoObject = array.addObject();
serviceInfoObject.setString("serviceName", serviceInfo.serviceName);
serviceInfoObject.setString("serviceType", serviceInfo.serviceType);
serviceInfoObject.setString("configId", serviceInfo.configId);
serviceInfoObject.setString("hostName", serviceInfo.hostName);
}
}
private void stringsToSlime(List<String> strings, Cursor array) {
for (String string : strings)
array.addString(string);
}
private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) {
Cursor secretStore = object.setArray("secretStores");
tenantSecretStores.forEach(store -> {
toSlime(secretStore.addObject(), store);
});
}
private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) {
object.setString("tenantRole", tenantRoles.containerRole());
var stores = object.setArray("accounts");
tenantSecretStores.forEach(secretStore -> {
toSlime(stores.addObject(), secretStore);
});
}
private void toSlime(Cursor object, TenantSecretStore secretStore) {
object.setString("name", secretStore.getName());
object.setString("awsId", secretStore.getAwsId());
object.setString("role", secretStore.getRole());
}
private String readToString(InputStream stream) {
Scanner scanner = new Scanner(stream).useDelimiter("\\A");
if ( ! scanner.hasNext()) return null;
return scanner.next();
}
private static boolean recurseOverTenants(HttpRequest request) {
return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive"));
}
private static boolean recurseOverApplications(HttpRequest request) {
return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive"));
}
private static boolean recurseOverDeployments(HttpRequest request) {
return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive"));
}
private static boolean showOnlyProductionInstances(HttpRequest request) {
return "true".equals(request.getProperty("production"));
}
private static String tenantType(Tenant tenant) {
switch (tenant.type()) {
case athenz: return "ATHENS";
case cloud: return "CLOUD";
default: throw new IllegalArgumentException("Unknown tenant type: " + tenant.getClass().getSimpleName());
}
}
private static ApplicationId appIdFromPath(Path path) {
return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
}
private static JobType jobTypeFromPath(Path path) {
return JobType.fromJobName(path.get("jobtype"));
}
private static RunId runIdFromPath(Path path) {
long number = Long.parseLong(path.get("number"));
return new RunId(appIdFromPath(path), jobTypeFromPath(path), number);
}
private HttpResponse submit(String tenant, String application, HttpRequest request) {
Map<String, byte[]> dataParts = parseDataParts(request);
Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
long projectId = Math.max(1, submitOptions.field("projectId").asLong());
Optional<String> repository = optional("repository", submitOptions);
Optional<String> branch = optional("branch", submitOptions);
Optional<String> commit = optional("commit", submitOptions);
Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
: Optional.empty();
Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
Optional<String> authorEmail = optional("authorEmail", submitOptions);
sourceUrl.map(URI::create).ifPresent(url -> {
if (url.getHost() == null || url.getScheme() == null)
throw new IllegalArgumentException("Source URL must include scheme and host");
});
ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
Optional.empty(),
Optional.empty(),
applicationPackage,
Optional.of(requireUserPrincipal(request)));
return JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
tenant,
application,
sourceRevision,
authorEmail,
sourceUrl,
projectId,
applicationPackage,
dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP));
}
private HttpResponse removeAllProdDeployments(String tenant, String application) {
JobControllerApiHandlerHelper.submitResponse(controller.jobController(), tenant, application,
Optional.empty(), Optional.empty(), Optional.empty(), 1,
ApplicationPackage.deploymentRemoval(), new byte[0]);
return new MessageResponse("All deployments removed");
}
private ZoneId requireZone(String environment, String region) {
ZoneId zone = ZoneId.from(environment, region);
if (zone.environment() == Environment.prod && zone.region().value().equals("controller")) {
return zone;
}
if (!controller.zoneRegistry().hasZone(zone)) {
throw new IllegalArgumentException("Zone " + zone + " does not exist in this system");
}
return zone;
}
private static Map<String, byte[]> parseDataParts(HttpRequest request) {
String contentHash = request.getHeader("X-Content-Hash");
if (contentHash == null)
return new MultipartParser().parse(request);
DigestInputStream digester = Signatures.sha256Digester(request.getData());
var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
return dataParts;
}
private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
if (instance.rotations().isEmpty()) {
throw new NotExistsException("global rotation does not exist for " + instance);
}
if (endpointId.isPresent()) {
return instance.rotations().stream()
.filter(r -> r.endpointId().id().equals(endpointId.get()))
.map(AssignedRotation::rotationId)
.findFirst()
.orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() +
" does not exist for " + instance));
} else if (instance.rotations().size() > 1) {
throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
}
return instance.rotations().get(0).rotationId();
}
private static String rotationStateString(RotationState state) {
switch (state) {
case in: return "IN";
case out: return "OUT";
}
return "UNKNOWN";
}
private static String endpointScopeString(Endpoint.Scope scope) {
switch (scope) {
case region: return "region";
case global: return "global";
case zone: return "zone";
}
throw new IllegalArgumentException("Unknown endpoint scope " + scope);
}
private static String routingMethodString(RoutingMethod method) {
switch (method) {
case exclusive: return "exclusive";
case shared: return "shared";
case sharedLayer4: return "sharedLayer4";
}
throw new IllegalArgumentException("Unknown routing method " + method);
}
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName))
.filter(cls::isInstance)
.map(cls::cast)
.orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request"));
}
/** Returns whether given request is by an operator */
private static boolean isOperator(HttpRequest request) {
var securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
return securityContext.roles().stream()
.map(Role::definition)
.anyMatch(definition -> definition == RoleDefinition.hostedOperator);
}
} |
Very ncie! | public void processes_queue() throws IOException {
RetriggerMaintainer maintainer = new RetriggerMaintainer(tester.controller(), Duration.ofDays(1));
ApplicationId applicationId = ApplicationId.from("tenant", "app", "default");
var devApp = tester.newDeploymentContext(applicationId);
ApplicationPackage appPackage = new ApplicationPackageBuilder()
.region("us-west-1")
.build();
devApp.runJob(JobType.devUsEast1, appPackage);
devApp.completeRollout();
tester.deploymentTrigger().reTrigger(applicationId, JobType.devUsEast1);
tester.deploymentTrigger().reTriggerOrAddToQueue(devApp.deploymentIdIn(ZoneId.from("dev", "us-east-1")));
List<RetriggerEntry> retriggerEntries = tester.controller().curator().readRetriggerEntries();
assertEquals(1, retriggerEntries.size());
devApp.jobAborted(JobType.devUsEast1);
assertEquals(0, tester.jobs().active(applicationId).size());
maintainer.maintain();
retriggerEntries = tester.controller().curator().readRetriggerEntries();
assertEquals(1, retriggerEntries.size());
assertEquals(1, tester.jobs().active(applicationId).size());
devApp.runJob(JobType.devUsEast1);
assertEquals(0, tester.jobs().active(applicationId).size());
maintainer.maintain();
retriggerEntries = tester.controller().curator().readRetriggerEntries();
assertEquals(0, retriggerEntries.size());
assertEquals(0, tester.jobs().active(applicationId).size());
} | } | public void processes_queue() throws IOException {
RetriggerMaintainer maintainer = new RetriggerMaintainer(tester.controller(), Duration.ofDays(1));
ApplicationId applicationId = ApplicationId.from("tenant", "app", "default");
var devApp = tester.newDeploymentContext(applicationId);
ApplicationPackage appPackage = new ApplicationPackageBuilder()
.region("us-west-1")
.build();
devApp.runJob(JobType.devUsEast1, appPackage);
devApp.completeRollout();
tester.deploymentTrigger().reTrigger(applicationId, JobType.devUsEast1);
tester.deploymentTrigger().reTriggerOrAddToQueue(devApp.deploymentIdIn(ZoneId.from("dev", "us-east-1")));
List<RetriggerEntry> retriggerEntries = tester.controller().curator().readRetriggerEntries();
assertEquals(1, retriggerEntries.size());
devApp.jobAborted(JobType.devUsEast1);
assertEquals(0, tester.jobs().active(applicationId).size());
maintainer.maintain();
retriggerEntries = tester.controller().curator().readRetriggerEntries();
assertEquals(1, retriggerEntries.size());
assertEquals(1, tester.jobs().active(applicationId).size());
devApp.runJob(JobType.devUsEast1);
assertEquals(0, tester.jobs().active(applicationId).size());
maintainer.maintain();
retriggerEntries = tester.controller().curator().readRetriggerEntries();
assertEquals(0, retriggerEntries.size());
assertEquals(0, tester.jobs().active(applicationId).size());
} | class RetriggerMaintainerTest {
private final DeploymentTester tester = new DeploymentTester();
@Test
} | class RetriggerMaintainerTest {
private final DeploymentTester tester = new DeploymentTester();
@Test
} |
This seems a bit eager? | private RankProfileList() {
rankingConstants = new RankingConstants();
largeRankExpressions = new LargeRankExpressions();
onnxModels = new OnnxModels();
dryRunOnnxOnSetup = true;
} | dryRunOnnxOnSetup = true; | private RankProfileList() {
rankingConstants = new RankingConstants();
largeRankExpressions = new LargeRankExpressions();
onnxModels = new OnnxModels();
dryRunOnnxOnSetup = true;
} | class RankProfileList extends Derived implements RankProfilesConfig.Producer {
private static final Logger log = Logger.getLogger(RankProfileList.class.getName());
private final Map<String, RawRankProfile> rankProfiles = new java.util.LinkedHashMap<>();
private final RankingConstants rankingConstants;
private final LargeRankExpressions largeRankExpressions;
private final OnnxModels onnxModels;
private final boolean dryRunOnnxOnSetup;
public static RankProfileList empty = new RankProfileList();
/**
* Creates a rank profile
*
* @param search the search definition this is a rank profile from
* @param attributeFields the attribute fields to create a ranking for
*/
public RankProfileList(Search search,
RankingConstants rankingConstants,
LargeRankExpressions largeRankExpressions,
AttributeFields attributeFields,
RankProfileRegistry rankProfileRegistry,
QueryProfileRegistry queryProfiles,
ImportedMlModels importedModels,
ModelContext.Properties deployProperties) {
setName(search == null ? "default" : search.getName());
this.rankingConstants = rankingConstants;
this.largeRankExpressions = largeRankExpressions;
onnxModels = search == null ? new OnnxModels() : search.onnxModels();
dryRunOnnxOnSetup = deployProperties.featureFlags().dryRunOnnxOnSetup();
deriveRankProfiles(rankProfileRegistry, queryProfiles, importedModels, search, attributeFields, deployProperties);
}
private void deriveRankProfiles(RankProfileRegistry rankProfileRegistry,
QueryProfileRegistry queryProfiles,
ImportedMlModels importedModels,
Search search,
AttributeFields attributeFields,
ModelContext.Properties deployProperties) {
if (search != null) {
RawRankProfile defaultProfile = new RawRankProfile(rankProfileRegistry.get(search, "default"),
largeRankExpressions, queryProfiles, importedModels,
attributeFields, deployProperties);
rankProfiles.put(defaultProfile.getName(), defaultProfile);
}
for (RankProfile rank : rankProfileRegistry.rankProfilesOf(search)) {
if (search != null && "default".equals(rank.getName())) continue;
if (search == null) {
this.onnxModels.add(rank.onnxModels());
}
RawRankProfile rawRank = new RawRankProfile(rank, largeRankExpressions, queryProfiles, importedModels,
attributeFields, deployProperties);
rankProfiles.put(rawRank.getName(), rawRank);
}
}
public Map<String, RawRankProfile> getRankProfiles() {
return rankProfiles;
}
/** Returns the raw rank profile with the given name, or null if it is not present */
public RawRankProfile getRankProfile(String name) {
return rankProfiles.get(name);
}
public void sendTo(Collection<? extends AbstractService> services) {
rankingConstants.sendTo(services);
largeRankExpressions.sendTo(services);
onnxModels.sendTo(services);
}
@Override
public String getDerivedName() { return "rank-profiles"; }
@Override
public void getConfig(RankProfilesConfig.Builder builder) {
for (RawRankProfile rank : rankProfiles.values() ) {
rank.getConfig(builder);
}
}
public void getConfig(RankingExpressionsConfig.Builder builder) {
largeRankExpressions.asMap().values().forEach((expr) -> builder.expression.add(new RankingExpressionsConfig.Expression.Builder().name(expr.getName()).fileref(expr.getFileReference())));
}
public void getConfig(RankingConstantsConfig.Builder builder) {
for (RankingConstant constant : rankingConstants.asMap().values()) {
if ("".equals(constant.getFileReference()))
log.warning("Illegal file reference " + constant);
else
builder.constant(new RankingConstantsConfig.Constant.Builder()
.name(constant.getName())
.fileref(constant.getFileReference())
.type(constant.getType()));
}
}
public void getConfig(OnnxModelsConfig.Builder builder) {
for (OnnxModel model : onnxModels.asMap().values()) {
if ("".equals(model.getFileReference()))
log.warning("Illegal file reference " + model);
else {
OnnxModelsConfig.Model.Builder modelBuilder = new OnnxModelsConfig.Model.Builder();
modelBuilder.dry_run_on_setup(dryRunOnnxOnSetup);
modelBuilder.name(model.getName());
modelBuilder.fileref(model.getFileReference());
model.getInputMap().forEach((name, source) -> modelBuilder.input(new OnnxModelsConfig.Model.Input.Builder().name(name).source(source)));
model.getOutputMap().forEach((name, as) -> modelBuilder.output(new OnnxModelsConfig.Model.Output.Builder().name(name).as(as)));
builder.model(modelBuilder);
}
}
}
} | class RankProfileList extends Derived implements RankProfilesConfig.Producer {
private static final Logger log = Logger.getLogger(RankProfileList.class.getName());
private final Map<String, RawRankProfile> rankProfiles = new java.util.LinkedHashMap<>();
private final RankingConstants rankingConstants;
private final LargeRankExpressions largeRankExpressions;
private final OnnxModels onnxModels;
private final boolean dryRunOnnxOnSetup;
public static RankProfileList empty = new RankProfileList();
/**
* Creates a rank profile
*
* @param search the search definition this is a rank profile from
* @param attributeFields the attribute fields to create a ranking for
*/
public RankProfileList(Search search,
RankingConstants rankingConstants,
LargeRankExpressions largeRankExpressions,
AttributeFields attributeFields,
RankProfileRegistry rankProfileRegistry,
QueryProfileRegistry queryProfiles,
ImportedMlModels importedModels,
ModelContext.Properties deployProperties) {
setName(search == null ? "default" : search.getName());
this.rankingConstants = rankingConstants;
this.largeRankExpressions = largeRankExpressions;
onnxModels = search == null ? new OnnxModels() : search.onnxModels();
dryRunOnnxOnSetup = deployProperties.featureFlags().dryRunOnnxOnSetup();
deriveRankProfiles(rankProfileRegistry, queryProfiles, importedModels, search, attributeFields, deployProperties);
}
private void deriveRankProfiles(RankProfileRegistry rankProfileRegistry,
QueryProfileRegistry queryProfiles,
ImportedMlModels importedModels,
Search search,
AttributeFields attributeFields,
ModelContext.Properties deployProperties) {
if (search != null) {
RawRankProfile defaultProfile = new RawRankProfile(rankProfileRegistry.get(search, "default"),
largeRankExpressions, queryProfiles, importedModels,
attributeFields, deployProperties);
rankProfiles.put(defaultProfile.getName(), defaultProfile);
}
for (RankProfile rank : rankProfileRegistry.rankProfilesOf(search)) {
if (search != null && "default".equals(rank.getName())) continue;
if (search == null) {
this.onnxModels.add(rank.onnxModels());
}
RawRankProfile rawRank = new RawRankProfile(rank, largeRankExpressions, queryProfiles, importedModels,
attributeFields, deployProperties);
rankProfiles.put(rawRank.getName(), rawRank);
}
}
public Map<String, RawRankProfile> getRankProfiles() {
return rankProfiles;
}
/** Returns the raw rank profile with the given name, or null if it is not present */
public RawRankProfile getRankProfile(String name) {
return rankProfiles.get(name);
}
public void sendTo(Collection<? extends AbstractService> services) {
rankingConstants.sendTo(services);
largeRankExpressions.sendTo(services);
onnxModels.sendTo(services);
}
@Override
public String getDerivedName() { return "rank-profiles"; }
@Override
public void getConfig(RankProfilesConfig.Builder builder) {
for (RawRankProfile rank : rankProfiles.values() ) {
rank.getConfig(builder);
}
}
public void getConfig(RankingExpressionsConfig.Builder builder) {
largeRankExpressions.asMap().values().forEach((expr) -> builder.expression.add(new RankingExpressionsConfig.Expression.Builder().name(expr.getName()).fileref(expr.getFileReference())));
}
public void getConfig(RankingConstantsConfig.Builder builder) {
for (RankingConstant constant : rankingConstants.asMap().values()) {
if ("".equals(constant.getFileReference()))
log.warning("Illegal file reference " + constant);
else
builder.constant(new RankingConstantsConfig.Constant.Builder()
.name(constant.getName())
.fileref(constant.getFileReference())
.type(constant.getType()));
}
}
public void getConfig(OnnxModelsConfig.Builder builder) {
for (OnnxModel model : onnxModels.asMap().values()) {
if ("".equals(model.getFileReference()))
log.warning("Illegal file reference " + model);
else {
OnnxModelsConfig.Model.Builder modelBuilder = new OnnxModelsConfig.Model.Builder();
modelBuilder.dry_run_on_setup(dryRunOnnxOnSetup);
modelBuilder.name(model.getName());
modelBuilder.fileref(model.getFileReference());
model.getInputMap().forEach((name, source) -> modelBuilder.input(new OnnxModelsConfig.Model.Input.Builder().name(name).source(source)));
model.getOutputMap().forEach((name, as) -> modelBuilder.output(new OnnxModelsConfig.Model.Output.Builder().name(name).as(as)));
builder.model(modelBuilder);
}
}
}
} |
This is just test code. fail-fast is my preferred apparoach. | private RankProfileList() {
rankingConstants = new RankingConstants();
largeRankExpressions = new LargeRankExpressions();
onnxModels = new OnnxModels();
dryRunOnnxOnSetup = true;
} | dryRunOnnxOnSetup = true; | private RankProfileList() {
rankingConstants = new RankingConstants();
largeRankExpressions = new LargeRankExpressions();
onnxModels = new OnnxModels();
dryRunOnnxOnSetup = true;
} | class RankProfileList extends Derived implements RankProfilesConfig.Producer {
private static final Logger log = Logger.getLogger(RankProfileList.class.getName());
private final Map<String, RawRankProfile> rankProfiles = new java.util.LinkedHashMap<>();
private final RankingConstants rankingConstants;
private final LargeRankExpressions largeRankExpressions;
private final OnnxModels onnxModels;
private final boolean dryRunOnnxOnSetup;
public static RankProfileList empty = new RankProfileList();
/**
* Creates a rank profile
*
* @param search the search definition this is a rank profile from
* @param attributeFields the attribute fields to create a ranking for
*/
public RankProfileList(Search search,
RankingConstants rankingConstants,
LargeRankExpressions largeRankExpressions,
AttributeFields attributeFields,
RankProfileRegistry rankProfileRegistry,
QueryProfileRegistry queryProfiles,
ImportedMlModels importedModels,
ModelContext.Properties deployProperties) {
setName(search == null ? "default" : search.getName());
this.rankingConstants = rankingConstants;
this.largeRankExpressions = largeRankExpressions;
onnxModels = search == null ? new OnnxModels() : search.onnxModels();
dryRunOnnxOnSetup = deployProperties.featureFlags().dryRunOnnxOnSetup();
deriveRankProfiles(rankProfileRegistry, queryProfiles, importedModels, search, attributeFields, deployProperties);
}
private void deriveRankProfiles(RankProfileRegistry rankProfileRegistry,
QueryProfileRegistry queryProfiles,
ImportedMlModels importedModels,
Search search,
AttributeFields attributeFields,
ModelContext.Properties deployProperties) {
if (search != null) {
RawRankProfile defaultProfile = new RawRankProfile(rankProfileRegistry.get(search, "default"),
largeRankExpressions, queryProfiles, importedModels,
attributeFields, deployProperties);
rankProfiles.put(defaultProfile.getName(), defaultProfile);
}
for (RankProfile rank : rankProfileRegistry.rankProfilesOf(search)) {
if (search != null && "default".equals(rank.getName())) continue;
if (search == null) {
this.onnxModels.add(rank.onnxModels());
}
RawRankProfile rawRank = new RawRankProfile(rank, largeRankExpressions, queryProfiles, importedModels,
attributeFields, deployProperties);
rankProfiles.put(rawRank.getName(), rawRank);
}
}
public Map<String, RawRankProfile> getRankProfiles() {
return rankProfiles;
}
/** Returns the raw rank profile with the given name, or null if it is not present */
public RawRankProfile getRankProfile(String name) {
return rankProfiles.get(name);
}
public void sendTo(Collection<? extends AbstractService> services) {
rankingConstants.sendTo(services);
largeRankExpressions.sendTo(services);
onnxModels.sendTo(services);
}
@Override
public String getDerivedName() { return "rank-profiles"; }
@Override
public void getConfig(RankProfilesConfig.Builder builder) {
for (RawRankProfile rank : rankProfiles.values() ) {
rank.getConfig(builder);
}
}
public void getConfig(RankingExpressionsConfig.Builder builder) {
largeRankExpressions.asMap().values().forEach((expr) -> builder.expression.add(new RankingExpressionsConfig.Expression.Builder().name(expr.getName()).fileref(expr.getFileReference())));
}
public void getConfig(RankingConstantsConfig.Builder builder) {
for (RankingConstant constant : rankingConstants.asMap().values()) {
if ("".equals(constant.getFileReference()))
log.warning("Illegal file reference " + constant);
else
builder.constant(new RankingConstantsConfig.Constant.Builder()
.name(constant.getName())
.fileref(constant.getFileReference())
.type(constant.getType()));
}
}
public void getConfig(OnnxModelsConfig.Builder builder) {
for (OnnxModel model : onnxModels.asMap().values()) {
if ("".equals(model.getFileReference()))
log.warning("Illegal file reference " + model);
else {
OnnxModelsConfig.Model.Builder modelBuilder = new OnnxModelsConfig.Model.Builder();
modelBuilder.dry_run_on_setup(dryRunOnnxOnSetup);
modelBuilder.name(model.getName());
modelBuilder.fileref(model.getFileReference());
model.getInputMap().forEach((name, source) -> modelBuilder.input(new OnnxModelsConfig.Model.Input.Builder().name(name).source(source)));
model.getOutputMap().forEach((name, as) -> modelBuilder.output(new OnnxModelsConfig.Model.Output.Builder().name(name).as(as)));
builder.model(modelBuilder);
}
}
}
} | class RankProfileList extends Derived implements RankProfilesConfig.Producer {
private static final Logger log = Logger.getLogger(RankProfileList.class.getName());
// Derived profiles keyed by profile name; LinkedHashMap preserves derivation order.
private final Map<String, RawRankProfile> rankProfiles = new java.util.LinkedHashMap<>();
private final RankingConstants rankingConstants;
private final LargeRankExpressions largeRankExpressions;
private final OnnxModels onnxModels;
private final boolean dryRunOnnxOnSetup;
// NOTE(review): public mutable static field — any caller could reassign it; consider making it final.
public static RankProfileList empty = new RankProfileList();
/**
* Creates a rank profile list and derives all rank profiles for the given search definition.
*
* @param search the search definition this is a rank profile from, or null for global profiles
* @param rankingConstants the ranking constants to distribute to services
* @param largeRankExpressions the large rank expressions to distribute to services
* @param attributeFields the attribute fields to create a ranking for
* @param rankProfileRegistry the registry holding the profiles to derive
* @param queryProfiles the query profile registry in use
* @param importedModels imported machine-learned models referenced from profiles
* @param deployProperties deploy properties, consulted for feature flags
*/
public RankProfileList(Search search,
RankingConstants rankingConstants,
LargeRankExpressions largeRankExpressions,
AttributeFields attributeFields,
RankProfileRegistry rankProfileRegistry,
QueryProfileRegistry queryProfiles,
ImportedMlModels importedModels,
ModelContext.Properties deployProperties) {
setName(search == null ? "default" : search.getName());
this.rankingConstants = rankingConstants;
this.largeRankExpressions = largeRankExpressions;
// Without a search definition the ONNX models are collected from the profiles themselves (see deriveRankProfiles).
onnxModels = search == null ? new OnnxModels() : search.onnxModels();
dryRunOnnxOnSetup = deployProperties.featureFlags().dryRunOnnxOnSetup();
deriveRankProfiles(rankProfileRegistry, queryProfiles, importedModels, search, attributeFields, deployProperties);
}
// Derives a RawRankProfile for every registered profile of this search definition
// (or of the global/null search) and records each by name in rankProfiles.
private void deriveRankProfiles(RankProfileRegistry rankProfileRegistry,
QueryProfileRegistry queryProfiles,
ImportedMlModels importedModels,
Search search,
AttributeFields attributeFields,
ModelContext.Properties deployProperties) {
// The "default" profile is derived first so it is always present for a search.
if (search != null) {
RawRankProfile defaultProfile = new RawRankProfile(rankProfileRegistry.get(search, "default"),
largeRankExpressions, queryProfiles, importedModels,
attributeFields, deployProperties);
rankProfiles.put(defaultProfile.getName(), defaultProfile);
}
for (RankProfile rank : rankProfileRegistry.rankProfilesOf(search)) {
if (search != null && "default".equals(rank.getName())) continue; // already derived above
if (search == null) {
// With no search definition, ONNX models are gathered from the profiles themselves.
this.onnxModels.add(rank.onnxModels());
}
RawRankProfile rawRank = new RawRankProfile(rank, largeRankExpressions, queryProfiles, importedModels,
attributeFields, deployProperties);
rankProfiles.put(rawRank.getName(), rawRank);
}
}
/** Returns the derived rank profiles keyed by name. Note: exposes the internal mutable map. */
public Map<String, RawRankProfile> getRankProfiles() {
return rankProfiles;
}
/** Returns the raw rank profile with the given name, or null if it is not present */
public RawRankProfile getRankProfile(String name) {
return rankProfiles.get(name);
}
public void sendTo(Collection<? extends AbstractService> services) {
rankingConstants.sendTo(services);
largeRankExpressions.sendTo(services);
onnxModels.sendTo(services);
}
@Override
public String getDerivedName() { return "rank-profiles"; }
@Override
public void getConfig(RankProfilesConfig.Builder builder) {
for (RawRankProfile rank : rankProfiles.values() ) {
rank.getConfig(builder);
}
}
public void getConfig(RankingExpressionsConfig.Builder builder) {
largeRankExpressions.asMap().values().forEach((expr) -> builder.expression.add(new RankingExpressionsConfig.Expression.Builder().name(expr.getName()).fileref(expr.getFileReference())));
}
public void getConfig(RankingConstantsConfig.Builder builder) {
for (RankingConstant constant : rankingConstants.asMap().values()) {
if ("".equals(constant.getFileReference()))
log.warning("Illegal file reference " + constant);
else
builder.constant(new RankingConstantsConfig.Constant.Builder()
.name(constant.getName())
.fileref(constant.getFileReference())
.type(constant.getType()));
}
}
public void getConfig(OnnxModelsConfig.Builder builder) {
for (OnnxModel model : onnxModels.asMap().values()) {
if ("".equals(model.getFileReference()))
log.warning("Illegal file reference " + model);
else {
OnnxModelsConfig.Model.Builder modelBuilder = new OnnxModelsConfig.Model.Builder();
modelBuilder.dry_run_on_setup(dryRunOnnxOnSetup);
modelBuilder.name(model.getName());
modelBuilder.fileref(model.getFileReference());
model.getInputMap().forEach((name, source) -> modelBuilder.input(new OnnxModelsConfig.Model.Input.Builder().name(name).source(source)));
model.getOutputMap().forEach((name, as) -> modelBuilder.output(new OnnxModelsConfig.Model.Output.Builder().name(name).as(as)));
builder.model(modelBuilder);
}
}
}
} |
```suggestion log.log(Level.INFO, "Using {0} max streams per connection", new Object[] {streamsPerConnection}); ``` | private FeedClient createFeedClient() {
// Dry-run mode never opens connections; return a stub client instead.
if (config.dryrun()) {
    return new DryrunClient();
} else {
    List<URI> endpoints = endpointUris(config);
    log.info("Using endpoints " + endpoints);
    int streamsPerConnection = streamsPerConnection(config);
    // The value is an upper bound, so say "max streams" to avoid misleading operators.
    log.log(Level.INFO, "Using {0} max streams per connection", new Object[] {streamsPerConnection});
    log.log(Level.INFO, "Using {0} connections", new Object[] {config.numConnections()});
    FeedClientBuilder feedClientBuilder = FeedClientBuilder.create(endpoints)
            .setConnectionsPerEndpoint(config.numConnections())
            .setMaxStreamPerConnection(streamsPerConnection)
            .setRetryStrategy(retryStrategy(config));
    // Hook for subclasses to customize the builder before it is built.
    onFeedClientInitialization(feedClientBuilder);
    return feedClientBuilder.build();
}
} | log.log(Level.INFO, "Using {0} streams per connection", new Object[] {streamsPerConnection}); | private FeedClient createFeedClient() {
// Dry-run mode never opens connections; return a stub client instead.
if (config.dryrun()) {
return new DryrunClient();
} else {
List<URI> endpoints = endpointUris(config);
log.info("Using endpoints " + endpoints);
int streamsPerConnection = streamsPerConnection(config);
log.log(Level.INFO, "Using {0} max streams per connection", new Object[] {streamsPerConnection});
log.log(Level.INFO, "Using {0} connections", new Object[] {config.numConnections()});
FeedClientBuilder feedClientBuilder = FeedClientBuilder.create(endpoints)
.setConnectionsPerEndpoint(config.numConnections())
.setMaxStreamPerConnection(streamsPerConnection)
.setRetryStrategy(retryStrategy(config));
// Hook for subclasses to customize the builder before it is built.
onFeedClientInitialization(feedClientBuilder);
return feedClientBuilder.build();
}
} | class VespaRecordWriter extends RecordWriter<Object, Object> {
private final static Logger log = Logger.getLogger(VespaRecordWriter.class.getCanonicalName());
private final VespaCounters counters;
private final VespaConfiguration config;
private boolean initialized = false;
private JsonFeeder feeder;
protected VespaRecordWriter(VespaConfiguration config, VespaCounters counters) {
this.counters = counters;
this.config = config;
}
@Override
public void write(Object key, Object data) throws IOException {
initializeOnFirstWrite();
String json = data.toString().trim();
feeder.feedSingle(json)
.whenComplete((result, error) -> {
if (error != null) {
if (error instanceof OperationParseException) {
counters.incrementDocumentsSkipped(1);
} else {
String msg = "Failed to feed single document: " + error;
log.log(Level.WARNING, msg, error);
counters.incrementDocumentsFailed(1);
}
} else {
counters.incrementDocumentsOk(1);
}
});
counters.incrementDocumentsSent(1);
if (counters.getDocumentsSent() % config.progressInterval() == 0) {
String progress = String.format("Feed progress: %d / %d / %d / %d (sent, ok, failed, skipped)",
counters.getDocumentsSent(),
counters.getDocumentsOk(),
counters.getDocumentsFailed(),
counters.getDocumentsSkipped());
log.info(progress);
}
}
@Override
public void close(TaskAttemptContext context) throws IOException {
if (feeder != null) {
feeder.close();
feeder = null;
initialized = false;
}
}
/** Override method to alter {@link FeedClient} configuration */
protected void onFeedClientInitialization(FeedClientBuilder builder) {}
private void initializeOnFirstWrite() {
if (initialized) return;
validateConfig();
useRandomizedStartupDelayIfEnabled();
feeder = createJsonStreamFeeder();
initialized = true;
}
// Rejects configurations this implementation cannot honor; SSL and JSON are hard requirements.
private void validateConfig() {
    if (!config.useSSL()) {
        throw new IllegalArgumentException("SSL is required for this feed client implementation");
    }
    if (config.dataFormat() != FeedParams.DataFormat.JSON_UTF8) {
        // Fixed grammar in user-facing message: "is support by" -> "is supported by".
        throw new IllegalArgumentException("Only JSON is supported by this feed client implementation");
    }
    // Proxy settings are accepted for backwards compatibility but have no effect here.
    if (config.proxyHost() != null) {
        log.warning(String.format("Ignoring proxy config (host='%s', port=%d)", config.proxyHost(), config.proxyPort()));
    }
}
// Sleeps a random amount before the first feed operation, spreading out task start times
// so all mappers do not hit the endpoints simultaneously. No-op in dry-run mode.
private void useRandomizedStartupDelayIfEnabled() {
    if (!config.dryrun() && config.randomStartupSleepMs() > 0) {
        int delay = ThreadLocalRandom.current().nextInt(config.randomStartupSleepMs());
        log.info("Delaying startup by " + delay + " ms");
        try {
            Thread.sleep(delay);
        } catch (InterruptedException e) {
            // Restore the interrupt flag instead of silently swallowing it.
            Thread.currentThread().interrupt();
        }
    }
}
private JsonFeeder createJsonStreamFeeder() {
FeedClient feedClient = createFeedClient();
JsonFeeder.Builder builder = JsonFeeder.builder(feedClient)
.withTimeout(Duration.ofMinutes(10));
if (config.route() != null) {
builder.withRoute(config.route());
}
return builder.build();
}
private static FeedClient.RetryStrategy retryStrategy(VespaConfiguration config) {
int maxRetries = config.numRetries();
return new FeedClient.RetryStrategy() {
@Override public int retries() { return maxRetries; }
};
}
private static int streamsPerConnection(VespaConfiguration config) {
return Math.min(256, config.maxInFlightRequests() / config.numConnections());
}
private static List<URI> endpointUris(VespaConfiguration config) {
return Arrays.stream(config.endpoint().split(","))
.map(hostname -> URI.create(String.format("https:
.collect(toList());
}
private static class DryrunClient implements FeedClient {
@Override
public CompletableFuture<Result> put(DocumentId documentId, String documentJson, OperationParameters params) {
return createSuccessResult(documentId);
}
@Override
public CompletableFuture<Result> update(DocumentId documentId, String updateJson, OperationParameters params) {
return createSuccessResult(documentId);
}
@Override
public CompletableFuture<Result> remove(DocumentId documentId, OperationParameters params) {
return createSuccessResult(documentId);
}
@Override public OperationStats stats() { return null; }
@Override public void close(boolean graceful) {}
private static CompletableFuture<Result> createSuccessResult(DocumentId documentId) {
return CompletableFuture.completedFuture(DryrunResult.create(Result.Type.success, documentId, "ok", null));
}
}
} | class VespaRecordWriter extends RecordWriter<Object, Object> {
private final static Logger log = Logger.getLogger(VespaRecordWriter.class.getCanonicalName());
private final VespaCounters counters;
private final VespaConfiguration config;
private boolean initialized = false;
private JsonFeeder feeder;
protected VespaRecordWriter(VespaConfiguration config, VespaCounters counters) {
this.counters = counters;
this.config = config;
}
@Override
public void write(Object key, Object data) throws IOException {
initializeOnFirstWrite();
String json = data.toString().trim();
feeder.feedSingle(json)
.whenComplete((result, error) -> {
if (error != null) {
if (error instanceof OperationParseException) {
counters.incrementDocumentsSkipped(1);
} else {
String msg = "Failed to feed single document: " + error;
log.log(Level.WARNING, msg, error);
counters.incrementDocumentsFailed(1);
}
} else {
counters.incrementDocumentsOk(1);
}
});
counters.incrementDocumentsSent(1);
if (counters.getDocumentsSent() % config.progressInterval() == 0) {
String progress = String.format("Feed progress: %d / %d / %d / %d (sent, ok, failed, skipped)",
counters.getDocumentsSent(),
counters.getDocumentsOk(),
counters.getDocumentsFailed(),
counters.getDocumentsSkipped());
log.info(progress);
}
}
@Override
public void close(TaskAttemptContext context) throws IOException {
if (feeder != null) {
feeder.close();
feeder = null;
initialized = false;
}
}
/** Override method to alter {@link FeedClient} configuration */
protected void onFeedClientInitialization(FeedClientBuilder builder) {}
private void initializeOnFirstWrite() {
if (initialized) return;
validateConfig();
useRandomizedStartupDelayIfEnabled();
feeder = createJsonStreamFeeder();
initialized = true;
}
// Rejects configurations this implementation cannot honor; SSL and JSON are hard requirements.
private void validateConfig() {
    if (!config.useSSL()) {
        throw new IllegalArgumentException("SSL is required for this feed client implementation");
    }
    if (config.dataFormat() != FeedParams.DataFormat.JSON_UTF8) {
        // Fixed grammar in user-facing message: "is support by" -> "is supported by".
        throw new IllegalArgumentException("Only JSON is supported by this feed client implementation");
    }
    // Proxy settings are accepted for backwards compatibility but have no effect here.
    if (config.proxyHost() != null) {
        log.warning(String.format("Ignoring proxy config (host='%s', port=%d)", config.proxyHost(), config.proxyPort()));
    }
}
// Sleeps a random amount before the first feed operation, spreading out task start times
// so all mappers do not hit the endpoints simultaneously. No-op in dry-run mode.
private void useRandomizedStartupDelayIfEnabled() {
    if (!config.dryrun() && config.randomStartupSleepMs() > 0) {
        int delay = ThreadLocalRandom.current().nextInt(config.randomStartupSleepMs());
        log.info("Delaying startup by " + delay + " ms");
        try {
            Thread.sleep(delay);
        } catch (InterruptedException e) {
            // Restore the interrupt flag instead of silently swallowing it.
            Thread.currentThread().interrupt();
        }
    }
}
private JsonFeeder createJsonStreamFeeder() {
FeedClient feedClient = createFeedClient();
JsonFeeder.Builder builder = JsonFeeder.builder(feedClient)
.withTimeout(Duration.ofMinutes(10));
if (config.route() != null) {
builder.withRoute(config.route());
}
return builder.build();
}
private static FeedClient.RetryStrategy retryStrategy(VespaConfiguration config) {
int maxRetries = config.numRetries();
return new FeedClient.RetryStrategy() {
@Override public int retries() { return maxRetries; }
};
}
private static int streamsPerConnection(VespaConfiguration config) {
return Math.min(256, config.maxInFlightRequests() / config.numConnections());
}
private static List<URI> endpointUris(VespaConfiguration config) {
return Arrays.stream(config.endpoint().split(","))
.map(hostname -> URI.create(String.format("https:
.collect(toList());
}
private static class DryrunClient implements FeedClient {
@Override
public CompletableFuture<Result> put(DocumentId documentId, String documentJson, OperationParameters params) {
return createSuccessResult(documentId);
}
@Override
public CompletableFuture<Result> update(DocumentId documentId, String updateJson, OperationParameters params) {
return createSuccessResult(documentId);
}
@Override
public CompletableFuture<Result> remove(DocumentId documentId, OperationParameters params) {
return createSuccessResult(documentId);
}
@Override public OperationStats stats() { return null; }
@Override public void close(boolean graceful) {}
private static CompletableFuture<Result> createSuccessResult(DocumentId documentId) {
return CompletableFuture.completedFuture(DryrunResult.create(Result.Type.success, documentId, "ok", null));
}
}
} |
Should latch count down be invoked `onError`? `onComplete` will not be invoked in this case. | private int run(String[] rawArgs) {
boolean verbose = false;
try {
    CliArguments cliArgs = CliArguments.fromRawArgs(rawArgs);
    verbose = cliArgs.verboseSpecified();
    if (cliArgs.helpSpecified()) {
        cliArgs.printHelp(systemOut);
        return 0;
    }
    if (cliArgs.versionSpecified()) {
        systemOut.println(Vespa.VERSION);
        return 0;
    }
    try (InputStream in = createFeedInputStream(cliArgs);
         FeedClient feedClient = createFeedClient(cliArgs);
         JsonFeeder feeder = createJsonFeeder(feedClient, cliArgs)) {
        CountDownLatch latch = new CountDownLatch(1);
        AtomicReference<FeedException> fatal = new AtomicReference<>();
        long startNanos = System.nanoTime();
        feeder.feedMany(in, new ResultCallback() {
            @Override public void onNextResult(Result result, FeedException error) { handleResult(result, error, cliArgs); }
            // onComplete() is not invoked after a fatal error, so the latch must be
            // released here as well — otherwise latch.await() below never returns.
            @Override public void onError(FeedException error) { fatal.set(error); latch.countDown(); }
            @Override public void onComplete() { latch.countDown(); }
        });
        if (cliArgs.showProgress()) {
            Thread progressPrinter = new Thread(() -> {
                try {
                    while ( ! latch.await(10, TimeUnit.SECONDS)) {
                        synchronized (printMonitor) { printBenchmarkResult(System.nanoTime() - startNanos, feedClient.stats(), systemError); }
                    }
                }
                catch (InterruptedException | IOException ignored) { }
            });
            // Daemon thread: must not keep the JVM alive if feeding terminates abruptly.
            progressPrinter.setDaemon(true);
            progressPrinter.start();
        }
        latch.await();
        if (cliArgs.benchmarkModeEnabled()) {
            printBenchmarkResult(System.nanoTime() - startNanos, feedClient.stats(), systemOut);
        }
        if (fatal.get() != null) throw fatal.get();
    }
    return 0;
} catch (CliArguments.CliArgumentsException | IOException | FeedException e) {
    return handleException(verbose, e);
} catch (Exception e) {
    return handleException(verbose, "Unknown failure: " + e.getMessage(), e);
}
} | @Override public void onError(FeedException error) { fatal.set(error); } | private int run(String[] rawArgs) {
boolean verbose = false;
try {
CliArguments cliArgs = CliArguments.fromRawArgs(rawArgs);
verbose = cliArgs.verboseSpecified();
if (cliArgs.helpSpecified()) {
cliArgs.printHelp(systemOut);
return 0;
}
if (cliArgs.versionSpecified()) {
systemOut.println(Vespa.VERSION);
return 0;
}
try (InputStream in = createFeedInputStream(cliArgs);
FeedClient feedClient = createFeedClient(cliArgs);
JsonFeeder feeder = createJsonFeeder(feedClient, cliArgs)) {
CountDownLatch latch = new CountDownLatch(1);
AtomicReference<FeedException> fatal = new AtomicReference<>();
long startNanos = System.nanoTime();
feeder.feedMany(in, new ResultCallback() {
@Override public void onNextResult(Result result, FeedException error) { handleResult(result, error, cliArgs); }
@Override public void onError(FeedException error) { fatal.set(error); latch.countDown(); }
@Override public void onComplete() { latch.countDown(); }
});
if (cliArgs.showProgress()) {
Thread progressPrinter = new Thread(() -> {
try {
while ( ! latch.await(10, TimeUnit.SECONDS)) {
synchronized (printMonitor) { printBenchmarkResult(System.nanoTime() - startNanos, feedClient.stats(), systemError);}
}
}
catch (InterruptedException | IOException ignored) { }
});
progressPrinter.setDaemon(true);
progressPrinter.start();
}
latch.await();
if (cliArgs.benchmarkModeEnabled()) {
printBenchmarkResult(System.nanoTime() - startNanos, feedClient.stats(), systemOut);
}
if (fatal.get() != null) throw fatal.get();
}
return 0;
} catch (CliArguments.CliArgumentsException | IOException | FeedException e) {
return handleException(verbose, e);
} catch (Exception e) {
return handleException(verbose, "Unknown failure: " + e.getMessage(), e);
}
} | class CliClient {
private final PrintStream systemOut;
private final PrintStream systemError;
private final InputStream systemIn;
private final Properties systemProperties;
private final Map<String, String> environmentVariables;
private final Object printMonitor = new Object();
private CliClient(PrintStream systemOut, PrintStream systemError, InputStream systemIn,
Properties systemProperties, Map<String, String> environmentVariables) {
this.systemOut = systemOut;
this.systemError = systemError;
this.systemIn = systemIn;
this.systemProperties = systemProperties;
this.environmentVariables = environmentVariables;
}
public static void main(String[] args) {
CliClient client = new CliClient(System.out, System.err, System.in, System.getProperties(), System.getenv());
int exitCode = client.run(args);
System.exit(exitCode);
}
private void handleResult(Result result, FeedException error, CliArguments args) {
if (error != null) {
if (args.showErrors()) synchronized (printMonitor) {
systemError.println(error.getMessage());
if (error instanceof ResultException) ((ResultException) error).getTrace().ifPresent(systemError::println);
if (args.verboseSpecified()) error.printStackTrace(systemError);
}
}
else {
if (args.showSuccesses()) synchronized (printMonitor) {
systemError.println(result.documentId() + ": " + result.type());
result.traceMessage().ifPresent(systemError::println);
result.resultMessage().ifPresent(systemError::println);
}
}
}
private static FeedClient createFeedClient(CliArguments cliArgs) throws CliArguments.CliArgumentsException {
FeedClientBuilder builder = FeedClientBuilder.create(cliArgs.endpoint());
cliArgs.connections().ifPresent(builder::setConnectionsPerEndpoint);
cliArgs.maxStreamsPerConnection().ifPresent(builder::setMaxStreamPerConnection);
if (cliArgs.sslHostnameVerificationDisabled()) {
builder.setHostnameVerifier(AcceptAllHostnameVerifier.INSTANCE);
}
cliArgs.certificateAndKey().ifPresent(c -> builder.setCertificate(c.certificateFile, c.privateKeyFile));
cliArgs.caCertificates().ifPresent(builder::setCaCertificatesFile);
cliArgs.headers().forEach(builder::addRequestHeader);
return builder.build();
}
private static JsonFeeder createJsonFeeder(FeedClient feedClient, CliArguments cliArgs) throws CliArguments.CliArgumentsException, IOException {
JsonFeeder.Builder builder = JsonFeeder.builder(feedClient);
cliArgs.timeout().ifPresent(builder::withTimeout);
cliArgs.route().ifPresent(builder::withRoute);
cliArgs.traceLevel().ifPresent(builder::withTracelevel);
return builder.build();
}
// Reads the feed from stdin when requested; otherwise opens the configured input file.
// NOTE(review): assumes inputFile() is present whenever readFeedFromStandardInput() is false — confirm in CliArguments.
private InputStream createFeedInputStream(CliArguments cliArgs) throws CliArguments.CliArgumentsException, IOException {
return cliArgs.readFeedFromStandardInput() ? systemIn : Files.newInputStream(cliArgs.inputFile().get());
}
private int handleException(boolean verbose, Exception e) { return handleException(verbose, e.getMessage(), e); }
private int handleException(boolean verbose, String message, Exception exception) {
systemError.println(message);
if (verbose) {
exception.printStackTrace(systemError);
}
return 1;
}
private static class AcceptAllHostnameVerifier implements HostnameVerifier {
static final AcceptAllHostnameVerifier INSTANCE = new AcceptAllHostnameVerifier();
@Override public boolean verify(String hostname, SSLSession session) { return true; }
}
/**
* Serializes benchmark statistics as pretty-printed JSON to the given stream.
*
* @param durationNanos total feed duration in nanoseconds
* @param stats accumulated operation statistics from the feed client
* @param systemOut the stream the JSON report is written to
* @throws IOException if writing to the stream fails
*/
static void printBenchmarkResult(long durationNanos, OperationStats stats, OutputStream systemOut) throws IOException {
JsonFactory factory = new JsonFactory();
long okCount = stats.successes();
long errorCount = stats.requests() - okCount;
// Math.max guards against division by zero for a (theoretical) zero-duration run.
double throughput = okCount * 1e9 / Math.max(1, durationNanos);
try (JsonGenerator generator = factory.createGenerator(systemOut).useDefaultPrettyPrinter()) {
generator.writeStartObject();
generator.writeNumberField("feeder.runtime", durationNanos / 1_000_000);
generator.writeNumberField("feeder.okcount", okCount);
generator.writeNumberField("feeder.errorcount", errorCount);
generator.writeNumberField("feeder.throughput", throughput);
generator.writeNumberField("feeder.minlatency", stats.minLatencyMillis());
generator.writeNumberField("feeder.avglatency", stats.averageLatencyMillis());
generator.writeNumberField("feeder.maxlatency", stats.maxLatencyMillis());
generator.writeNumberField("feeder.bytessent", stats.bytesSent());
generator.writeNumberField("feeder.bytesreceived", stats.bytesReceived());
// Response-code histogram: HTTP status -> count.
generator.writeObjectFieldStart("feeder.responsecodes");
for (Map.Entry<Integer, Long> entry : stats.responsesByCode().entrySet())
generator.writeNumberField(Integer.toString(entry.getKey()), entry.getValue());
generator.writeEndObject();
generator.writeEndObject();
}
}
} | class CliClient {
private final PrintStream systemOut;
private final PrintStream systemError;
private final InputStream systemIn;
private final Properties systemProperties;
private final Map<String, String> environmentVariables;
private final Object printMonitor = new Object();
private CliClient(PrintStream systemOut, PrintStream systemError, InputStream systemIn,
Properties systemProperties, Map<String, String> environmentVariables) {
this.systemOut = systemOut;
this.systemError = systemError;
this.systemIn = systemIn;
this.systemProperties = systemProperties;
this.environmentVariables = environmentVariables;
}
public static void main(String[] args) {
CliClient client = new CliClient(System.out, System.err, System.in, System.getProperties(), System.getenv());
int exitCode = client.run(args);
System.exit(exitCode);
}
private void handleResult(Result result, FeedException error, CliArguments args) {
if (error != null) {
if (args.showErrors()) synchronized (printMonitor) {
systemError.println(error.getMessage());
if (error instanceof ResultException) ((ResultException) error).getTrace().ifPresent(systemError::println);
if (args.verboseSpecified()) error.printStackTrace(systemError);
}
}
else {
if (args.showSuccesses()) synchronized (printMonitor) {
systemError.println(result.documentId() + ": " + result.type());
result.traceMessage().ifPresent(systemError::println);
result.resultMessage().ifPresent(systemError::println);
}
}
}
private static FeedClient createFeedClient(CliArguments cliArgs) throws CliArguments.CliArgumentsException {
FeedClientBuilder builder = FeedClientBuilder.create(cliArgs.endpoint());
cliArgs.connections().ifPresent(builder::setConnectionsPerEndpoint);
cliArgs.maxStreamsPerConnection().ifPresent(builder::setMaxStreamPerConnection);
if (cliArgs.sslHostnameVerificationDisabled()) {
builder.setHostnameVerifier(AcceptAllHostnameVerifier.INSTANCE);
}
cliArgs.certificateAndKey().ifPresent(c -> builder.setCertificate(c.certificateFile, c.privateKeyFile));
cliArgs.caCertificates().ifPresent(builder::setCaCertificatesFile);
cliArgs.headers().forEach(builder::addRequestHeader);
builder.setDryrun(cliArgs.dryrunEnabled());
return builder.build();
}
private static JsonFeeder createJsonFeeder(FeedClient feedClient, CliArguments cliArgs) throws CliArguments.CliArgumentsException, IOException {
JsonFeeder.Builder builder = JsonFeeder.builder(feedClient);
cliArgs.timeout().ifPresent(builder::withTimeout);
cliArgs.route().ifPresent(builder::withRoute);
cliArgs.traceLevel().ifPresent(builder::withTracelevel);
return builder.build();
}
private InputStream createFeedInputStream(CliArguments cliArgs) throws CliArguments.CliArgumentsException, IOException {
return cliArgs.readFeedFromStandardInput() ? systemIn : Files.newInputStream(cliArgs.inputFile().get());
}
private int handleException(boolean verbose, Exception e) { return handleException(verbose, e.getMessage(), e); }
private int handleException(boolean verbose, String message, Exception exception) {
systemError.println(message);
if (verbose) {
exception.printStackTrace(systemError);
}
return 1;
}
private static class AcceptAllHostnameVerifier implements HostnameVerifier {
static final AcceptAllHostnameVerifier INSTANCE = new AcceptAllHostnameVerifier();
@Override public boolean verify(String hostname, SSLSession session) { return true; }
}
static void printBenchmarkResult(long durationNanos, OperationStats stats, OutputStream systemOut) throws IOException {
JsonFactory factory = new JsonFactory();
long okCount = stats.successes();
long errorCount = stats.requests() - okCount;
double throughput = okCount * 1e9 / Math.max(1, durationNanos);
try (JsonGenerator generator = factory.createGenerator(systemOut).useDefaultPrettyPrinter()) {
generator.writeStartObject();
generator.writeNumberField("feeder.runtime", durationNanos / 1_000_000);
generator.writeNumberField("feeder.okcount", okCount);
generator.writeNumberField("feeder.errorcount", errorCount);
generator.writeNumberField("feeder.throughput", throughput);
generator.writeNumberField("feeder.minlatency", stats.minLatencyMillis());
generator.writeNumberField("feeder.avglatency", stats.averageLatencyMillis());
generator.writeNumberField("feeder.maxlatency", stats.maxLatencyMillis());
generator.writeNumberField("feeder.bytessent", stats.bytesSent());
generator.writeNumberField("feeder.bytesreceived", stats.bytesReceived());
generator.writeObjectFieldStart("feeder.responsecodes");
for (Map.Entry<Integer, Long> entry : stats.responsesByCode().entrySet())
generator.writeNumberField(Integer.toString(entry.getKey()), entry.getValue());
generator.writeEndObject();
generator.writeEndObject();
}
}
} |
Free threads are ok since this is a CLI, otherwise this would be a potential resource leak. Make sure it's tagged as daemon. | private int run(String[] rawArgs) {
boolean verbose = false;
try {
    CliArguments cliArgs = CliArguments.fromRawArgs(rawArgs);
    verbose = cliArgs.verboseSpecified();
    if (cliArgs.helpSpecified()) {
        cliArgs.printHelp(systemOut);
        return 0;
    }
    if (cliArgs.versionSpecified()) {
        systemOut.println(Vespa.VERSION);
        return 0;
    }
    try (InputStream in = createFeedInputStream(cliArgs);
         FeedClient feedClient = createFeedClient(cliArgs);
         JsonFeeder feeder = createJsonFeeder(feedClient, cliArgs)) {
        CountDownLatch latch = new CountDownLatch(1);
        AtomicReference<FeedException> fatal = new AtomicReference<>();
        long startNanos = System.nanoTime();
        feeder.feedMany(in, new ResultCallback() {
            @Override public void onNextResult(Result result, FeedException error) { handleResult(result, error, cliArgs); }
            // onComplete() is not invoked after a fatal error, so the latch must be
            // released here as well — otherwise latch.await() below never returns.
            @Override public void onError(FeedException error) { fatal.set(error); latch.countDown(); }
            @Override public void onComplete() { latch.countDown(); }
        });
        if (cliArgs.showProgress()) {
            Thread progressPrinter = new Thread(() -> {
                try {
                    while ( ! latch.await(10, TimeUnit.SECONDS)) {
                        synchronized (printMonitor) { printBenchmarkResult(System.nanoTime() - startNanos, feedClient.stats(), systemError); }
                    }
                }
                catch (InterruptedException | IOException ignored) { }
            });
            // Daemon thread: must not keep the JVM alive if feeding terminates abruptly.
            progressPrinter.setDaemon(true);
            progressPrinter.start();
        }
        latch.await();
        if (cliArgs.benchmarkModeEnabled()) {
            printBenchmarkResult(System.nanoTime() - startNanos, feedClient.stats(), systemOut);
        }
        if (fatal.get() != null) throw fatal.get();
    }
    return 0;
} catch (CliArguments.CliArgumentsException | IOException | FeedException e) {
    return handleException(verbose, e);
} catch (Exception e) {
    return handleException(verbose, "Unknown failure: " + e.getMessage(), e);
}
} | new Thread(() -> { | private int run(String[] rawArgs) {
boolean verbose = false;
try {
CliArguments cliArgs = CliArguments.fromRawArgs(rawArgs);
verbose = cliArgs.verboseSpecified();
if (cliArgs.helpSpecified()) {
cliArgs.printHelp(systemOut);
return 0;
}
if (cliArgs.versionSpecified()) {
systemOut.println(Vespa.VERSION);
return 0;
}
try (InputStream in = createFeedInputStream(cliArgs);
FeedClient feedClient = createFeedClient(cliArgs);
JsonFeeder feeder = createJsonFeeder(feedClient, cliArgs)) {
CountDownLatch latch = new CountDownLatch(1);
AtomicReference<FeedException> fatal = new AtomicReference<>();
long startNanos = System.nanoTime();
feeder.feedMany(in, new ResultCallback() {
@Override public void onNextResult(Result result, FeedException error) { handleResult(result, error, cliArgs); }
@Override public void onError(FeedException error) { fatal.set(error); latch.countDown(); }
@Override public void onComplete() { latch.countDown(); }
});
if (cliArgs.showProgress()) {
Thread progressPrinter = new Thread(() -> {
try {
while ( ! latch.await(10, TimeUnit.SECONDS)) {
synchronized (printMonitor) { printBenchmarkResult(System.nanoTime() - startNanos, feedClient.stats(), systemError);}
}
}
catch (InterruptedException | IOException ignored) { }
});
progressPrinter.setDaemon(true);
progressPrinter.start();
}
latch.await();
if (cliArgs.benchmarkModeEnabled()) {
printBenchmarkResult(System.nanoTime() - startNanos, feedClient.stats(), systemOut);
}
if (fatal.get() != null) throw fatal.get();
}
return 0;
} catch (CliArguments.CliArgumentsException | IOException | FeedException e) {
return handleException(verbose, e);
} catch (Exception e) {
return handleException(verbose, "Unknown failure: " + e.getMessage(), e);
}
} | class CliClient {
private final PrintStream systemOut;
private final PrintStream systemError;
private final InputStream systemIn;
private final Properties systemProperties;
private final Map<String, String> environmentVariables;
private final Object printMonitor = new Object();
private CliClient(PrintStream systemOut, PrintStream systemError, InputStream systemIn,
Properties systemProperties, Map<String, String> environmentVariables) {
this.systemOut = systemOut;
this.systemError = systemError;
this.systemIn = systemIn;
this.systemProperties = systemProperties;
this.environmentVariables = environmentVariables;
}
public static void main(String[] args) {
CliClient client = new CliClient(System.out, System.err, System.in, System.getProperties(), System.getenv());
int exitCode = client.run(args);
System.exit(exitCode);
}
private void handleResult(Result result, FeedException error, CliArguments args) {
if (error != null) {
if (args.showErrors()) synchronized (printMonitor) {
systemError.println(error.getMessage());
if (error instanceof ResultException) ((ResultException) error).getTrace().ifPresent(systemError::println);
if (args.verboseSpecified()) error.printStackTrace(systemError);
}
}
else {
if (args.showSuccesses()) synchronized (printMonitor) {
systemError.println(result.documentId() + ": " + result.type());
result.traceMessage().ifPresent(systemError::println);
result.resultMessage().ifPresent(systemError::println);
}
}
}
private static FeedClient createFeedClient(CliArguments cliArgs) throws CliArguments.CliArgumentsException {
FeedClientBuilder builder = FeedClientBuilder.create(cliArgs.endpoint());
cliArgs.connections().ifPresent(builder::setConnectionsPerEndpoint);
cliArgs.maxStreamsPerConnection().ifPresent(builder::setMaxStreamPerConnection);
if (cliArgs.sslHostnameVerificationDisabled()) {
builder.setHostnameVerifier(AcceptAllHostnameVerifier.INSTANCE);
}
cliArgs.certificateAndKey().ifPresent(c -> builder.setCertificate(c.certificateFile, c.privateKeyFile));
cliArgs.caCertificates().ifPresent(builder::setCaCertificatesFile);
cliArgs.headers().forEach(builder::addRequestHeader);
return builder.build();
}
private static JsonFeeder createJsonFeeder(FeedClient feedClient, CliArguments cliArgs) throws CliArguments.CliArgumentsException, IOException {
JsonFeeder.Builder builder = JsonFeeder.builder(feedClient);
cliArgs.timeout().ifPresent(builder::withTimeout);
cliArgs.route().ifPresent(builder::withRoute);
cliArgs.traceLevel().ifPresent(builder::withTracelevel);
return builder.build();
}
private InputStream createFeedInputStream(CliArguments cliArgs) throws CliArguments.CliArgumentsException, IOException {
return cliArgs.readFeedFromStandardInput() ? systemIn : Files.newInputStream(cliArgs.inputFile().get());
}
private int handleException(boolean verbose, Exception e) { return handleException(verbose, e.getMessage(), e); }
private int handleException(boolean verbose, String message, Exception exception) {
systemError.println(message);
if (verbose) {
exception.printStackTrace(systemError);
}
return 1;
}
private static class AcceptAllHostnameVerifier implements HostnameVerifier {
static final AcceptAllHostnameVerifier INSTANCE = new AcceptAllHostnameVerifier();
@Override public boolean verify(String hostname, SSLSession session) { return true; }
}
static void printBenchmarkResult(long durationNanos, OperationStats stats, OutputStream systemOut) throws IOException {
JsonFactory factory = new JsonFactory();
long okCount = stats.successes();
long errorCount = stats.requests() - okCount;
double throughput = okCount * 1e9 / Math.max(1, durationNanos);
try (JsonGenerator generator = factory.createGenerator(systemOut).useDefaultPrettyPrinter()) {
generator.writeStartObject();
generator.writeNumberField("feeder.runtime", durationNanos / 1_000_000);
generator.writeNumberField("feeder.okcount", okCount);
generator.writeNumberField("feeder.errorcount", errorCount);
generator.writeNumberField("feeder.throughput", throughput);
generator.writeNumberField("feeder.minlatency", stats.minLatencyMillis());
generator.writeNumberField("feeder.avglatency", stats.averageLatencyMillis());
generator.writeNumberField("feeder.maxlatency", stats.maxLatencyMillis());
generator.writeNumberField("feeder.bytessent", stats.bytesSent());
generator.writeNumberField("feeder.bytesreceived", stats.bytesReceived());
generator.writeObjectFieldStart("feeder.responsecodes");
for (Map.Entry<Integer, Long> entry : stats.responsesByCode().entrySet())
generator.writeNumberField(Integer.toString(entry.getKey()), entry.getValue());
generator.writeEndObject();
generator.writeEndObject();
}
}
} | class CliClient {
private final PrintStream systemOut;
private final PrintStream systemError;
private final InputStream systemIn;
private final Properties systemProperties;
private final Map<String, String> environmentVariables;
private final Object printMonitor = new Object();
private CliClient(PrintStream systemOut, PrintStream systemError, InputStream systemIn,
Properties systemProperties, Map<String, String> environmentVariables) {
this.systemOut = systemOut;
this.systemError = systemError;
this.systemIn = systemIn;
this.systemProperties = systemProperties;
this.environmentVariables = environmentVariables;
}
public static void main(String[] args) {
CliClient client = new CliClient(System.out, System.err, System.in, System.getProperties(), System.getenv());
int exitCode = client.run(args);
System.exit(exitCode);
}
private void handleResult(Result result, FeedException error, CliArguments args) {
if (error != null) {
if (args.showErrors()) synchronized (printMonitor) {
systemError.println(error.getMessage());
if (error instanceof ResultException) ((ResultException) error).getTrace().ifPresent(systemError::println);
if (args.verboseSpecified()) error.printStackTrace(systemError);
}
}
else {
if (args.showSuccesses()) synchronized (printMonitor) {
systemError.println(result.documentId() + ": " + result.type());
result.traceMessage().ifPresent(systemError::println);
result.resultMessage().ifPresent(systemError::println);
}
}
}
private static FeedClient createFeedClient(CliArguments cliArgs) throws CliArguments.CliArgumentsException {
FeedClientBuilder builder = FeedClientBuilder.create(cliArgs.endpoint());
cliArgs.connections().ifPresent(builder::setConnectionsPerEndpoint);
cliArgs.maxStreamsPerConnection().ifPresent(builder::setMaxStreamPerConnection);
if (cliArgs.sslHostnameVerificationDisabled()) {
builder.setHostnameVerifier(AcceptAllHostnameVerifier.INSTANCE);
}
cliArgs.certificateAndKey().ifPresent(c -> builder.setCertificate(c.certificateFile, c.privateKeyFile));
cliArgs.caCertificates().ifPresent(builder::setCaCertificatesFile);
cliArgs.headers().forEach(builder::addRequestHeader);
builder.setDryrun(cliArgs.dryrunEnabled());
return builder.build();
}
private static JsonFeeder createJsonFeeder(FeedClient feedClient, CliArguments cliArgs) throws CliArguments.CliArgumentsException, IOException {
JsonFeeder.Builder builder = JsonFeeder.builder(feedClient);
cliArgs.timeout().ifPresent(builder::withTimeout);
cliArgs.route().ifPresent(builder::withRoute);
cliArgs.traceLevel().ifPresent(builder::withTracelevel);
return builder.build();
}
private InputStream createFeedInputStream(CliArguments cliArgs) throws CliArguments.CliArgumentsException, IOException {
return cliArgs.readFeedFromStandardInput() ? systemIn : Files.newInputStream(cliArgs.inputFile().get());
}
private int handleException(boolean verbose, Exception e) { return handleException(verbose, e.getMessage(), e); }
private int handleException(boolean verbose, String message, Exception exception) {
systemError.println(message);
if (verbose) {
exception.printStackTrace(systemError);
}
return 1;
}
private static class AcceptAllHostnameVerifier implements HostnameVerifier {
static final AcceptAllHostnameVerifier INSTANCE = new AcceptAllHostnameVerifier();
@Override public boolean verify(String hostname, SSLSession session) { return true; }
}
static void printBenchmarkResult(long durationNanos, OperationStats stats, OutputStream systemOut) throws IOException {
JsonFactory factory = new JsonFactory();
long okCount = stats.successes();
long errorCount = stats.requests() - okCount;
double throughput = okCount * 1e9 / Math.max(1, durationNanos);
try (JsonGenerator generator = factory.createGenerator(systemOut).useDefaultPrettyPrinter()) {
generator.writeStartObject();
generator.writeNumberField("feeder.runtime", durationNanos / 1_000_000);
generator.writeNumberField("feeder.okcount", okCount);
generator.writeNumberField("feeder.errorcount", errorCount);
generator.writeNumberField("feeder.throughput", throughput);
generator.writeNumberField("feeder.minlatency", stats.minLatencyMillis());
generator.writeNumberField("feeder.avglatency", stats.averageLatencyMillis());
generator.writeNumberField("feeder.maxlatency", stats.maxLatencyMillis());
generator.writeNumberField("feeder.bytessent", stats.bytesSent());
generator.writeNumberField("feeder.bytesreceived", stats.bytesReceived());
generator.writeObjectFieldStart("feeder.responsecodes");
for (Map.Entry<Integer, Long> entry : stats.responsesByCode().entrySet())
generator.writeNumberField(Integer.toString(entry.getKey()), entry.getValue());
generator.writeEndObject();
generator.writeEndObject();
}
}
} |
Oh, then it definitely should :)
boolean verbose = false;
try {
CliArguments cliArgs = CliArguments.fromRawArgs(rawArgs);
verbose = cliArgs.verboseSpecified();
if (cliArgs.helpSpecified()) {
cliArgs.printHelp(systemOut);
return 0;
}
if (cliArgs.versionSpecified()) {
systemOut.println(Vespa.VERSION);
return 0;
}
try (InputStream in = createFeedInputStream(cliArgs);
FeedClient feedClient = createFeedClient(cliArgs);
JsonFeeder feeder = createJsonFeeder(feedClient, cliArgs)) {
CountDownLatch latch = new CountDownLatch(1);
AtomicReference<FeedException> fatal = new AtomicReference<>();
long startNanos = System.nanoTime();
feeder.feedMany(in, new ResultCallback() {
@Override public void onNextResult(Result result, FeedException error) { handleResult(result, error, cliArgs); }
@Override public void onError(FeedException error) { fatal.set(error); }
@Override public void onComplete() { latch.countDown(); }
});
if (cliArgs.showProgress()) {
new Thread(() -> {
try {
while ( ! latch.await(10, TimeUnit.SECONDS)) {
synchronized (printMonitor) { printBenchmarkResult(System.nanoTime() - startNanos, feedClient.stats(), systemError);}
}
}
catch (InterruptedException | IOException ignored) { }
}).start();
}
latch.await();
if (cliArgs.benchmarkModeEnabled()) {
printBenchmarkResult(System.nanoTime() - startNanos, feedClient.stats(), systemOut);
}
if (fatal.get() != null) throw fatal.get();
}
return 0;
} catch (CliArguments.CliArgumentsException | IOException | FeedException e) {
return handleException(verbose, e);
} catch (Exception e) {
return handleException(verbose, "Unknown failure: " + e.getMessage(), e);
}
} | @Override public void onError(FeedException error) { fatal.set(error); } | private int run(String[] rawArgs) {
boolean verbose = false;
try {
CliArguments cliArgs = CliArguments.fromRawArgs(rawArgs);
verbose = cliArgs.verboseSpecified();
if (cliArgs.helpSpecified()) {
cliArgs.printHelp(systemOut);
return 0;
}
if (cliArgs.versionSpecified()) {
systemOut.println(Vespa.VERSION);
return 0;
}
try (InputStream in = createFeedInputStream(cliArgs);
FeedClient feedClient = createFeedClient(cliArgs);
JsonFeeder feeder = createJsonFeeder(feedClient, cliArgs)) {
CountDownLatch latch = new CountDownLatch(1);
AtomicReference<FeedException> fatal = new AtomicReference<>();
long startNanos = System.nanoTime();
feeder.feedMany(in, new ResultCallback() {
@Override public void onNextResult(Result result, FeedException error) { handleResult(result, error, cliArgs); }
@Override public void onError(FeedException error) { fatal.set(error); latch.countDown(); }
@Override public void onComplete() { latch.countDown(); }
});
if (cliArgs.showProgress()) {
Thread progressPrinter = new Thread(() -> {
try {
while ( ! latch.await(10, TimeUnit.SECONDS)) {
synchronized (printMonitor) { printBenchmarkResult(System.nanoTime() - startNanos, feedClient.stats(), systemError);}
}
}
catch (InterruptedException | IOException ignored) { }
});
progressPrinter.setDaemon(true);
progressPrinter.start();
}
latch.await();
if (cliArgs.benchmarkModeEnabled()) {
printBenchmarkResult(System.nanoTime() - startNanos, feedClient.stats(), systemOut);
}
if (fatal.get() != null) throw fatal.get();
}
return 0;
} catch (CliArguments.CliArgumentsException | IOException | FeedException e) {
return handleException(verbose, e);
} catch (Exception e) {
return handleException(verbose, "Unknown failure: " + e.getMessage(), e);
}
} | class CliClient {
private final PrintStream systemOut;
private final PrintStream systemError;
private final InputStream systemIn;
private final Properties systemProperties;
private final Map<String, String> environmentVariables;
private final Object printMonitor = new Object();
private CliClient(PrintStream systemOut, PrintStream systemError, InputStream systemIn,
Properties systemProperties, Map<String, String> environmentVariables) {
this.systemOut = systemOut;
this.systemError = systemError;
this.systemIn = systemIn;
this.systemProperties = systemProperties;
this.environmentVariables = environmentVariables;
}
public static void main(String[] args) {
CliClient client = new CliClient(System.out, System.err, System.in, System.getProperties(), System.getenv());
int exitCode = client.run(args);
System.exit(exitCode);
}
private void handleResult(Result result, FeedException error, CliArguments args) {
if (error != null) {
if (args.showErrors()) synchronized (printMonitor) {
systemError.println(error.getMessage());
if (error instanceof ResultException) ((ResultException) error).getTrace().ifPresent(systemError::println);
if (args.verboseSpecified()) error.printStackTrace(systemError);
}
}
else {
if (args.showSuccesses()) synchronized (printMonitor) {
systemError.println(result.documentId() + ": " + result.type());
result.traceMessage().ifPresent(systemError::println);
result.resultMessage().ifPresent(systemError::println);
}
}
}
private static FeedClient createFeedClient(CliArguments cliArgs) throws CliArguments.CliArgumentsException {
FeedClientBuilder builder = FeedClientBuilder.create(cliArgs.endpoint());
cliArgs.connections().ifPresent(builder::setConnectionsPerEndpoint);
cliArgs.maxStreamsPerConnection().ifPresent(builder::setMaxStreamPerConnection);
if (cliArgs.sslHostnameVerificationDisabled()) {
builder.setHostnameVerifier(AcceptAllHostnameVerifier.INSTANCE);
}
cliArgs.certificateAndKey().ifPresent(c -> builder.setCertificate(c.certificateFile, c.privateKeyFile));
cliArgs.caCertificates().ifPresent(builder::setCaCertificatesFile);
cliArgs.headers().forEach(builder::addRequestHeader);
return builder.build();
}
private static JsonFeeder createJsonFeeder(FeedClient feedClient, CliArguments cliArgs) throws CliArguments.CliArgumentsException, IOException {
JsonFeeder.Builder builder = JsonFeeder.builder(feedClient);
cliArgs.timeout().ifPresent(builder::withTimeout);
cliArgs.route().ifPresent(builder::withRoute);
cliArgs.traceLevel().ifPresent(builder::withTracelevel);
return builder.build();
}
private InputStream createFeedInputStream(CliArguments cliArgs) throws CliArguments.CliArgumentsException, IOException {
return cliArgs.readFeedFromStandardInput() ? systemIn : Files.newInputStream(cliArgs.inputFile().get());
}
private int handleException(boolean verbose, Exception e) { return handleException(verbose, e.getMessage(), e); }
private int handleException(boolean verbose, String message, Exception exception) {
systemError.println(message);
if (verbose) {
exception.printStackTrace(systemError);
}
return 1;
}
private static class AcceptAllHostnameVerifier implements HostnameVerifier {
static final AcceptAllHostnameVerifier INSTANCE = new AcceptAllHostnameVerifier();
@Override public boolean verify(String hostname, SSLSession session) { return true; }
}
static void printBenchmarkResult(long durationNanos, OperationStats stats, OutputStream systemOut) throws IOException {
JsonFactory factory = new JsonFactory();
long okCount = stats.successes();
long errorCount = stats.requests() - okCount;
double throughput = okCount * 1e9 / Math.max(1, durationNanos);
try (JsonGenerator generator = factory.createGenerator(systemOut).useDefaultPrettyPrinter()) {
generator.writeStartObject();
generator.writeNumberField("feeder.runtime", durationNanos / 1_000_000);
generator.writeNumberField("feeder.okcount", okCount);
generator.writeNumberField("feeder.errorcount", errorCount);
generator.writeNumberField("feeder.throughput", throughput);
generator.writeNumberField("feeder.minlatency", stats.minLatencyMillis());
generator.writeNumberField("feeder.avglatency", stats.averageLatencyMillis());
generator.writeNumberField("feeder.maxlatency", stats.maxLatencyMillis());
generator.writeNumberField("feeder.bytessent", stats.bytesSent());
generator.writeNumberField("feeder.bytesreceived", stats.bytesReceived());
generator.writeObjectFieldStart("feeder.responsecodes");
for (Map.Entry<Integer, Long> entry : stats.responsesByCode().entrySet())
generator.writeNumberField(Integer.toString(entry.getKey()), entry.getValue());
generator.writeEndObject();
generator.writeEndObject();
}
}
} | class CliClient {
private final PrintStream systemOut;
private final PrintStream systemError;
private final InputStream systemIn;
private final Properties systemProperties;
private final Map<String, String> environmentVariables;
private final Object printMonitor = new Object();
private CliClient(PrintStream systemOut, PrintStream systemError, InputStream systemIn,
Properties systemProperties, Map<String, String> environmentVariables) {
this.systemOut = systemOut;
this.systemError = systemError;
this.systemIn = systemIn;
this.systemProperties = systemProperties;
this.environmentVariables = environmentVariables;
}
public static void main(String[] args) {
CliClient client = new CliClient(System.out, System.err, System.in, System.getProperties(), System.getenv());
int exitCode = client.run(args);
System.exit(exitCode);
}
private void handleResult(Result result, FeedException error, CliArguments args) {
if (error != null) {
if (args.showErrors()) synchronized (printMonitor) {
systemError.println(error.getMessage());
if (error instanceof ResultException) ((ResultException) error).getTrace().ifPresent(systemError::println);
if (args.verboseSpecified()) error.printStackTrace(systemError);
}
}
else {
if (args.showSuccesses()) synchronized (printMonitor) {
systemError.println(result.documentId() + ": " + result.type());
result.traceMessage().ifPresent(systemError::println);
result.resultMessage().ifPresent(systemError::println);
}
}
}
private static FeedClient createFeedClient(CliArguments cliArgs) throws CliArguments.CliArgumentsException {
FeedClientBuilder builder = FeedClientBuilder.create(cliArgs.endpoint());
cliArgs.connections().ifPresent(builder::setConnectionsPerEndpoint);
cliArgs.maxStreamsPerConnection().ifPresent(builder::setMaxStreamPerConnection);
if (cliArgs.sslHostnameVerificationDisabled()) {
builder.setHostnameVerifier(AcceptAllHostnameVerifier.INSTANCE);
}
cliArgs.certificateAndKey().ifPresent(c -> builder.setCertificate(c.certificateFile, c.privateKeyFile));
cliArgs.caCertificates().ifPresent(builder::setCaCertificatesFile);
cliArgs.headers().forEach(builder::addRequestHeader);
builder.setDryrun(cliArgs.dryrunEnabled());
return builder.build();
}
private static JsonFeeder createJsonFeeder(FeedClient feedClient, CliArguments cliArgs) throws CliArguments.CliArgumentsException, IOException {
JsonFeeder.Builder builder = JsonFeeder.builder(feedClient);
cliArgs.timeout().ifPresent(builder::withTimeout);
cliArgs.route().ifPresent(builder::withRoute);
cliArgs.traceLevel().ifPresent(builder::withTracelevel);
return builder.build();
}
private InputStream createFeedInputStream(CliArguments cliArgs) throws CliArguments.CliArgumentsException, IOException {
return cliArgs.readFeedFromStandardInput() ? systemIn : Files.newInputStream(cliArgs.inputFile().get());
}
private int handleException(boolean verbose, Exception e) { return handleException(verbose, e.getMessage(), e); }
private int handleException(boolean verbose, String message, Exception exception) {
systemError.println(message);
if (verbose) {
exception.printStackTrace(systemError);
}
return 1;
}
private static class AcceptAllHostnameVerifier implements HostnameVerifier {
static final AcceptAllHostnameVerifier INSTANCE = new AcceptAllHostnameVerifier();
@Override public boolean verify(String hostname, SSLSession session) { return true; }
}
static void printBenchmarkResult(long durationNanos, OperationStats stats, OutputStream systemOut) throws IOException {
JsonFactory factory = new JsonFactory();
long okCount = stats.successes();
long errorCount = stats.requests() - okCount;
double throughput = okCount * 1e9 / Math.max(1, durationNanos);
try (JsonGenerator generator = factory.createGenerator(systemOut).useDefaultPrettyPrinter()) {
generator.writeStartObject();
generator.writeNumberField("feeder.runtime", durationNanos / 1_000_000);
generator.writeNumberField("feeder.okcount", okCount);
generator.writeNumberField("feeder.errorcount", errorCount);
generator.writeNumberField("feeder.throughput", throughput);
generator.writeNumberField("feeder.minlatency", stats.minLatencyMillis());
generator.writeNumberField("feeder.avglatency", stats.averageLatencyMillis());
generator.writeNumberField("feeder.maxlatency", stats.maxLatencyMillis());
generator.writeNumberField("feeder.bytessent", stats.bytesSent());
generator.writeNumberField("feeder.bytesreceived", stats.bytesReceived());
generator.writeObjectFieldStart("feeder.responsecodes");
for (Map.Entry<Integer, Long> entry : stats.responsesByCode().entrySet())
generator.writeNumberField(Integer.toString(entry.getKey()), entry.getValue());
generator.writeEndObject();
generator.writeEndObject();
}
}
} |
Will do. | private int run(String[] rawArgs) {
boolean verbose = false;
try {
CliArguments cliArgs = CliArguments.fromRawArgs(rawArgs);
verbose = cliArgs.verboseSpecified();
if (cliArgs.helpSpecified()) {
cliArgs.printHelp(systemOut);
return 0;
}
if (cliArgs.versionSpecified()) {
systemOut.println(Vespa.VERSION);
return 0;
}
try (InputStream in = createFeedInputStream(cliArgs);
FeedClient feedClient = createFeedClient(cliArgs);
JsonFeeder feeder = createJsonFeeder(feedClient, cliArgs)) {
CountDownLatch latch = new CountDownLatch(1);
AtomicReference<FeedException> fatal = new AtomicReference<>();
long startNanos = System.nanoTime();
feeder.feedMany(in, new ResultCallback() {
@Override public void onNextResult(Result result, FeedException error) { handleResult(result, error, cliArgs); }
@Override public void onError(FeedException error) { fatal.set(error); }
@Override public void onComplete() { latch.countDown(); }
});
if (cliArgs.showProgress()) {
new Thread(() -> {
try {
while ( ! latch.await(10, TimeUnit.SECONDS)) {
synchronized (printMonitor) { printBenchmarkResult(System.nanoTime() - startNanos, feedClient.stats(), systemError);}
}
}
catch (InterruptedException | IOException ignored) { }
}).start();
}
latch.await();
if (cliArgs.benchmarkModeEnabled()) {
printBenchmarkResult(System.nanoTime() - startNanos, feedClient.stats(), systemOut);
}
if (fatal.get() != null) throw fatal.get();
}
return 0;
} catch (CliArguments.CliArgumentsException | IOException | FeedException e) {
return handleException(verbose, e);
} catch (Exception e) {
return handleException(verbose, "Unknown failure: " + e.getMessage(), e);
}
} | new Thread(() -> { | private int run(String[] rawArgs) {
boolean verbose = false;
try {
CliArguments cliArgs = CliArguments.fromRawArgs(rawArgs);
verbose = cliArgs.verboseSpecified();
if (cliArgs.helpSpecified()) {
cliArgs.printHelp(systemOut);
return 0;
}
if (cliArgs.versionSpecified()) {
systemOut.println(Vespa.VERSION);
return 0;
}
try (InputStream in = createFeedInputStream(cliArgs);
FeedClient feedClient = createFeedClient(cliArgs);
JsonFeeder feeder = createJsonFeeder(feedClient, cliArgs)) {
CountDownLatch latch = new CountDownLatch(1);
AtomicReference<FeedException> fatal = new AtomicReference<>();
long startNanos = System.nanoTime();
feeder.feedMany(in, new ResultCallback() {
@Override public void onNextResult(Result result, FeedException error) { handleResult(result, error, cliArgs); }
@Override public void onError(FeedException error) { fatal.set(error); latch.countDown(); }
@Override public void onComplete() { latch.countDown(); }
});
if (cliArgs.showProgress()) {
Thread progressPrinter = new Thread(() -> {
try {
while ( ! latch.await(10, TimeUnit.SECONDS)) {
synchronized (printMonitor) { printBenchmarkResult(System.nanoTime() - startNanos, feedClient.stats(), systemError);}
}
}
catch (InterruptedException | IOException ignored) { }
});
progressPrinter.setDaemon(true);
progressPrinter.start();
}
latch.await();
if (cliArgs.benchmarkModeEnabled()) {
printBenchmarkResult(System.nanoTime() - startNanos, feedClient.stats(), systemOut);
}
if (fatal.get() != null) throw fatal.get();
}
return 0;
} catch (CliArguments.CliArgumentsException | IOException | FeedException e) {
return handleException(verbose, e);
} catch (Exception e) {
return handleException(verbose, "Unknown failure: " + e.getMessage(), e);
}
} | class CliClient {
private final PrintStream systemOut;
private final PrintStream systemError;
private final InputStream systemIn;
private final Properties systemProperties;
private final Map<String, String> environmentVariables;
private final Object printMonitor = new Object();
private CliClient(PrintStream systemOut, PrintStream systemError, InputStream systemIn,
Properties systemProperties, Map<String, String> environmentVariables) {
this.systemOut = systemOut;
this.systemError = systemError;
this.systemIn = systemIn;
this.systemProperties = systemProperties;
this.environmentVariables = environmentVariables;
}
public static void main(String[] args) {
CliClient client = new CliClient(System.out, System.err, System.in, System.getProperties(), System.getenv());
int exitCode = client.run(args);
System.exit(exitCode);
}
private void handleResult(Result result, FeedException error, CliArguments args) {
if (error != null) {
if (args.showErrors()) synchronized (printMonitor) {
systemError.println(error.getMessage());
if (error instanceof ResultException) ((ResultException) error).getTrace().ifPresent(systemError::println);
if (args.verboseSpecified()) error.printStackTrace(systemError);
}
}
else {
if (args.showSuccesses()) synchronized (printMonitor) {
systemError.println(result.documentId() + ": " + result.type());
result.traceMessage().ifPresent(systemError::println);
result.resultMessage().ifPresent(systemError::println);
}
}
}
private static FeedClient createFeedClient(CliArguments cliArgs) throws CliArguments.CliArgumentsException {
FeedClientBuilder builder = FeedClientBuilder.create(cliArgs.endpoint());
cliArgs.connections().ifPresent(builder::setConnectionsPerEndpoint);
cliArgs.maxStreamsPerConnection().ifPresent(builder::setMaxStreamPerConnection);
if (cliArgs.sslHostnameVerificationDisabled()) {
builder.setHostnameVerifier(AcceptAllHostnameVerifier.INSTANCE);
}
cliArgs.certificateAndKey().ifPresent(c -> builder.setCertificate(c.certificateFile, c.privateKeyFile));
cliArgs.caCertificates().ifPresent(builder::setCaCertificatesFile);
cliArgs.headers().forEach(builder::addRequestHeader);
return builder.build();
}
private static JsonFeeder createJsonFeeder(FeedClient feedClient, CliArguments cliArgs) throws CliArguments.CliArgumentsException, IOException {
JsonFeeder.Builder builder = JsonFeeder.builder(feedClient);
cliArgs.timeout().ifPresent(builder::withTimeout);
cliArgs.route().ifPresent(builder::withRoute);
cliArgs.traceLevel().ifPresent(builder::withTracelevel);
return builder.build();
}
private InputStream createFeedInputStream(CliArguments cliArgs) throws CliArguments.CliArgumentsException, IOException {
return cliArgs.readFeedFromStandardInput() ? systemIn : Files.newInputStream(cliArgs.inputFile().get());
}
private int handleException(boolean verbose, Exception e) { return handleException(verbose, e.getMessage(), e); }
private int handleException(boolean verbose, String message, Exception exception) {
systemError.println(message);
if (verbose) {
exception.printStackTrace(systemError);
}
return 1;
}
private static class AcceptAllHostnameVerifier implements HostnameVerifier {
static final AcceptAllHostnameVerifier INSTANCE = new AcceptAllHostnameVerifier();
@Override public boolean verify(String hostname, SSLSession session) { return true; }
}
static void printBenchmarkResult(long durationNanos, OperationStats stats, OutputStream systemOut) throws IOException {
JsonFactory factory = new JsonFactory();
long okCount = stats.successes();
long errorCount = stats.requests() - okCount;
double throughput = okCount * 1e9 / Math.max(1, durationNanos);
try (JsonGenerator generator = factory.createGenerator(systemOut).useDefaultPrettyPrinter()) {
generator.writeStartObject();
generator.writeNumberField("feeder.runtime", durationNanos / 1_000_000);
generator.writeNumberField("feeder.okcount", okCount);
generator.writeNumberField("feeder.errorcount", errorCount);
generator.writeNumberField("feeder.throughput", throughput);
generator.writeNumberField("feeder.minlatency", stats.minLatencyMillis());
generator.writeNumberField("feeder.avglatency", stats.averageLatencyMillis());
generator.writeNumberField("feeder.maxlatency", stats.maxLatencyMillis());
generator.writeNumberField("feeder.bytessent", stats.bytesSent());
generator.writeNumberField("feeder.bytesreceived", stats.bytesReceived());
generator.writeObjectFieldStart("feeder.responsecodes");
for (Map.Entry<Integer, Long> entry : stats.responsesByCode().entrySet())
generator.writeNumberField(Integer.toString(entry.getKey()), entry.getValue());
generator.writeEndObject();
generator.writeEndObject();
}
}
} | class CliClient {
private final PrintStream systemOut;
private final PrintStream systemError;
private final InputStream systemIn;
private final Properties systemProperties;
private final Map<String, String> environmentVariables;
private final Object printMonitor = new Object();
private CliClient(PrintStream systemOut, PrintStream systemError, InputStream systemIn,
Properties systemProperties, Map<String, String> environmentVariables) {
this.systemOut = systemOut;
this.systemError = systemError;
this.systemIn = systemIn;
this.systemProperties = systemProperties;
this.environmentVariables = environmentVariables;
}
public static void main(String[] args) {
CliClient client = new CliClient(System.out, System.err, System.in, System.getProperties(), System.getenv());
int exitCode = client.run(args);
System.exit(exitCode);
}
private void handleResult(Result result, FeedException error, CliArguments args) {
if (error != null) {
if (args.showErrors()) synchronized (printMonitor) {
systemError.println(error.getMessage());
if (error instanceof ResultException) ((ResultException) error).getTrace().ifPresent(systemError::println);
if (args.verboseSpecified()) error.printStackTrace(systemError);
}
}
else {
if (args.showSuccesses()) synchronized (printMonitor) {
systemError.println(result.documentId() + ": " + result.type());
result.traceMessage().ifPresent(systemError::println);
result.resultMessage().ifPresent(systemError::println);
}
}
}
private static FeedClient createFeedClient(CliArguments cliArgs) throws CliArguments.CliArgumentsException {
FeedClientBuilder builder = FeedClientBuilder.create(cliArgs.endpoint());
cliArgs.connections().ifPresent(builder::setConnectionsPerEndpoint);
cliArgs.maxStreamsPerConnection().ifPresent(builder::setMaxStreamPerConnection);
if (cliArgs.sslHostnameVerificationDisabled()) {
builder.setHostnameVerifier(AcceptAllHostnameVerifier.INSTANCE);
}
cliArgs.certificateAndKey().ifPresent(c -> builder.setCertificate(c.certificateFile, c.privateKeyFile));
cliArgs.caCertificates().ifPresent(builder::setCaCertificatesFile);
cliArgs.headers().forEach(builder::addRequestHeader);
builder.setDryrun(cliArgs.dryrunEnabled());
return builder.build();
}
private static JsonFeeder createJsonFeeder(FeedClient feedClient, CliArguments cliArgs) throws CliArguments.CliArgumentsException, IOException {
JsonFeeder.Builder builder = JsonFeeder.builder(feedClient);
cliArgs.timeout().ifPresent(builder::withTimeout);
cliArgs.route().ifPresent(builder::withRoute);
cliArgs.traceLevel().ifPresent(builder::withTracelevel);
return builder.build();
}
private InputStream createFeedInputStream(CliArguments cliArgs) throws CliArguments.CliArgumentsException, IOException {
return cliArgs.readFeedFromStandardInput() ? systemIn : Files.newInputStream(cliArgs.inputFile().get());
}
private int handleException(boolean verbose, Exception e) { return handleException(verbose, e.getMessage(), e); }
private int handleException(boolean verbose, String message, Exception exception) {
systemError.println(message);
if (verbose) {
exception.printStackTrace(systemError);
}
return 1;
}
private static class AcceptAllHostnameVerifier implements HostnameVerifier {
static final AcceptAllHostnameVerifier INSTANCE = new AcceptAllHostnameVerifier();
@Override public boolean verify(String hostname, SSLSession session) { return true; }
}
static void printBenchmarkResult(long durationNanos, OperationStats stats, OutputStream systemOut) throws IOException {
JsonFactory factory = new JsonFactory();
long okCount = stats.successes();
long errorCount = stats.requests() - okCount;
double throughput = okCount * 1e9 / Math.max(1, durationNanos);
try (JsonGenerator generator = factory.createGenerator(systemOut).useDefaultPrettyPrinter()) {
generator.writeStartObject();
generator.writeNumberField("feeder.runtime", durationNanos / 1_000_000);
generator.writeNumberField("feeder.okcount", okCount);
generator.writeNumberField("feeder.errorcount", errorCount);
generator.writeNumberField("feeder.throughput", throughput);
generator.writeNumberField("feeder.minlatency", stats.minLatencyMillis());
generator.writeNumberField("feeder.avglatency", stats.averageLatencyMillis());
generator.writeNumberField("feeder.maxlatency", stats.maxLatencyMillis());
generator.writeNumberField("feeder.bytessent", stats.bytesSent());
generator.writeNumberField("feeder.bytesreceived", stats.bytesReceived());
generator.writeObjectFieldStart("feeder.responsecodes");
for (Map.Entry<Integer, Long> entry : stats.responsesByCode().entrySet())
generator.writeNumberField(Integer.toString(entry.getKey()), entry.getValue());
generator.writeEndObject();
generator.writeEndObject();
}
}
} |
Can this be replaced with `AtomicReference.compareAndSet`? There is a potential race here if `complete()` is invoked concurrently from multiple threads. | void complete() {
completion.get().run();
if (dependency.get() != null) dependency.getAndSet(null).complete();
} | if (dependency.get() != null) dependency.getAndSet(null).complete(); | void complete() {
completion.get().run();
RetriableFuture<T> toComplete = dependency.getAndSet(null);
if (toComplete != null) toComplete.complete();
} | class RetriableFuture<T> extends CompletableFuture<T> {
private final AtomicReference<Runnable> completion = new AtomicReference<>();
private final AtomicReference<RetriableFuture<T>> dependency = new AtomicReference<>();
private RetriableFuture() {
completion.set(() -> completeExceptionally(new FeedException("Operation aborted")));
}
/** Complete now with the last result or error. */
/** Ensures the dependency is completed whenever this is. */
void dependOn(RetriableFuture<T> dependency) {
this.dependency.set(dependency);
if (isDone()) dependency.complete();
}
/** Set the result of the last attempt at completing the computation represented by this. */
void set(T result, Throwable thrown) {
completion.set(thrown != null ? () -> completeExceptionally(thrown)
: () -> complete(result));
}
} | class RetriableFuture<T> extends CompletableFuture<T> {
private final AtomicReference<Runnable> completion = new AtomicReference<>();
private final AtomicReference<RetriableFuture<T>> dependency = new AtomicReference<>();
private RetriableFuture() {
completion.set(() -> completeExceptionally(new FeedException("Operation aborted")));
}
/** Complete now with the last result or error. */
/** Ensures the dependency is completed whenever this is. */
void dependOn(RetriableFuture<T> dependency) {
this.dependency.set(dependency);
if (isDone()) dependency.complete();
}
/** Set the result of the last attempt at completing the computation represented by this. */
void set(T result, Throwable thrown) {
completion.set(thrown != null ? () -> completeExceptionally(thrown)
: () -> complete(result));
}
} |
Don't know what current should be, except that it shouldn't be `null` >_< The race is handled inside `CompletableFuture`. | void complete() {
completion.get().run();
if (dependency.get() != null) dependency.getAndSet(null).complete();
} | if (dependency.get() != null) dependency.getAndSet(null).complete(); | void complete() {
completion.get().run();
RetriableFuture<T> toComplete = dependency.getAndSet(null);
if (toComplete != null) toComplete.complete();
} | class RetriableFuture<T> extends CompletableFuture<T> {
private final AtomicReference<Runnable> completion = new AtomicReference<>();
private final AtomicReference<RetriableFuture<T>> dependency = new AtomicReference<>();
private RetriableFuture() {
completion.set(() -> completeExceptionally(new FeedException("Operation aborted")));
}
/** Complete now with the last result or error. */
/** Ensures the dependency is completed whenever this is. */
void dependOn(RetriableFuture<T> dependency) {
this.dependency.set(dependency);
if (isDone()) dependency.complete();
}
/** Set the result of the last attempt at completing the computation represented by this. */
void set(T result, Throwable thrown) {
completion.set(thrown != null ? () -> completeExceptionally(thrown)
: () -> complete(result));
}
} | class RetriableFuture<T> extends CompletableFuture<T> {
private final AtomicReference<Runnable> completion = new AtomicReference<>();
private final AtomicReference<RetriableFuture<T>> dependency = new AtomicReference<>();
private RetriableFuture() {
completion.set(() -> completeExceptionally(new FeedException("Operation aborted")));
}
/** Complete now with the last result or error. */
/** Ensures the dependency is completed whenever this is. */
void dependOn(RetriableFuture<T> dependency) {
this.dependency.set(dependency);
if (isDone()) dependency.complete();
}
/** Set the result of the last attempt at completing the computation represented by this. */
void set(T result, Throwable thrown) {
completion.set(thrown != null ? () -> completeExceptionally(thrown)
: () -> complete(result));
}
} |
Ah ... you mean to avoid NPEs. Good point! Will fix. | void complete() {
completion.get().run();
if (dependency.get() != null) dependency.getAndSet(null).complete();
} | if (dependency.get() != null) dependency.getAndSet(null).complete(); | void complete() {
completion.get().run();
RetriableFuture<T> toComplete = dependency.getAndSet(null);
if (toComplete != null) toComplete.complete();
} | class RetriableFuture<T> extends CompletableFuture<T> {
private final AtomicReference<Runnable> completion = new AtomicReference<>();
private final AtomicReference<RetriableFuture<T>> dependency = new AtomicReference<>();
private RetriableFuture() {
completion.set(() -> completeExceptionally(new FeedException("Operation aborted")));
}
/** Complete now with the last result or error. */
/** Ensures the dependency is completed whenever this is. */
void dependOn(RetriableFuture<T> dependency) {
this.dependency.set(dependency);
if (isDone()) dependency.complete();
}
/** Set the result of the last attempt at completing the computation represented by this. */
void set(T result, Throwable thrown) {
completion.set(thrown != null ? () -> completeExceptionally(thrown)
: () -> complete(result));
}
} | class RetriableFuture<T> extends CompletableFuture<T> {
private final AtomicReference<Runnable> completion = new AtomicReference<>();
private final AtomicReference<RetriableFuture<T>> dependency = new AtomicReference<>();
private RetriableFuture() {
completion.set(() -> completeExceptionally(new FeedException("Operation aborted")));
}
/** Complete now with the last result or error. */
/** Ensures the dependency is completed whenever this is. */
void dependOn(RetriableFuture<T> dependency) {
this.dependency.set(dependency);
if (isDone()) dependency.complete();
}
/** Set the result of the last attempt at completing the computation represented by this. */
void set(T result, Throwable thrown) {
completion.set(thrown != null ? () -> completeExceptionally(thrown)
: () -> complete(result));
}
} |
Shouldn't this line be removed? | public boolean allows(ValidationId validationId, Instant now) {
validate(now);
for (Allow override : overrides) {
if (override.allows(validationId, now))
return true;
}
return false;
} | validate(now); | public boolean allows(ValidationId validationId, Instant now) {
for (Allow override : overrides) {
if (override.allows(validationId, now))
return true;
}
return false;
} | class ValidationOverrides {
public static final ValidationOverrides empty = new ValidationOverrides(ImmutableList.of(), "<validation-overrides/>");
/** A special instance which behaves as if it contained a valid allow override for every (valid) validation id */
public static final ValidationOverrides all = new AllowAllValidationOverrides();
private final List<Allow> overrides;
private final String xmlForm;
/** Creates a validation overrides which does not have an xml form */
public ValidationOverrides(List<Allow> overrides) {
this(overrides, null);
}
private ValidationOverrides(List<Allow> overrides, String xmlForm) {
this.overrides = ImmutableList.copyOf(overrides);
this.xmlForm = xmlForm;
}
/** Throws a ValidationException unless all given validation is overridden at this time */
public void invalid(Map<ValidationId, ? extends Collection<String>> messagesByValidationId, Instant now) {
Map<ValidationId, Collection<String>> disallowed = new HashMap<>(messagesByValidationId);
disallowed.keySet().removeIf(id -> allows(id, now));
if ( ! disallowed.isEmpty())
throw new ValidationException(disallowed);
}
/** Throws a ValidationException unless this validation is overridden at this time */
public void invalid(ValidationId validationId, String message, Instant now) {
if ( ! allows(validationId, now))
throw new ValidationException(validationId, message);
}
public boolean allows(String validationIdString, Instant now) {
Optional<ValidationId> validationId = ValidationId.from(validationIdString);
if (validationId.isEmpty()) return false;
return allows(validationId.get(), now);
}
/** Returns whether the given (assumed invalid) change is allowed by this at the moment */
/** Validates overrides (checks 'until' date') */
public boolean validate(Instant now) {
for (Allow override : overrides) {
if (now.plus(Duration.ofDays(30)).isBefore(override.until))
throw new IllegalArgumentException("validation-overrides is invalid: " + override +
" is too far in the future: Max 30 days is allowed");
}
return false;
}
/** Returns the XML form of this, or null if it was not created by fromXml, nor is empty */
public String xmlForm() { return xmlForm; }
public static String toAllowMessage(ValidationId id) {
return "To allow this add <allow until='yyyy-mm-dd'>" + id + "</allow> to validation-overrides.xml" +
", see https:
}
/**
* Returns a ValidationOverrides instance with the content of the given Reader.
*
* @param reader the reader containing a validation-overrides XML structure
* @return a ValidationOverrides from the argument
* @throws IllegalArgumentException if the validation-allows.xml file exists but is invalid
*/
public static ValidationOverrides fromXml(Reader reader) {
try {
return fromXml(IOUtils.readAll(reader));
} catch (IOException e) {
throw new IllegalArgumentException("Could not read deployment spec", e);
}
}
/**
* Returns a ValidationOverrides instance with the content of the given XML string.
* An empty ValidationOverrides is returned if the argument is empty.
*
* @param xmlForm the string which optionally contains a validation-overrides XML structure
* @return a ValidationOverrides from the argument
* @throws IllegalArgumentException if the validation-allows.xml file exists but is invalid
*/
public static ValidationOverrides fromXml(String xmlForm) {
if ( xmlForm.isEmpty()) return ValidationOverrides.empty;
Element root = XML.getDocument(xmlForm).getDocumentElement();
List<ValidationOverrides.Allow> overrides = new ArrayList<>();
for (Element allow : XML.getChildren(root, "allow")) {
Instant until = LocalDate.parse(allow.getAttribute("until"), DateTimeFormatter.ISO_DATE)
.atStartOfDay().atZone(ZoneOffset.UTC).toInstant()
.plus(Duration.ofDays(1));
Optional<ValidationId> validationId = ValidationId.from(XML.getValue(allow));
validationId.ifPresent(id -> overrides.add(new Allow(id, until)));
}
return new ValidationOverrides(overrides, xmlForm);
}
/** A validation override which allows a particular change. Immutable. */
public static class Allow {
private final ValidationId validationId;
private final Instant until;
public Allow(ValidationId validationId, Instant until) {
this.validationId = validationId;
this.until = until;
}
public boolean allows(ValidationId validationId, Instant now) {
return this.validationId.equals(validationId) && now.isBefore(until);
}
@Override
public String toString() { return "allow '" + validationId + "' until " + until; }
}
/**
* A deployment validation exception.
* Deployment validations can be {@link ValidationOverrides overridden} based on their id.
*/
public static class ValidationException extends IllegalArgumentException {
static final long serialVersionUID = 789984668;
private ValidationException(ValidationId validationId, String message) {
super(validationId + ": " + message + ". " + toAllowMessage(validationId));
}
private ValidationException(Map<ValidationId, Collection<String>> messagesById) {
super(messagesById.entrySet().stream()
.map(messages -> messages.getKey() + ":\n\t" +
String.join("\n\t", messages.getValue()) + "\n" +
toAllowMessage(messages.getKey()))
.collect(Collectors.joining("\n")));
}
}
public static class AllowAllValidationOverrides extends ValidationOverrides {
private final DeployLogger logger;
private final ValidationOverrides wrapped;
/** Create an instance of this which doesn't log */
public AllowAllValidationOverrides() {
this(null, null);
}
/** Creates an instance of this which logs what is allows to the given deploy logger */
public AllowAllValidationOverrides(ValidationOverrides wrapped, DeployLogger logger) {
super(List.of());
this.wrapped = wrapped;
this.logger = logger;
}
@Override
public void invalid(ValidationId validationId, String message, Instant now) {
if (wrapped != null && logger != null && ! wrapped.allows(validationId, now))
logger.log(Level.WARNING, "Possibly destructive change '" + validationId + "' allowed");
}
/** Returns whether the given (assumed invalid) change is allowed by this at the moment */
@Override
public boolean allows(ValidationId validationId, Instant now) {
return true;
}
/** Returns the XML form of this, or null if it was not created by fromXml, nor is empty */
@Override
public String xmlForm() { return null; }
@Override
public String toString() { return "(A validation override which allows everything)"; }
}
} | class ValidationOverrides {
public static final ValidationOverrides empty = new ValidationOverrides(ImmutableList.of(), "<validation-overrides/>");
/** A special instance which behaves as if it contained a valid allow override for every (valid) validation id */
public static final ValidationOverrides all = new AllowAllValidationOverrides();
private final List<Allow> overrides;
private final String xmlForm;
/** Creates a validation overrides which does not have an xml form */
public ValidationOverrides(List<Allow> overrides) {
this(overrides, null);
}
private ValidationOverrides(List<Allow> overrides, String xmlForm) {
this.overrides = ImmutableList.copyOf(overrides);
this.xmlForm = xmlForm;
}
/** Throws a ValidationException unless all given validation is overridden at this time */
public void invalid(Map<ValidationId, ? extends Collection<String>> messagesByValidationId, Instant now) {
Map<ValidationId, Collection<String>> disallowed = new HashMap<>(messagesByValidationId);
disallowed.keySet().removeIf(id -> allows(id, now));
if ( ! disallowed.isEmpty())
throw new ValidationException(disallowed);
}
/** Throws a ValidationException unless this validation is overridden at this time */
public void invalid(ValidationId validationId, String message, Instant now) {
if ( ! allows(validationId, now))
throw new ValidationException(validationId, message);
}
public boolean allows(String validationIdString, Instant now) {
Optional<ValidationId> validationId = ValidationId.from(validationIdString);
if (validationId.isEmpty()) return false;
return allows(validationId.get(), now);
}
/** Returns whether the given (assumed invalid) change is allowed by this at the moment */
/** Validates overrides (checks 'until' date') */
public boolean validate(Instant now) {
for (Allow override : overrides) {
if (now.plus(Duration.ofDays(30)).isBefore(override.until))
throw new IllegalArgumentException("validation-overrides is invalid: " + override +
" is too far in the future: Max 30 days is allowed");
}
return false;
}
/** Returns the XML form of this, or null if it was not created by fromXml, nor is empty */
public String xmlForm() { return xmlForm; }
public static String toAllowMessage(ValidationId id) {
return "To allow this add <allow until='yyyy-mm-dd'>" + id + "</allow> to validation-overrides.xml" +
", see https:
}
/**
* Returns a ValidationOverrides instance with the content of the given Reader.
*
* @param reader the reader containing a validation-overrides XML structure
* @return a ValidationOverrides from the argument
* @throws IllegalArgumentException if the validation-allows.xml file exists but is invalid
*/
public static ValidationOverrides fromXml(Reader reader) {
try {
return fromXml(IOUtils.readAll(reader));
} catch (IOException e) {
throw new IllegalArgumentException("Could not read deployment spec", e);
}
}
/**
* Returns a ValidationOverrides instance with the content of the given XML string.
* An empty ValidationOverrides is returned if the argument is empty.
*
* @param xmlForm the string which optionally contains a validation-overrides XML structure
* @return a ValidationOverrides from the argument
* @throws IllegalArgumentException if the validation-allows.xml file exists but is invalid
*/
public static ValidationOverrides fromXml(String xmlForm) {
if ( xmlForm.isEmpty()) return ValidationOverrides.empty;
Element root = XML.getDocument(xmlForm).getDocumentElement();
List<ValidationOverrides.Allow> overrides = new ArrayList<>();
for (Element allow : XML.getChildren(root, "allow")) {
Instant until = LocalDate.parse(allow.getAttribute("until"), DateTimeFormatter.ISO_DATE)
.atStartOfDay().atZone(ZoneOffset.UTC).toInstant()
.plus(Duration.ofDays(1));
Optional<ValidationId> validationId = ValidationId.from(XML.getValue(allow));
validationId.ifPresent(id -> overrides.add(new Allow(id, until)));
}
return new ValidationOverrides(overrides, xmlForm);
}
/** A validation override which allows a particular change. Immutable. */
public static class Allow {
private final ValidationId validationId;
private final Instant until;
public Allow(ValidationId validationId, Instant until) {
this.validationId = validationId;
this.until = until;
}
public boolean allows(ValidationId validationId, Instant now) {
return this.validationId.equals(validationId) && now.isBefore(until);
}
@Override
public String toString() { return "allow '" + validationId + "' until " + until; }
}
/**
* A deployment validation exception.
* Deployment validations can be {@link ValidationOverrides overridden} based on their id.
*/
public static class ValidationException extends IllegalArgumentException {
static final long serialVersionUID = 789984668;
private ValidationException(ValidationId validationId, String message) {
super(validationId + ": " + message + ". " + toAllowMessage(validationId));
}
private ValidationException(Map<ValidationId, Collection<String>> messagesById) {
super(messagesById.entrySet().stream()
.map(messages -> messages.getKey() + ":\n\t" +
String.join("\n\t", messages.getValue()) + "\n" +
toAllowMessage(messages.getKey()))
.collect(Collectors.joining("\n")));
}
}
public static class AllowAllValidationOverrides extends ValidationOverrides {
private final DeployLogger logger;
private final ValidationOverrides wrapped;
/** Create an instance of this which doesn't log */
public AllowAllValidationOverrides() {
this(null, null);
}
/** Creates an instance of this which logs what is allows to the given deploy logger */
public AllowAllValidationOverrides(ValidationOverrides wrapped, DeployLogger logger) {
super(List.of());
this.wrapped = wrapped;
this.logger = logger;
}
@Override
public void invalid(ValidationId validationId, String message, Instant now) {
if (wrapped != null && logger != null && ! wrapped.allows(validationId, now))
logger.log(Level.WARNING, "Possibly destructive change '" + validationId + "' allowed");
}
/** Returns whether the given (assumed invalid) change is allowed by this at the moment */
@Override
public boolean allows(ValidationId validationId, Instant now) {
return true;
}
/** Returns the XML form of this, or null if it was not created by fromXml, nor is empty */
@Override
public String xmlForm() { return null; }
@Override
public String toString() { return "(A validation override which allows everything)"; }
}
} |
Oops, yes, that should not be there | public boolean allows(ValidationId validationId, Instant now) {
validate(now);
for (Allow override : overrides) {
if (override.allows(validationId, now))
return true;
}
return false;
} | validate(now); | public boolean allows(ValidationId validationId, Instant now) {
for (Allow override : overrides) {
if (override.allows(validationId, now))
return true;
}
return false;
} | class ValidationOverrides {
public static final ValidationOverrides empty = new ValidationOverrides(ImmutableList.of(), "<validation-overrides/>");
/** A special instance which behaves as if it contained a valid allow override for every (valid) validation id */
public static final ValidationOverrides all = new AllowAllValidationOverrides();
private final List<Allow> overrides;
private final String xmlForm;
/** Creates a validation overrides which does not have an xml form */
public ValidationOverrides(List<Allow> overrides) {
this(overrides, null);
}
private ValidationOverrides(List<Allow> overrides, String xmlForm) {
this.overrides = ImmutableList.copyOf(overrides);
this.xmlForm = xmlForm;
}
/** Throws a ValidationException unless all given validation is overridden at this time */
public void invalid(Map<ValidationId, ? extends Collection<String>> messagesByValidationId, Instant now) {
Map<ValidationId, Collection<String>> disallowed = new HashMap<>(messagesByValidationId);
disallowed.keySet().removeIf(id -> allows(id, now));
if ( ! disallowed.isEmpty())
throw new ValidationException(disallowed);
}
/** Throws a ValidationException unless this validation is overridden at this time */
public void invalid(ValidationId validationId, String message, Instant now) {
if ( ! allows(validationId, now))
throw new ValidationException(validationId, message);
}
public boolean allows(String validationIdString, Instant now) {
Optional<ValidationId> validationId = ValidationId.from(validationIdString);
if (validationId.isEmpty()) return false;
return allows(validationId.get(), now);
}
/** Returns whether the given (assumed invalid) change is allowed by this at the moment */
/** Validates overrides (checks 'until' date') */
public boolean validate(Instant now) {
for (Allow override : overrides) {
if (now.plus(Duration.ofDays(30)).isBefore(override.until))
throw new IllegalArgumentException("validation-overrides is invalid: " + override +
" is too far in the future: Max 30 days is allowed");
}
return false;
}
/** Returns the XML form of this, or null if it was not created by fromXml, nor is empty */
public String xmlForm() { return xmlForm; }
public static String toAllowMessage(ValidationId id) {
return "To allow this add <allow until='yyyy-mm-dd'>" + id + "</allow> to validation-overrides.xml" +
", see https:
}
/**
* Returns a ValidationOverrides instance with the content of the given Reader.
*
* @param reader the reader containing a validation-overrides XML structure
* @return a ValidationOverrides from the argument
* @throws IllegalArgumentException if the validation-allows.xml file exists but is invalid
*/
public static ValidationOverrides fromXml(Reader reader) {
try {
return fromXml(IOUtils.readAll(reader));
} catch (IOException e) {
throw new IllegalArgumentException("Could not read deployment spec", e);
}
}
/**
* Returns a ValidationOverrides instance with the content of the given XML string.
* An empty ValidationOverrides is returned if the argument is empty.
*
* @param xmlForm the string which optionally contains a validation-overrides XML structure
* @return a ValidationOverrides from the argument
* @throws IllegalArgumentException if the validation-allows.xml file exists but is invalid
*/
public static ValidationOverrides fromXml(String xmlForm) {
if ( xmlForm.isEmpty()) return ValidationOverrides.empty;
Element root = XML.getDocument(xmlForm).getDocumentElement();
List<ValidationOverrides.Allow> overrides = new ArrayList<>();
for (Element allow : XML.getChildren(root, "allow")) {
Instant until = LocalDate.parse(allow.getAttribute("until"), DateTimeFormatter.ISO_DATE)
.atStartOfDay().atZone(ZoneOffset.UTC).toInstant()
.plus(Duration.ofDays(1));
Optional<ValidationId> validationId = ValidationId.from(XML.getValue(allow));
validationId.ifPresent(id -> overrides.add(new Allow(id, until)));
}
return new ValidationOverrides(overrides, xmlForm);
}
/** A validation override which allows a particular change. Immutable. */
public static class Allow {
private final ValidationId validationId;
private final Instant until;
public Allow(ValidationId validationId, Instant until) {
this.validationId = validationId;
this.until = until;
}
public boolean allows(ValidationId validationId, Instant now) {
return this.validationId.equals(validationId) && now.isBefore(until);
}
@Override
public String toString() { return "allow '" + validationId + "' until " + until; }
}
/**
* A deployment validation exception.
* Deployment validations can be {@link ValidationOverrides overridden} based on their id.
*/
public static class ValidationException extends IllegalArgumentException {
static final long serialVersionUID = 789984668;
private ValidationException(ValidationId validationId, String message) {
super(validationId + ": " + message + ". " + toAllowMessage(validationId));
}
private ValidationException(Map<ValidationId, Collection<String>> messagesById) {
super(messagesById.entrySet().stream()
.map(messages -> messages.getKey() + ":\n\t" +
String.join("\n\t", messages.getValue()) + "\n" +
toAllowMessage(messages.getKey()))
.collect(Collectors.joining("\n")));
}
}
public static class AllowAllValidationOverrides extends ValidationOverrides {
private final DeployLogger logger;
private final ValidationOverrides wrapped;
/** Create an instance of this which doesn't log */
public AllowAllValidationOverrides() {
this(null, null);
}
/** Creates an instance of this which logs what is allows to the given deploy logger */
public AllowAllValidationOverrides(ValidationOverrides wrapped, DeployLogger logger) {
super(List.of());
this.wrapped = wrapped;
this.logger = logger;
}
@Override
public void invalid(ValidationId validationId, String message, Instant now) {
if (wrapped != null && logger != null && ! wrapped.allows(validationId, now))
logger.log(Level.WARNING, "Possibly destructive change '" + validationId + "' allowed");
}
/** Returns whether the given (assumed invalid) change is allowed by this at the moment */
@Override
public boolean allows(ValidationId validationId, Instant now) {
return true;
}
/** Returns the XML form of this, or null if it was not created by fromXml, nor is empty */
@Override
public String xmlForm() { return null; }
@Override
public String toString() { return "(A validation override which allows everything)"; }
}
} | class ValidationOverrides {
public static final ValidationOverrides empty = new ValidationOverrides(ImmutableList.of(), "<validation-overrides/>");
/** A special instance which behaves as if it contained a valid allow override for every (valid) validation id */
public static final ValidationOverrides all = new AllowAllValidationOverrides();
private final List<Allow> overrides;
private final String xmlForm;
/** Creates a validation overrides which does not have an xml form */
public ValidationOverrides(List<Allow> overrides) {
this(overrides, null);
}
private ValidationOverrides(List<Allow> overrides, String xmlForm) {
this.overrides = ImmutableList.copyOf(overrides);
this.xmlForm = xmlForm;
}
/** Throws a ValidationException unless all given validation is overridden at this time */
public void invalid(Map<ValidationId, ? extends Collection<String>> messagesByValidationId, Instant now) {
Map<ValidationId, Collection<String>> disallowed = new HashMap<>(messagesByValidationId);
disallowed.keySet().removeIf(id -> allows(id, now));
if ( ! disallowed.isEmpty())
throw new ValidationException(disallowed);
}
/** Throws a ValidationException unless this validation is overridden at this time */
public void invalid(ValidationId validationId, String message, Instant now) {
if ( ! allows(validationId, now))
throw new ValidationException(validationId, message);
}
public boolean allows(String validationIdString, Instant now) {
Optional<ValidationId> validationId = ValidationId.from(validationIdString);
if (validationId.isEmpty()) return false;
return allows(validationId.get(), now);
}
/** Returns whether the given (assumed invalid) change is allowed by this at the moment */
/** Validates overrides (checks 'until' date') */
public boolean validate(Instant now) {
for (Allow override : overrides) {
if (now.plus(Duration.ofDays(30)).isBefore(override.until))
throw new IllegalArgumentException("validation-overrides is invalid: " + override +
" is too far in the future: Max 30 days is allowed");
}
return false;
}
/** Returns the XML form of this, or null if it was not created by fromXml, nor is empty */
public String xmlForm() { return xmlForm; }
public static String toAllowMessage(ValidationId id) {
return "To allow this add <allow until='yyyy-mm-dd'>" + id + "</allow> to validation-overrides.xml" +
", see https:
}
/**
* Returns a ValidationOverrides instance with the content of the given Reader.
*
* @param reader the reader containing a validation-overrides XML structure
* @return a ValidationOverrides from the argument
* @throws IllegalArgumentException if the validation-allows.xml file exists but is invalid
*/
public static ValidationOverrides fromXml(Reader reader) {
try {
return fromXml(IOUtils.readAll(reader));
} catch (IOException e) {
throw new IllegalArgumentException("Could not read deployment spec", e);
}
}
/**
* Returns a ValidationOverrides instance with the content of the given XML string.
* An empty ValidationOverrides is returned if the argument is empty.
*
* @param xmlForm the string which optionally contains a validation-overrides XML structure
* @return a ValidationOverrides from the argument
* @throws IllegalArgumentException if the validation-allows.xml file exists but is invalid
*/
public static ValidationOverrides fromXml(String xmlForm) {
if ( xmlForm.isEmpty()) return ValidationOverrides.empty;
Element root = XML.getDocument(xmlForm).getDocumentElement();
List<ValidationOverrides.Allow> overrides = new ArrayList<>();
for (Element allow : XML.getChildren(root, "allow")) {
Instant until = LocalDate.parse(allow.getAttribute("until"), DateTimeFormatter.ISO_DATE)
.atStartOfDay().atZone(ZoneOffset.UTC).toInstant()
.plus(Duration.ofDays(1));
Optional<ValidationId> validationId = ValidationId.from(XML.getValue(allow));
validationId.ifPresent(id -> overrides.add(new Allow(id, until)));
}
return new ValidationOverrides(overrides, xmlForm);
}
/** A validation override which allows a particular change. Immutable. */
public static class Allow {
private final ValidationId validationId;
private final Instant until;
public Allow(ValidationId validationId, Instant until) {
this.validationId = validationId;
this.until = until;
}
public boolean allows(ValidationId validationId, Instant now) {
return this.validationId.equals(validationId) && now.isBefore(until);
}
@Override
public String toString() { return "allow '" + validationId + "' until " + until; }
}
/**
* A deployment validation exception.
* Deployment validations can be {@link ValidationOverrides overridden} based on their id.
*/
public static class ValidationException extends IllegalArgumentException {
static final long serialVersionUID = 789984668;
private ValidationException(ValidationId validationId, String message) {
super(validationId + ": " + message + ". " + toAllowMessage(validationId));
}
private ValidationException(Map<ValidationId, Collection<String>> messagesById) {
super(messagesById.entrySet().stream()
.map(messages -> messages.getKey() + ":\n\t" +
String.join("\n\t", messages.getValue()) + "\n" +
toAllowMessage(messages.getKey()))
.collect(Collectors.joining("\n")));
}
}
public static class AllowAllValidationOverrides extends ValidationOverrides {
private final DeployLogger logger;
private final ValidationOverrides wrapped;
/** Create an instance of this which doesn't log */
public AllowAllValidationOverrides() {
this(null, null);
}
/** Creates an instance of this which logs what is allows to the given deploy logger */
public AllowAllValidationOverrides(ValidationOverrides wrapped, DeployLogger logger) {
super(List.of());
this.wrapped = wrapped;
this.logger = logger;
}
@Override
public void invalid(ValidationId validationId, String message, Instant now) {
if (wrapped != null && logger != null && ! wrapped.allows(validationId, now))
logger.log(Level.WARNING, "Possibly destructive change '" + validationId + "' allowed");
}
/** Returns whether the given (assumed invalid) change is allowed by this at the moment */
@Override
public boolean allows(ValidationId validationId, Instant now) {
return true;
}
/** Returns the XML form of this, or null if it was not created by fromXml, nor is empty */
@Override
public String xmlForm() { return null; }
@Override
public String toString() { return "(A validation override which allows everything)"; }
}
} |
Fixed | public boolean allows(ValidationId validationId, Instant now) {
validate(now);
for (Allow override : overrides) {
if (override.allows(validationId, now))
return true;
}
return false;
} | validate(now); | public boolean allows(ValidationId validationId, Instant now) {
for (Allow override : overrides) {
if (override.allows(validationId, now))
return true;
}
return false;
} | class ValidationOverrides {
public static final ValidationOverrides empty = new ValidationOverrides(ImmutableList.of(), "<validation-overrides/>");
/** A special instance which behaves as if it contained a valid allow override for every (valid) validation id */
public static final ValidationOverrides all = new AllowAllValidationOverrides();
private final List<Allow> overrides;
private final String xmlForm;
/** Creates a validation overrides which does not have an xml form */
public ValidationOverrides(List<Allow> overrides) {
this(overrides, null);
}
private ValidationOverrides(List<Allow> overrides, String xmlForm) {
this.overrides = ImmutableList.copyOf(overrides);
this.xmlForm = xmlForm;
}
/** Throws a ValidationException unless all given validation is overridden at this time */
public void invalid(Map<ValidationId, ? extends Collection<String>> messagesByValidationId, Instant now) {
Map<ValidationId, Collection<String>> disallowed = new HashMap<>(messagesByValidationId);
disallowed.keySet().removeIf(id -> allows(id, now));
if ( ! disallowed.isEmpty())
throw new ValidationException(disallowed);
}
/** Throws a ValidationException unless this validation is overridden at this time */
public void invalid(ValidationId validationId, String message, Instant now) {
if ( ! allows(validationId, now))
throw new ValidationException(validationId, message);
}
public boolean allows(String validationIdString, Instant now) {
Optional<ValidationId> validationId = ValidationId.from(validationIdString);
if (validationId.isEmpty()) return false;
return allows(validationId.get(), now);
}
/** Returns whether the given (assumed invalid) change is allowed by this at the moment */
/** Validates overrides (checks 'until' date') */
public boolean validate(Instant now) {
for (Allow override : overrides) {
if (now.plus(Duration.ofDays(30)).isBefore(override.until))
throw new IllegalArgumentException("validation-overrides is invalid: " + override +
" is too far in the future: Max 30 days is allowed");
}
return false;
}
/** Returns the XML form of this, or null if it was not created by fromXml, nor is empty */
public String xmlForm() { return xmlForm; }
public static String toAllowMessage(ValidationId id) {
return "To allow this add <allow until='yyyy-mm-dd'>" + id + "</allow> to validation-overrides.xml" +
", see https:
}
/**
* Returns a ValidationOverrides instance with the content of the given Reader.
*
* @param reader the reader containing a validation-overrides XML structure
* @return a ValidationOverrides from the argument
* @throws IllegalArgumentException if the validation-allows.xml file exists but is invalid
*/
public static ValidationOverrides fromXml(Reader reader) {
try {
return fromXml(IOUtils.readAll(reader));
} catch (IOException e) {
throw new IllegalArgumentException("Could not read deployment spec", e);
}
}
/**
* Returns a ValidationOverrides instance with the content of the given XML string.
* An empty ValidationOverrides is returned if the argument is empty.
*
* @param xmlForm the string which optionally contains a validation-overrides XML structure
* @return a ValidationOverrides from the argument
* @throws IllegalArgumentException if the validation-allows.xml file exists but is invalid
*/
public static ValidationOverrides fromXml(String xmlForm) {
if ( xmlForm.isEmpty()) return ValidationOverrides.empty;
Element root = XML.getDocument(xmlForm).getDocumentElement();
List<ValidationOverrides.Allow> overrides = new ArrayList<>();
for (Element allow : XML.getChildren(root, "allow")) {
Instant until = LocalDate.parse(allow.getAttribute("until"), DateTimeFormatter.ISO_DATE)
.atStartOfDay().atZone(ZoneOffset.UTC).toInstant()
.plus(Duration.ofDays(1));
Optional<ValidationId> validationId = ValidationId.from(XML.getValue(allow));
validationId.ifPresent(id -> overrides.add(new Allow(id, until)));
}
return new ValidationOverrides(overrides, xmlForm);
}
/** A validation override which allows a particular change. Immutable. */
public static class Allow {
private final ValidationId validationId;
private final Instant until;
public Allow(ValidationId validationId, Instant until) {
this.validationId = validationId;
this.until = until;
}
public boolean allows(ValidationId validationId, Instant now) {
return this.validationId.equals(validationId) && now.isBefore(until);
}
@Override
public String toString() { return "allow '" + validationId + "' until " + until; }
}
/**
* A deployment validation exception.
* Deployment validations can be {@link ValidationOverrides overridden} based on their id.
*/
public static class ValidationException extends IllegalArgumentException {
static final long serialVersionUID = 789984668;
private ValidationException(ValidationId validationId, String message) {
super(validationId + ": " + message + ". " + toAllowMessage(validationId));
}
private ValidationException(Map<ValidationId, Collection<String>> messagesById) {
super(messagesById.entrySet().stream()
.map(messages -> messages.getKey() + ":\n\t" +
String.join("\n\t", messages.getValue()) + "\n" +
toAllowMessage(messages.getKey()))
.collect(Collectors.joining("\n")));
}
}
public static class AllowAllValidationOverrides extends ValidationOverrides {
private final DeployLogger logger;
private final ValidationOverrides wrapped;
/** Create an instance of this which doesn't log */
public AllowAllValidationOverrides() {
this(null, null);
}
/** Creates an instance of this which logs what is allows to the given deploy logger */
public AllowAllValidationOverrides(ValidationOverrides wrapped, DeployLogger logger) {
super(List.of());
this.wrapped = wrapped;
this.logger = logger;
}
@Override
public void invalid(ValidationId validationId, String message, Instant now) {
if (wrapped != null && logger != null && ! wrapped.allows(validationId, now))
logger.log(Level.WARNING, "Possibly destructive change '" + validationId + "' allowed");
}
/** Returns whether the given (assumed invalid) change is allowed by this at the moment */
@Override
public boolean allows(ValidationId validationId, Instant now) {
return true;
}
/** Returns the XML form of this, or null if it was not created by fromXml, nor is empty */
@Override
public String xmlForm() { return null; }
@Override
public String toString() { return "(A validation override which allows everything)"; }
}
} | class ValidationOverrides {
public static final ValidationOverrides empty = new ValidationOverrides(ImmutableList.of(), "<validation-overrides/>");
/** A special instance which behaves as if it contained a valid allow override for every (valid) validation id */
public static final ValidationOverrides all = new AllowAllValidationOverrides();
private final List<Allow> overrides;
private final String xmlForm;
/** Creates a validation overrides which does not have an xml form */
public ValidationOverrides(List<Allow> overrides) {
this(overrides, null);
}
private ValidationOverrides(List<Allow> overrides, String xmlForm) {
this.overrides = ImmutableList.copyOf(overrides);
this.xmlForm = xmlForm;
}
/** Throws a ValidationException unless all given validation is overridden at this time */
public void invalid(Map<ValidationId, ? extends Collection<String>> messagesByValidationId, Instant now) {
Map<ValidationId, Collection<String>> disallowed = new HashMap<>(messagesByValidationId);
disallowed.keySet().removeIf(id -> allows(id, now));
if ( ! disallowed.isEmpty())
throw new ValidationException(disallowed);
}
/** Throws a ValidationException unless this validation is overridden at this time */
public void invalid(ValidationId validationId, String message, Instant now) {
if ( ! allows(validationId, now))
throw new ValidationException(validationId, message);
}
public boolean allows(String validationIdString, Instant now) {
Optional<ValidationId> validationId = ValidationId.from(validationIdString);
if (validationId.isEmpty()) return false;
return allows(validationId.get(), now);
}
/** Returns whether the given (assumed invalid) change is allowed by this at the moment */
/** Validates overrides (checks 'until' date') */
public boolean validate(Instant now) {
for (Allow override : overrides) {
if (now.plus(Duration.ofDays(30)).isBefore(override.until))
throw new IllegalArgumentException("validation-overrides is invalid: " + override +
" is too far in the future: Max 30 days is allowed");
}
return false;
}
/** Returns the XML form of this, or null if it was not created by fromXml, nor is empty */
public String xmlForm() { return xmlForm; }
public static String toAllowMessage(ValidationId id) {
return "To allow this add <allow until='yyyy-mm-dd'>" + id + "</allow> to validation-overrides.xml" +
", see https:
}
/**
* Returns a ValidationOverrides instance with the content of the given Reader.
*
* @param reader the reader containing a validation-overrides XML structure
* @return a ValidationOverrides from the argument
* @throws IllegalArgumentException if the validation-allows.xml file exists but is invalid
*/
public static ValidationOverrides fromXml(Reader reader) {
try {
return fromXml(IOUtils.readAll(reader));
} catch (IOException e) {
throw new IllegalArgumentException("Could not read deployment spec", e);
}
}
/**
* Returns a ValidationOverrides instance with the content of the given XML string.
* An empty ValidationOverrides is returned if the argument is empty.
*
* @param xmlForm the string which optionally contains a validation-overrides XML structure
* @return a ValidationOverrides from the argument
* @throws IllegalArgumentException if the validation-allows.xml file exists but is invalid
*/
public static ValidationOverrides fromXml(String xmlForm) {
if ( xmlForm.isEmpty()) return ValidationOverrides.empty;
Element root = XML.getDocument(xmlForm).getDocumentElement();
List<ValidationOverrides.Allow> overrides = new ArrayList<>();
for (Element allow : XML.getChildren(root, "allow")) {
Instant until = LocalDate.parse(allow.getAttribute("until"), DateTimeFormatter.ISO_DATE)
.atStartOfDay().atZone(ZoneOffset.UTC).toInstant()
.plus(Duration.ofDays(1));
Optional<ValidationId> validationId = ValidationId.from(XML.getValue(allow));
validationId.ifPresent(id -> overrides.add(new Allow(id, until)));
}
return new ValidationOverrides(overrides, xmlForm);
}
/** A validation override which allows a particular change. Immutable. */
public static class Allow {
private final ValidationId validationId;
private final Instant until;
public Allow(ValidationId validationId, Instant until) {
this.validationId = validationId;
this.until = until;
}
public boolean allows(ValidationId validationId, Instant now) {
return this.validationId.equals(validationId) && now.isBefore(until);
}
@Override
public String toString() { return "allow '" + validationId + "' until " + until; }
}
/**
* A deployment validation exception.
* Deployment validations can be {@link ValidationOverrides overridden} based on their id.
*/
public static class ValidationException extends IllegalArgumentException {
static final long serialVersionUID = 789984668;
private ValidationException(ValidationId validationId, String message) {
super(validationId + ": " + message + ". " + toAllowMessage(validationId));
}
private ValidationException(Map<ValidationId, Collection<String>> messagesById) {
super(messagesById.entrySet().stream()
.map(messages -> messages.getKey() + ":\n\t" +
String.join("\n\t", messages.getValue()) + "\n" +
toAllowMessage(messages.getKey()))
.collect(Collectors.joining("\n")));
}
}
public static class AllowAllValidationOverrides extends ValidationOverrides {
private final DeployLogger logger;
private final ValidationOverrides wrapped;
/** Create an instance of this which doesn't log */
public AllowAllValidationOverrides() {
this(null, null);
}
/** Creates an instance of this which logs what is allows to the given deploy logger */
public AllowAllValidationOverrides(ValidationOverrides wrapped, DeployLogger logger) {
super(List.of());
this.wrapped = wrapped;
this.logger = logger;
}
@Override
public void invalid(ValidationId validationId, String message, Instant now) {
if (wrapped != null && logger != null && ! wrapped.allows(validationId, now))
logger.log(Level.WARNING, "Possibly destructive change '" + validationId + "' allowed");
}
/** Returns whether the given (assumed invalid) change is allowed by this at the moment */
@Override
public boolean allows(ValidationId validationId, Instant now) {
return true;
}
/** Returns the XML form of this, or null if it was not created by fromXml, nor is empty */
@Override
public String xmlForm() { return null; }
@Override
public String toString() { return "(A validation override which allows everything)"; }
}
} |
If we have just a few documents we can easily end up with a deviation which is more than 1 and 10%. For example, with 2 nodes and 2 documents we have a 50% chance of both documents landing on the same node, and with that isDeviationSmall will still be false. There is some stats formula that will do this right for us, but since we just use it to decide whether to turn on the hits per node reduction optimization, which shouldn't be used unless there's a lot of data anyway, we could also just set isDeviationSmall to false unless the total document count is smaller than some number? | double maxUnbalance(long activeDocs) {
return Math.max(1, activeDocs * MAX_UNBALANCE);
} | return Math.max(1, activeDocs * MAX_UNBALANCE); | double maxUnbalance(long activeDocs) {
return Math.max(1, activeDocs * MAX_UNBALANCE);
} | class Group {
private final int id;
private final ImmutableList<Node> nodes;
private final AtomicBoolean hasSufficientCoverage = new AtomicBoolean(true);
private final AtomicBoolean hasFullCoverage = new AtomicBoolean(true);
private final AtomicLong activeDocuments = new AtomicLong(0);
private final AtomicBoolean isBlockingWrites = new AtomicBoolean(false);
private final AtomicBoolean isContentWellBalanced = new AtomicBoolean(true);
private final static double MAX_UNBALANCE = 0.10;
private static final Logger log = Logger.getLogger(Group.class.getName());
public Group(int id, List<Node> nodes) {
this.id = id;
this.nodes = ImmutableList.copyOf(nodes);
int idx = 0;
for(var node: nodes) {
node.setPathIndex(idx);
idx++;
}
}
/** Returns the unique identity of this group */
public int id() { return id; }
/** Returns the nodes in this group as an immutable list */
public ImmutableList<Node> nodes() { return nodes; }
/**
* Returns whether this group has sufficient active documents
* (compared to other groups) that is should receive traffic
*/
public boolean hasSufficientCoverage() {
return hasSufficientCoverage.get();
}
void setHasSufficientCoverage(boolean sufficientCoverage) {
hasSufficientCoverage.lazySet(sufficientCoverage);
}
public int workingNodes() {
return (int) nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).count();
}
void aggregateNodeValues() {
long activeDocs = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(Node::getActiveDocuments).sum();
activeDocuments.set(activeDocs);
isBlockingWrites.set(nodes.stream().anyMatch(Node::isBlockingWrites));
int numWorkingNodes = workingNodes();
if (numWorkingNodes > 0) {
long average = activeDocs / numWorkingNodes;
long deviation = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(node -> Math.abs(node.getActiveDocuments() - average)).sum();
boolean isDeviationSmall = deviation <= maxUnbalance(activeDocs);
if ((!isContentWellBalanced.get() || isDeviationSmall != isContentWellBalanced.get()) && (activeDocs > 0)) {
log.info("Content is " + (isDeviationSmall ? "" : "not ") + "well balanced. Current deviation = " + deviation*100/activeDocs + " %" +
". activeDocs = " + activeDocs + ", deviation = " + deviation + ", average = " + average);
isContentWellBalanced.set(isDeviationSmall);
}
} else {
isContentWellBalanced.set(true);
}
}
/** Returns the active documents on this group. If unknown, 0 is returned. */
long getActiveDocuments() { return activeDocuments.get(); }
/** Returns whether any node in this group is currently blocking write operations */
public boolean isBlockingWrites() { return isBlockingWrites.get(); }
public boolean isContentWellBalanced() { return isContentWellBalanced.get(); }
public boolean isFullCoverageStatusChanged(boolean hasFullCoverageNow) {
boolean previousState = hasFullCoverage.getAndSet(hasFullCoverageNow);
return previousState != hasFullCoverageNow;
}
@Override
public String toString() { return "group " + id; }
@Override
public int hashCode() { return id; }
@Override
public boolean equals(Object other) {
if (other == this) return true;
if (!(other instanceof Group)) return false;
return ((Group) other).id == this.id;
}
} | class Group {
private final int id;
private final ImmutableList<Node> nodes;
private final AtomicBoolean hasSufficientCoverage = new AtomicBoolean(true);
private final AtomicBoolean hasFullCoverage = new AtomicBoolean(true);
private final AtomicLong activeDocuments = new AtomicLong(0);
private final AtomicBoolean isBlockingWrites = new AtomicBoolean(false);
private final AtomicBoolean isContentWellBalanced = new AtomicBoolean(true);
private final static double MAX_UNBALANCE = 0.10;
private static final Logger log = Logger.getLogger(Group.class.getName());
public Group(int id, List<Node> nodes) {
this.id = id;
this.nodes = ImmutableList.copyOf(nodes);
int idx = 0;
for(var node: nodes) {
node.setPathIndex(idx);
idx++;
}
}
/** Returns the unique identity of this group */
public int id() { return id; }
/** Returns the nodes in this group as an immutable list */
public ImmutableList<Node> nodes() { return nodes; }
/**
* Returns whether this group has sufficient active documents
* (compared to other groups) that is should receive traffic
*/
public boolean hasSufficientCoverage() {
return hasSufficientCoverage.get();
}
void setHasSufficientCoverage(boolean sufficientCoverage) {
hasSufficientCoverage.lazySet(sufficientCoverage);
}
public int workingNodes() {
return (int) nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).count();
}
void aggregateNodeValues() {
long activeDocs = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(Node::getActiveDocuments).sum();
activeDocuments.set(activeDocs);
isBlockingWrites.set(nodes.stream().anyMatch(Node::isBlockingWrites));
int numWorkingNodes = workingNodes();
if (numWorkingNodes > 0) {
long average = activeDocs / numWorkingNodes;
long deviation = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(node -> Math.abs(node.getActiveDocuments() - average)).sum();
boolean isDeviationSmall = deviation <= maxUnbalance(activeDocs);
if ((!isContentWellBalanced.get() || isDeviationSmall != isContentWellBalanced.get()) && (activeDocs > 0)) {
log.info("Content is " + (isDeviationSmall ? "" : "not ") + "well balanced. Current deviation = " + deviation*100/activeDocs + " %" +
". activeDocs = " + activeDocs + ", deviation = " + deviation + ", average = " + average);
isContentWellBalanced.set(isDeviationSmall);
}
} else {
isContentWellBalanced.set(true);
}
}
/** Returns the active documents on this group. If unknown, 0 is returned. */
long getActiveDocuments() { return activeDocuments.get(); }
/** Returns whether any node in this group is currently blocking write operations */
public boolean isBlockingWrites() { return isBlockingWrites.get(); }
public boolean isContentWellBalanced() { return isContentWellBalanced.get(); }
public boolean isFullCoverageStatusChanged(boolean hasFullCoverageNow) {
boolean previousState = hasFullCoverage.getAndSet(hasFullCoverageNow);
return previousState != hasFullCoverageNow;
}
@Override
public String toString() { return "group " + id; }
@Override
public int hashCode() { return id; }
@Override
public boolean equals(Object other) {
if (other == this) return true;
if (!(other instanceof Group)) return false;
return ((Group) other).id == this.id;
}
} |
Yes, I thought about that, not knowing the code and the impact of these changes I didn't change it much. Your suggestions sounds reasonable, though. Any suggestion about what that "small number" should be? | double maxUnbalance(long activeDocs) {
return Math.max(1, activeDocs * MAX_UNBALANCE);
} | return Math.max(1, activeDocs * MAX_UNBALANCE); | double maxUnbalance(long activeDocs) {
return Math.max(1, activeDocs * MAX_UNBALANCE);
} | class Group {
private final int id;
private final ImmutableList<Node> nodes;
private final AtomicBoolean hasSufficientCoverage = new AtomicBoolean(true);
private final AtomicBoolean hasFullCoverage = new AtomicBoolean(true);
private final AtomicLong activeDocuments = new AtomicLong(0);
private final AtomicBoolean isBlockingWrites = new AtomicBoolean(false);
private final AtomicBoolean isContentWellBalanced = new AtomicBoolean(true);
private final static double MAX_UNBALANCE = 0.10;
private static final Logger log = Logger.getLogger(Group.class.getName());
public Group(int id, List<Node> nodes) {
this.id = id;
this.nodes = ImmutableList.copyOf(nodes);
int idx = 0;
for(var node: nodes) {
node.setPathIndex(idx);
idx++;
}
}
/** Returns the unique identity of this group */
public int id() { return id; }
/** Returns the nodes in this group as an immutable list */
public ImmutableList<Node> nodes() { return nodes; }
/**
* Returns whether this group has sufficient active documents
* (compared to other groups) that is should receive traffic
*/
public boolean hasSufficientCoverage() {
return hasSufficientCoverage.get();
}
void setHasSufficientCoverage(boolean sufficientCoverage) {
hasSufficientCoverage.lazySet(sufficientCoverage);
}
public int workingNodes() {
return (int) nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).count();
}
void aggregateNodeValues() {
long activeDocs = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(Node::getActiveDocuments).sum();
activeDocuments.set(activeDocs);
isBlockingWrites.set(nodes.stream().anyMatch(Node::isBlockingWrites));
int numWorkingNodes = workingNodes();
if (numWorkingNodes > 0) {
long average = activeDocs / numWorkingNodes;
long deviation = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(node -> Math.abs(node.getActiveDocuments() - average)).sum();
boolean isDeviationSmall = deviation <= maxUnbalance(activeDocs);
if ((!isContentWellBalanced.get() || isDeviationSmall != isContentWellBalanced.get()) && (activeDocs > 0)) {
log.info("Content is " + (isDeviationSmall ? "" : "not ") + "well balanced. Current deviation = " + deviation*100/activeDocs + " %" +
". activeDocs = " + activeDocs + ", deviation = " + deviation + ", average = " + average);
isContentWellBalanced.set(isDeviationSmall);
}
} else {
isContentWellBalanced.set(true);
}
}
/** Returns the active documents on this group. If unknown, 0 is returned. */
long getActiveDocuments() { return activeDocuments.get(); }
/** Returns whether any node in this group is currently blocking write operations */
public boolean isBlockingWrites() { return isBlockingWrites.get(); }
public boolean isContentWellBalanced() { return isContentWellBalanced.get(); }
public boolean isFullCoverageStatusChanged(boolean hasFullCoverageNow) {
boolean previousState = hasFullCoverage.getAndSet(hasFullCoverageNow);
return previousState != hasFullCoverageNow;
}
@Override
public String toString() { return "group " + id; }
@Override
public int hashCode() { return id; }
@Override
public boolean equals(Object other) {
if (other == this) return true;
if (!(other instanceof Group)) return false;
return ((Group) other).id == this.id;
}
} | class Group {
private final int id;
private final ImmutableList<Node> nodes;
private final AtomicBoolean hasSufficientCoverage = new AtomicBoolean(true);
private final AtomicBoolean hasFullCoverage = new AtomicBoolean(true);
private final AtomicLong activeDocuments = new AtomicLong(0);
private final AtomicBoolean isBlockingWrites = new AtomicBoolean(false);
private final AtomicBoolean isContentWellBalanced = new AtomicBoolean(true);
private final static double MAX_UNBALANCE = 0.10;
private static final Logger log = Logger.getLogger(Group.class.getName());
public Group(int id, List<Node> nodes) {
this.id = id;
this.nodes = ImmutableList.copyOf(nodes);
int idx = 0;
for(var node: nodes) {
node.setPathIndex(idx);
idx++;
}
}
/** Returns the unique identity of this group */
public int id() { return id; }
/** Returns the nodes in this group as an immutable list */
public ImmutableList<Node> nodes() { return nodes; }
/**
* Returns whether this group has sufficient active documents
* (compared to other groups) that is should receive traffic
*/
public boolean hasSufficientCoverage() {
return hasSufficientCoverage.get();
}
void setHasSufficientCoverage(boolean sufficientCoverage) {
hasSufficientCoverage.lazySet(sufficientCoverage);
}
public int workingNodes() {
return (int) nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).count();
}
void aggregateNodeValues() {
long activeDocs = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(Node::getActiveDocuments).sum();
activeDocuments.set(activeDocs);
isBlockingWrites.set(nodes.stream().anyMatch(Node::isBlockingWrites));
int numWorkingNodes = workingNodes();
if (numWorkingNodes > 0) {
long average = activeDocs / numWorkingNodes;
long deviation = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(node -> Math.abs(node.getActiveDocuments() - average)).sum();
boolean isDeviationSmall = deviation <= maxUnbalance(activeDocs);
if ((!isContentWellBalanced.get() || isDeviationSmall != isContentWellBalanced.get()) && (activeDocs > 0)) {
log.info("Content is " + (isDeviationSmall ? "" : "not ") + "well balanced. Current deviation = " + deviation*100/activeDocs + " %" +
". activeDocs = " + activeDocs + ", deviation = " + deviation + ", average = " + average);
isContentWellBalanced.set(isDeviationSmall);
}
} else {
isContentWellBalanced.set(true);
}
}
/** Returns the active documents on this group. If unknown, 0 is returned. */
long getActiveDocuments() { return activeDocuments.get(); }
/** Returns whether any node in this group is currently blocking write operations */
public boolean isBlockingWrites() { return isBlockingWrites.get(); }
public boolean isContentWellBalanced() { return isContentWellBalanced.get(); }
public boolean isFullCoverageStatusChanged(boolean hasFullCoverageNow) {
boolean previousState = hasFullCoverage.getAndSet(hasFullCoverageNow);
return previousState != hasFullCoverageNow;
}
@Override
public String toString() { return "group " + id; }
@Override
public int hashCode() { return id; }
@Override
public boolean equals(Object other) {
if (other == this) return true;
if (!(other instanceof Group)) return false;
return ((Group) other).id == this.id;
}
} |
A hundred maybe? But with that option we should probably also change the name of this ... I take this one if you like. | double maxUnbalance(long activeDocs) {
return Math.max(1, activeDocs * MAX_UNBALANCE);
} | return Math.max(1, activeDocs * MAX_UNBALANCE); | double maxUnbalance(long activeDocs) {
return Math.max(1, activeDocs * MAX_UNBALANCE);
} | class Group {
private final int id;
private final ImmutableList<Node> nodes;
private final AtomicBoolean hasSufficientCoverage = new AtomicBoolean(true);
private final AtomicBoolean hasFullCoverage = new AtomicBoolean(true);
private final AtomicLong activeDocuments = new AtomicLong(0);
private final AtomicBoolean isBlockingWrites = new AtomicBoolean(false);
private final AtomicBoolean isContentWellBalanced = new AtomicBoolean(true);
private final static double MAX_UNBALANCE = 0.10;
private static final Logger log = Logger.getLogger(Group.class.getName());
public Group(int id, List<Node> nodes) {
this.id = id;
this.nodes = ImmutableList.copyOf(nodes);
int idx = 0;
for(var node: nodes) {
node.setPathIndex(idx);
idx++;
}
}
/** Returns the unique identity of this group */
public int id() { return id; }
/** Returns the nodes in this group as an immutable list */
public ImmutableList<Node> nodes() { return nodes; }
/**
* Returns whether this group has sufficient active documents
* (compared to other groups) that is should receive traffic
*/
public boolean hasSufficientCoverage() {
return hasSufficientCoverage.get();
}
void setHasSufficientCoverage(boolean sufficientCoverage) {
hasSufficientCoverage.lazySet(sufficientCoverage);
}
public int workingNodes() {
return (int) nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).count();
}
void aggregateNodeValues() {
long activeDocs = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(Node::getActiveDocuments).sum();
activeDocuments.set(activeDocs);
isBlockingWrites.set(nodes.stream().anyMatch(Node::isBlockingWrites));
int numWorkingNodes = workingNodes();
if (numWorkingNodes > 0) {
long average = activeDocs / numWorkingNodes;
long deviation = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(node -> Math.abs(node.getActiveDocuments() - average)).sum();
boolean isDeviationSmall = deviation <= maxUnbalance(activeDocs);
if ((!isContentWellBalanced.get() || isDeviationSmall != isContentWellBalanced.get()) && (activeDocs > 0)) {
log.info("Content is " + (isDeviationSmall ? "" : "not ") + "well balanced. Current deviation = " + deviation*100/activeDocs + " %" +
". activeDocs = " + activeDocs + ", deviation = " + deviation + ", average = " + average);
isContentWellBalanced.set(isDeviationSmall);
}
} else {
isContentWellBalanced.set(true);
}
}
/** Returns the active documents on this group. If unknown, 0 is returned. */
long getActiveDocuments() { return activeDocuments.get(); }
/** Returns whether any node in this group is currently blocking write operations */
public boolean isBlockingWrites() { return isBlockingWrites.get(); }
public boolean isContentWellBalanced() { return isContentWellBalanced.get(); }
public boolean isFullCoverageStatusChanged(boolean hasFullCoverageNow) {
boolean previousState = hasFullCoverage.getAndSet(hasFullCoverageNow);
return previousState != hasFullCoverageNow;
}
@Override
public String toString() { return "group " + id; }
// The group id is unique within a cluster, so it serves directly as the hash
// code and as the sole basis for equality.
@Override
public int hashCode() { return id; }
@Override
public boolean equals(Object other) {
    if (other == this) return true;
    if (!(other instanceof Group)) return false;
    return ((Group) other).id == this.id;
}
} | class Group {
private final int id;
private final ImmutableList<Node> nodes;
// Mutable state read and written from different threads, hence the atomic types
private final AtomicBoolean hasSufficientCoverage = new AtomicBoolean(true);
private final AtomicBoolean hasFullCoverage = new AtomicBoolean(true);
private final AtomicLong activeDocuments = new AtomicLong(0);
private final AtomicBoolean isBlockingWrites = new AtomicBoolean(false);
private final AtomicBoolean isContentWellBalanced = new AtomicBoolean(true);
// Maximum tolerated sum of per-node deviations, as a fraction of total active docs (10 %)
private final static double MAX_UNBALANCE = 0.10;
private static final Logger log = Logger.getLogger(Group.class.getName());
/** Creates a group with the given id, assigning each node its position in the list as path index. */
public Group(int id, List<Node> nodes) {
    this.id = id;
    this.nodes = ImmutableList.copyOf(nodes);
    for (int i = 0; i < nodes.size(); i++)
        nodes.get(i).setPathIndex(i);
}
/** Returns the unique identity of this group */
public int id() { return id; }
/** Returns the nodes in this group as an immutable list */
public ImmutableList<Node> nodes() { return nodes; }
/**
 * Returns whether this group has sufficient active documents
 * (compared to other groups) that it should receive traffic
 */
public boolean hasSufficientCoverage() {
    return hasSufficientCoverage.get();
}
/** Flags whether this group should receive traffic; lazySet is a plain store, so readers may see it slightly late. */
void setHasSufficientCoverage(boolean sufficientCoverage) {
    hasSufficientCoverage.lazySet(sufficientCoverage);
}
/** Returns the number of nodes whose status is known to be working (unknown/null excluded). */
public int workingNodes() {
    return (int) nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).count();
}
/**
 * Recomputes this group's aggregate metrics from its nodes: total active
 * documents, whether any node blocks writes, and whether documents are evenly
 * balanced across the working nodes.
 */
void aggregateNodeValues() {
    long activeDocs = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(Node::getActiveDocuments).sum();
    activeDocuments.set(activeDocs);
    isBlockingWrites.set(nodes.stream().anyMatch(Node::isBlockingWrites));
    int numWorkingNodes = workingNodes();
    if (numWorkingNodes > 0) {
        long average = activeDocs / numWorkingNodes;
        // Sum of absolute deviations from the per-node average, working nodes only
        long deviation = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(node -> Math.abs(node.getActiveDocuments() - average)).sum();
        boolean isDeviationSmall = deviation <= maxUnbalance(activeDocs);
        // NOTE(review): this condition holds on *every* invocation while the content
        // is unbalanced, not only on status change, so the INFO line below is
        // emitted repeatedly — consider logging on state transitions only.
        if ((!isContentWellBalanced.get() || isDeviationSmall != isContentWellBalanced.get()) && (activeDocs > 0)) {
            log.info("Content is " + (isDeviationSmall ? "" : "not ") + "well balanced. Current deviation = " + deviation*100/activeDocs + " %" +
                    ". activeDocs = " + activeDocs + ", deviation = " + deviation + ", average = " + average);
            isContentWellBalanced.set(isDeviationSmall);
        }
    } else {
        // No working nodes: trivially "balanced" so this group is not penalized
        isContentWellBalanced.set(true);
    }
}
/** Returns the active documents on this group. If unknown, 0 is returned. */
long getActiveDocuments() { return activeDocuments.get(); }
/** Returns whether any node in this group is currently blocking write operations */
public boolean isBlockingWrites() { return isBlockingWrites.get(); }
/** Returns whether active documents are evenly spread over the working nodes, as computed by aggregateNodeValues(). */
public boolean isContentWellBalanced() { return isContentWellBalanced.get(); }
/**
 * Atomically records the new full-coverage state and reports whether it differs
 * from the previous state, so the caller can act on transitions exactly once.
 */
public boolean isFullCoverageStatusChanged(boolean hasFullCoverageNow) {
    boolean previousState = hasFullCoverage.getAndSet(hasFullCoverageNow);
    return previousState != hasFullCoverageNow;
}
@Override
public String toString() { return "group " + id; }
// The group id is unique, so it serves as hash code and equality basis
@Override
public int hashCode() { return id; }
@Override
public boolean equals(Object other) {
    if (other == this) return true;
    if (!(other instanceof Group)) return false;
    return ((Group) other).id == this.id;
}
} |
Yes, please take this one if you have time | double maxUnbalance(long activeDocs) {
return Math.max(1, activeDocs * MAX_UNBALANCE);
} | return Math.max(1, activeDocs * MAX_UNBALANCE); | double maxUnbalance(long activeDocs) {
return Math.max(1, activeDocs * MAX_UNBALANCE);
} | class Group {
private final int id;
private final ImmutableList<Node> nodes;
private final AtomicBoolean hasSufficientCoverage = new AtomicBoolean(true);
private final AtomicBoolean hasFullCoverage = new AtomicBoolean(true);
private final AtomicLong activeDocuments = new AtomicLong(0);
private final AtomicBoolean isBlockingWrites = new AtomicBoolean(false);
private final AtomicBoolean isContentWellBalanced = new AtomicBoolean(true);
private final static double MAX_UNBALANCE = 0.10;
private static final Logger log = Logger.getLogger(Group.class.getName());
/** Creates a group with the given id; each node is told its position in the group as path index. */
public Group(int id, List<Node> nodes) {
    this.id = id;
    this.nodes = ImmutableList.copyOf(nodes);
    int pathIndex = 0;
    for (Node node : this.nodes)
        node.setPathIndex(pathIndex++);
}
/** Returns the unique identity of this group */
public int id() { return id; }
/** Returns the nodes in this group as an immutable list */
public ImmutableList<Node> nodes() { return nodes; }
/**
* Returns whether this group has sufficient active documents
* (compared to other groups) that it should receive traffic
*/
public boolean hasSufficientCoverage() {
return hasSufficientCoverage.get();
}
void setHasSufficientCoverage(boolean sufficientCoverage) {
hasSufficientCoverage.lazySet(sufficientCoverage);
}
public int workingNodes() {
return (int) nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).count();
}
/**
 * Recomputes this group's aggregate metrics from its nodes: the total active
 * document count over working nodes, whether any node is blocking writes, and
 * whether documents are evenly balanced across the working nodes.
 */
void aggregateNodeValues() {
    long activeDocs = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(Node::getActiveDocuments).sum();
    activeDocuments.set(activeDocs);
    isBlockingWrites.set(nodes.stream().anyMatch(Node::isBlockingWrites));
    int numWorkingNodes = workingNodes();
    if (numWorkingNodes > 0) {
        long average = activeDocs / numWorkingNodes;
        // Sum of absolute deviations from the per-node average, working nodes only
        long deviation = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(node -> Math.abs(node.getActiveDocuments() - average)).sum();
        boolean isDeviationSmall = deviation <= maxUnbalance(activeDocs);
        // Fix: the previous condition (!balanced || changed) re-emitted this INFO
        // line on every invocation while content stayed unbalanced, spamming the
        // log. Log (and store) only on an actual status change; the resulting
        // state is identical since re-setting an unchanged value was a no-op.
        if (isDeviationSmall != isContentWellBalanced.get() && (activeDocs > 0)) {
            log.info("Content is " + (isDeviationSmall ? "" : "not ") + "well balanced. Current deviation = " + deviation*100/activeDocs + " %" +
                    ". activeDocs = " + activeDocs + ", deviation = " + deviation + ", average = " + average);
            isContentWellBalanced.set(isDeviationSmall);
        }
    } else {
        // No working nodes: trivially "balanced" so this group is not penalized
        isContentWellBalanced.set(true);
    }
}
/** Returns the active documents on this group. If unknown, 0 is returned. */
long getActiveDocuments() { return activeDocuments.get(); }
/** Returns whether any node in this group is currently blocking write operations */
public boolean isBlockingWrites() { return isBlockingWrites.get(); }
public boolean isContentWellBalanced() { return isContentWellBalanced.get(); }
public boolean isFullCoverageStatusChanged(boolean hasFullCoverageNow) {
boolean previousState = hasFullCoverage.getAndSet(hasFullCoverageNow);
return previousState != hasFullCoverageNow;
}
@Override
public String toString() { return "group " + id; }
@Override
public int hashCode() { return id; }
@Override
public boolean equals(Object other) {
if (other == this) return true;
if (!(other instanceof Group)) return false;
return ((Group) other).id == this.id;
}
} | class Group {
private final int id;
private final ImmutableList<Node> nodes;
private final AtomicBoolean hasSufficientCoverage = new AtomicBoolean(true);
private final AtomicBoolean hasFullCoverage = new AtomicBoolean(true);
private final AtomicLong activeDocuments = new AtomicLong(0);
private final AtomicBoolean isBlockingWrites = new AtomicBoolean(false);
private final AtomicBoolean isContentWellBalanced = new AtomicBoolean(true);
private final static double MAX_UNBALANCE = 0.10;
private static final Logger log = Logger.getLogger(Group.class.getName());
public Group(int id, List<Node> nodes) {
this.id = id;
this.nodes = ImmutableList.copyOf(nodes);
int idx = 0;
for(var node: nodes) {
node.setPathIndex(idx);
idx++;
}
}
/** Returns the unique identity of this group */
public int id() { return id; }
/** Returns the nodes in this group as an immutable list */
public ImmutableList<Node> nodes() { return nodes; }
/**
* Returns whether this group has sufficient active documents
* (compared to other groups) that it should receive traffic
*/
public boolean hasSufficientCoverage() {
return hasSufficientCoverage.get();
}
void setHasSufficientCoverage(boolean sufficientCoverage) {
hasSufficientCoverage.lazySet(sufficientCoverage);
}
public int workingNodes() {
return (int) nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).count();
}
/**
 * Recomputes this group's aggregate metrics from its nodes: the total active
 * document count over working nodes, whether any node is blocking writes, and
 * whether documents are evenly balanced across the working nodes.
 */
void aggregateNodeValues() {
    long activeDocs = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(Node::getActiveDocuments).sum();
    activeDocuments.set(activeDocs);
    isBlockingWrites.set(nodes.stream().anyMatch(Node::isBlockingWrites));
    int numWorkingNodes = workingNodes();
    if (numWorkingNodes > 0) {
        long average = activeDocs / numWorkingNodes;
        // Sum of absolute deviations from the per-node average, working nodes only
        long deviation = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(node -> Math.abs(node.getActiveDocuments() - average)).sum();
        boolean isDeviationSmall = deviation <= maxUnbalance(activeDocs);
        // Fix: the previous condition (!balanced || changed) re-emitted this INFO
        // line on every invocation while content stayed unbalanced, spamming the
        // log. Log (and store) only on an actual status change; the resulting
        // state is identical since re-setting an unchanged value was a no-op.
        if (isDeviationSmall != isContentWellBalanced.get() && (activeDocs > 0)) {
            log.info("Content is " + (isDeviationSmall ? "" : "not ") + "well balanced. Current deviation = " + deviation*100/activeDocs + " %" +
                    ". activeDocs = " + activeDocs + ", deviation = " + deviation + ", average = " + average);
            isContentWellBalanced.set(isDeviationSmall);
        }
    } else {
        // No working nodes: trivially "balanced" so this group is not penalized
        isContentWellBalanced.set(true);
    }
}
/** Returns the active documents on this group. If unknown, 0 is returned. */
long getActiveDocuments() { return activeDocuments.get(); }
/** Returns whether any node in this group is currently blocking write operations */
public boolean isBlockingWrites() { return isBlockingWrites.get(); }
public boolean isContentWellBalanced() { return isContentWellBalanced.get(); }
public boolean isFullCoverageStatusChanged(boolean hasFullCoverageNow) {
boolean previousState = hasFullCoverage.getAndSet(hasFullCoverageNow);
return previousState != hasFullCoverageNow;
}
@Override
public String toString() { return "group " + id; }
@Override
public int hashCode() { return id; }
@Override
public boolean equals(Object other) {
if (other == this) return true;
if (!(other instanceof Group)) return false;
return ((Group) other).id == this.id;
}
} |
There are two locks ... | public void close() {
synchronized (clusterTable.writeLock) {
CairoEngine myEngine = engine.getAndSet(null);
if (myEngine != null) {
myEngine.close();
}
}
} | synchronized (clusterTable.writeLock) { | public void close() {
if (closed.getAndSet(true)) return;
synchronized (nodeTable.writeLock) {
synchronized (clusterTable.writeLock) {
for (SqlCompiler sqlCompiler : sqlCompilerPool)
sqlCompiler.close();
engine.close();
}
}
} | class QuestMetricsDb extends AbstractComponent implements MetricsDb {
private static final Logger log = Logger.getLogger(QuestMetricsDb.class.getName());
private final Table nodeTable;
private final Table clusterTable;
private final Clock clock;
private final String dataDir;
private final AtomicReference<CairoEngine> engine = new AtomicReference<>();
private final ThreadLocal<SqlCompiler> sqlCompiler;
private final AtomicInteger nullRecords = new AtomicInteger();
@Inject
public QuestMetricsDb() {
this(Defaults.getDefaults().underVespaHome("var/db/vespa/autoscaling"), Clock.systemUTC());
}
public QuestMetricsDb(String dataDir, Clock clock) {
this.clock = clock;
if (dataDir.startsWith(Defaults.getDefaults().vespaHome())
&& ! new File(Defaults.getDefaults().vespaHome()).exists())
dataDir = "data";
String logConfig = dataDir + "/quest-log.conf";
IOUtils.createDirectory(logConfig);
IOUtils.writeFile(new File(logConfig), new byte[0]);
System.setProperty("out", logConfig);
this.dataDir = dataDir;
engine.set(new CairoEngine(new DefaultCairoConfiguration(dataDir)));
sqlCompiler = ThreadLocal.withInitial(() -> new SqlCompiler(engine.get()));
nodeTable = new Table(dataDir, "metrics", clock);
clusterTable = new Table(dataDir, "clusterMetrics", clock);
ensureTablesExist();
}
@Override
public Clock clock() { return clock; }
@Override
public void addNodeMetrics(Collection<Pair<String, NodeMetricSnapshot>> snapshots) {
try {
addNodeMetricsBody(snapshots);
}
catch (CairoException e) {
if (e.getMessage().contains("Cannot read offset")) {
nodeTable.repair(e);
addNodeMetricsBody(snapshots);
}
}
}
private void addNodeMetricsBody(Collection<Pair<String, NodeMetricSnapshot>> snapshots) {
synchronized (nodeTable.writeLock) {
try (TableWriter writer = nodeTable.getWriter()) {
for (var snapshot : snapshots) {
Optional<Long> atMillis = nodeTable.adjustOrDiscard(snapshot.getSecond().at());
if (atMillis.isEmpty()) continue;
TableWriter.Row row = writer.newRow(atMillis.get() * 1000);
row.putStr(0, snapshot.getFirst());
row.putFloat(2, (float) snapshot.getSecond().load().cpu());
row.putFloat(3, (float) snapshot.getSecond().load().memory());
row.putFloat(4, (float) snapshot.getSecond().load().disk());
row.putLong(5, snapshot.getSecond().generation());
row.putBool(6, snapshot.getSecond().inService());
row.putBool(7, snapshot.getSecond().stable());
row.putFloat(8, (float) snapshot.getSecond().queryRate());
row.append();
}
writer.commit();
}
}
}
@Override
public void addClusterMetrics(ApplicationId application, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) {
try {
addClusterMetricsBody(application, snapshots);
}
catch (CairoException e) {
if (e.getMessage().contains("Cannot read offset")) {
clusterTable.repair(e);
addClusterMetricsBody(application, snapshots);
}
}
}
private void addClusterMetricsBody(ApplicationId applicationId, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) {
synchronized (clusterTable.writeLock) {
try (TableWriter writer = clusterTable.getWriter()) {
for (var snapshot : snapshots.entrySet()) {
Optional<Long> atMillis = clusterTable.adjustOrDiscard(snapshot.getValue().at());
if (atMillis.isEmpty()) continue;
TableWriter.Row row = writer.newRow(atMillis.get() * 1000);
row.putStr(0, applicationId.serializedForm());
row.putStr(1, snapshot.getKey().value());
row.putFloat(3, (float) snapshot.getValue().queryRate());
row.putFloat(4, (float) snapshot.getValue().writeRate());
row.append();
}
writer.commit();
}
}
}
@Override
public List<NodeTimeseries> getNodeTimeseries(Duration period, Set<String> hostnames) {
try {
var snapshots = getNodeSnapshots(clock.instant().minus(period), hostnames, newContext());
return snapshots.entrySet().stream()
.map(entry -> new NodeTimeseries(entry.getKey(), entry.getValue()))
.collect(Collectors.toList());
}
catch (SqlException e) {
throw new IllegalStateException("Could not read node timeseries data in Quest stored in " + dataDir, e);
}
}
@Override
public ClusterTimeseries getClusterTimeseries(ApplicationId applicationId, ClusterSpec.Id clusterId) {
try {
return getClusterSnapshots(applicationId, clusterId);
}
catch (SqlException e) {
throw new IllegalStateException("Could not read cluster timeseries data in Quest stored in " + dataDir, e);
}
}
public int getNullRecordsCount() { return nullRecords.get(); }
@Override
public void gc() {
nullRecords.set(0);
nodeTable.gc();
clusterTable.gc();
}
@Override
public void deconstruct() { close(); }
/**
 * Creates the node and cluster tables if they do not exist, or brings an
 * existing table up to the current schema (repairing on failure).
 *
 * Fix: removed the stray {@code @Override} annotation that preceded this
 * method — a private method cannot override anything, so the annotation is a
 * compile error (JLS: @Override requires an overridden supertype method).
 */
private void ensureTablesExist() {
    if (nodeTable.exists())
        ensureNodeTableIsUpdated();
    else
        createNodeTable();
    if (clusterTable.exists())
        ensureClusterTableIsUpdated();
    else
        createClusterTable();
}
/** Brings an existing node table up to the current schema, wiping it on failure. */
private void ensureNodeTableIsUpdated() {
    try {
        // NOTE(review): the try block is empty, so the catch below can never fire —
        // schema-migration calls (e.g. ensureColumnExists) appear to have been
        // removed or are yet to be added. TODO confirm.
    } catch (Exception e) {
        nodeTable.repair(e);
    }
}
/** Brings an existing cluster table up to the current schema, wiping it on failure. */
private void ensureClusterTableIsUpdated() {
    try {
        // Status 0 means the table exists; any exception while probing/migrating
        // is treated as corruption and triggers a repair (wipe + recreate)
        if (0 == engine.get().getStatus(newContext().getCairoSecurityContext(), new Path(), clusterTable.name)) {
            // NOTE(review): empty body — the migration statements appear to be missing. TODO confirm.
        }
    } catch (Exception e) {
        clusterTable.repair(e);
    }
}
private void createNodeTable() {
try {
issue("create table " + nodeTable.name +
" (hostname string, at timestamp, cpu_util float, mem_total_util float, disk_util float," +
" application_generation long, inService boolean, stable boolean, queries_rate float)" +
" timestamp(at)" +
"PARTITION BY DAY;",
newContext());
}
catch (SqlException e) {
throw new IllegalStateException("Could not create Quest db table '" + nodeTable.name + "'", e);
}
}
private void createClusterTable() {
try {
issue("create table " + clusterTable.name +
" (application string, cluster string, at timestamp, queries_rate float, write_rate float)" +
" timestamp(at)" +
"PARTITION BY DAY;",
newContext());
}
catch (SqlException e) {
throw new IllegalStateException("Could not create Quest db table '" + clusterTable.name + "'", e);
}
}
private ListMap<String, NodeMetricSnapshot> getNodeSnapshots(Instant startTime,
Set<String> hostnames,
SqlExecutionContext context) throws SqlException {
DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC"));
String from = formatter.format(startTime).substring(0, 19) + ".000000Z";
String to = formatter.format(clock.instant()).substring(0, 19) + ".000000Z";
String sql = "select * from " + nodeTable.name + " where at between('" + from + "', '" + to + "');";
try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) {
ListMap<String, NodeMetricSnapshot> snapshots = new ListMap<>();
try (RecordCursor cursor = factory.getCursor(context)) {
Record record = cursor.getRecord();
while (cursor.hasNext()) {
if (record == null || record.getStr(0) == null) {
nullRecords.incrementAndGet();
continue;
}
String hostname = record.getStr(0).toString();
if (hostnames.isEmpty() || hostnames.contains(hostname)) {
snapshots.put(hostname,
new NodeMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(1) / 1000),
new Load(record.getFloat(2),
record.getFloat(3),
record.getFloat(4)),
record.getLong(5),
record.getBool(6),
record.getBool(7),
record.getFloat(8)));
}
}
}
return snapshots;
}
}
private ClusterTimeseries getClusterSnapshots(ApplicationId application, ClusterSpec.Id cluster) throws SqlException {
String sql = "select * from " + clusterTable.name;
var context = newContext();
try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) {
List<ClusterMetricSnapshot> snapshots = new ArrayList<>();
try (RecordCursor cursor = factory.getCursor(context)) {
Record record = cursor.getRecord();
while (cursor.hasNext()) {
String applicationIdString = record.getStr(0).toString();
if ( ! application.serializedForm().equals(applicationIdString)) continue;
String clusterId = record.getStr(1).toString();
if (cluster.value().equals(clusterId)) {
snapshots.add(new ClusterMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(2) / 1000),
record.getFloat(3),
record.getFloat(4)));
}
}
}
return new ClusterTimeseries(cluster, snapshots);
}
}
/** Issues an SQL statement against the QuestDb engine */
private CompiledQuery issue(String sql, SqlExecutionContext context) throws SqlException {
return sqlCompiler.get().compile(sql, context);
}
private SqlExecutionContext newContext() {
return new SqlExecutionContextImpl(engine.get(), 1);
}
/** A questDb table */
private class Table {
private final Object writeLock = new Object();
private final String name;
private final Clock clock;
private final File dir;
private long highestTimestampAdded = 0;
Table(String dataDir, String name, Clock clock) {
this.name = name;
this.clock = clock;
this.dir = new File(dataDir, name);
IOUtils.createDirectory(dir.getPath());
new File(dir + "/_txn_scoreboard").delete();
}
boolean exists() {
return 0 == engine.get().getStatus(newContext().getCairoSecurityContext(), new Path(), name);
}
TableWriter getWriter() {
return engine.get().getWriter(newContext().getCairoSecurityContext(), name);
}
void gc() {
synchronized (writeLock) {
Instant oldestToKeep = clock.instant().minus(Duration.ofDays(4));
SqlExecutionContext context = newContext();
int partitions = 0;
try {
List<String> removeList = new ArrayList<>();
for (String dirEntry : dir.list()) {
File partitionDir = new File(dir, dirEntry);
if (!partitionDir.isDirectory()) continue;
partitions++;
DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC"));
Instant partitionDay = Instant.from(formatter.parse(dirEntry.substring(0, 10) + "T00:00:00"));
if (partitionDay.isBefore(oldestToKeep))
removeList.add(dirEntry);
}
if (removeList.size() < partitions && !removeList.isEmpty()) {
issue("alter table " + name + " drop partition list " +
removeList.stream().map(dir -> "'" + dir + "'").collect(Collectors.joining(",")),
context);
}
} catch (SqlException e) {
log.log(Level.WARNING, "Failed to gc old metrics data in " + dir + " table " + name, e);
}
}
}
/**
* Repairs this db on corruption.
*
* @param e the exception indicating corruption
*/
private void repair(Exception e) {
log.log(Level.WARNING, "QuestDb seems corrupted, wiping data and starting over", e);
IOUtils.recursiveDeleteDir(dir);
IOUtils.createDirectory(dir.getPath());
ensureTablesExist();
}
void ensureColumnExists(String column, String columnType) throws SqlException {
if (columnNames().contains(column)) return;
issue("alter table " + name + " add column " + column + " " + columnType, newContext());
}
/**
 * Adjusts a snapshot time so rows are appended in non-decreasing timestamp
 * order, as required for the table's designated timestamp column.
 * Times slightly (up to one minute) older than the highest timestamp already
 * added are clamped to that timestamp; anything older is discarded.
 *
 * @return the timestamp in ms to use for the row, or empty if the snapshot should be dropped
 */
private Optional<Long> adjustOrDiscard(Instant at) {
    long timestamp = at.toEpochMilli();
    if (timestamp >= highestTimestampAdded) {
        highestTimestampAdded = timestamp;
        return Optional.of(timestamp);
    }
    // Slightly out of order: clamp to the highest timestamp seen instead of dropping
    if (timestamp >= highestTimestampAdded - 60 * 1000) return Optional.of(highestTimestampAdded);
    return Optional.empty();
}
/** Returns the names of this table's columns, in declaration order. */
private List<String> columnNames() throws SqlException {
    var context = newContext();
    List<String> names = new ArrayList<>();
    // One try-with-resources; resources are still closed in reverse order
    // (cursor before factory), exactly as the nested form did
    try (RecordCursorFactory factory = issue("show columns from " + name, context).getRecordCursorFactory();
         RecordCursor cursor = factory.getCursor(context)) {
        Record record = cursor.getRecord();
        while (cursor.hasNext())
            names.add(record.getStr(0).toString());
    }
    return names;
}
}
} | class QuestMetricsDb extends AbstractComponent implements MetricsDb {
private static final Logger log = Logger.getLogger(QuestMetricsDb.class.getName());
private final Table nodeTable;
private final Table clusterTable;
private final Clock clock;
private final String dataDir;
private final CairoEngine engine;
private final ConcurrentResourcePool<SqlCompiler> sqlCompilerPool;
private final AtomicBoolean closed = new AtomicBoolean(false);
@Inject
public QuestMetricsDb() {
this(Defaults.getDefaults().underVespaHome("var/db/vespa/autoscaling"), Clock.systemUTC());
}
public QuestMetricsDb(String dataDir, Clock clock) {
this.clock = clock;
if (dataDir.startsWith(Defaults.getDefaults().vespaHome())
&& ! new File(Defaults.getDefaults().vespaHome()).exists())
dataDir = "data";
String logConfig = dataDir + "/quest-log.conf";
IOUtils.createDirectory(logConfig);
IOUtils.writeFile(new File(logConfig), new byte[0]);
System.setProperty("out", logConfig);
this.dataDir = dataDir;
engine = new CairoEngine(new DefaultCairoConfiguration(dataDir));
sqlCompilerPool = new ConcurrentResourcePool<>(() -> new SqlCompiler(engine()));
nodeTable = new Table(dataDir, "metrics", clock);
clusterTable = new Table(dataDir, "clusterMetrics", clock);
ensureTablesExist();
}
/**
 * Returns the Cairo engine, guarding every access against use after close().
 *
 * @throws IllegalStateException if close() has already been called
 */
private CairoEngine engine() {
    if (closed.get())
        throw new IllegalStateException("Attempted to access QuestDb after calling close");
    return engine;
}
@Override
public Clock clock() { return clock; }
@Override
public void addNodeMetrics(Collection<Pair<String, NodeMetricSnapshot>> snapshots) {
try {
addNodeMetricsBody(snapshots);
}
catch (CairoException e) {
if (e.getMessage().contains("Cannot read offset")) {
nodeTable.repair(e);
addNodeMetricsBody(snapshots);
}
}
}
private void addNodeMetricsBody(Collection<Pair<String, NodeMetricSnapshot>> snapshots) {
synchronized (nodeTable.writeLock) {
try (TableWriter writer = nodeTable.getWriter()) {
for (var snapshot : snapshots) {
Optional<Long> atMillis = nodeTable.adjustOrDiscard(snapshot.getSecond().at());
if (atMillis.isEmpty()) continue;
TableWriter.Row row = writer.newRow(atMillis.get() * 1000);
row.putStr(0, snapshot.getFirst());
row.putFloat(2, (float) snapshot.getSecond().load().cpu());
row.putFloat(3, (float) snapshot.getSecond().load().memory());
row.putFloat(4, (float) snapshot.getSecond().load().disk());
row.putLong(5, snapshot.getSecond().generation());
row.putBool(6, snapshot.getSecond().inService());
row.putBool(7, snapshot.getSecond().stable());
row.putFloat(8, (float) snapshot.getSecond().queryRate());
row.append();
}
writer.commit();
}
}
}
@Override
public void addClusterMetrics(ApplicationId application, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) {
try {
addClusterMetricsBody(application, snapshots);
}
catch (CairoException e) {
if (e.getMessage().contains("Cannot read offset")) {
clusterTable.repair(e);
addClusterMetricsBody(application, snapshots);
}
}
}
private void addClusterMetricsBody(ApplicationId applicationId, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) {
synchronized (clusterTable.writeLock) {
try (TableWriter writer = clusterTable.getWriter()) {
for (var snapshot : snapshots.entrySet()) {
Optional<Long> atMillis = clusterTable.adjustOrDiscard(snapshot.getValue().at());
if (atMillis.isEmpty()) continue;
TableWriter.Row row = writer.newRow(atMillis.get() * 1000);
row.putStr(0, applicationId.serializedForm());
row.putStr(1, snapshot.getKey().value());
row.putFloat(3, (float) snapshot.getValue().queryRate());
row.putFloat(4, (float) snapshot.getValue().writeRate());
row.append();
}
writer.commit();
}
}
}
@Override
public List<NodeTimeseries> getNodeTimeseries(Duration period, Set<String> hostnames) {
try {
var snapshots = getNodeSnapshots(clock.instant().minus(period), hostnames, newContext());
return snapshots.entrySet().stream()
.map(entry -> new NodeTimeseries(entry.getKey(), entry.getValue()))
.collect(Collectors.toList());
}
catch (SqlException e) {
throw new IllegalStateException("Could not read node timeseries data in Quest stored in " + dataDir, e);
}
}
@Override
public ClusterTimeseries getClusterTimeseries(ApplicationId applicationId, ClusterSpec.Id clusterId) {
try {
return getClusterSnapshots(applicationId, clusterId);
}
catch (SqlException e) {
throw new IllegalStateException("Could not read cluster timeseries data in Quest stored in " + dataDir, e);
}
}
@Override
public void gc() {
nodeTable.gc();
clusterTable.gc();
}
@Override
public void deconstruct() { close(); }
/**
 * Creates the node and cluster tables if they do not exist, or brings an
 * existing table up to the current schema (repairing on failure).
 *
 * Fix: removed the stray {@code @Override} annotation that preceded this
 * method — a private method cannot override anything, so the annotation is a
 * compile error (JLS: @Override requires an overridden supertype method).
 */
private void ensureTablesExist() {
    if (nodeTable.exists())
        ensureNodeTableIsUpdated();
    else
        createNodeTable();
    if (clusterTable.exists())
        ensureClusterTableIsUpdated();
    else
        createClusterTable();
}
private void ensureNodeTableIsUpdated() {
try {
} catch (Exception e) {
nodeTable.repair(e);
}
}
private void ensureClusterTableIsUpdated() {
try {
if (0 == engine().getStatus(newContext().getCairoSecurityContext(), new Path(), clusterTable.name)) {
}
} catch (Exception e) {
clusterTable.repair(e);
}
}
private void createNodeTable() {
try {
issue("create table " + nodeTable.name +
" (hostname string, at timestamp, cpu_util float, mem_total_util float, disk_util float," +
" application_generation long, inService boolean, stable boolean, queries_rate float)" +
" timestamp(at)" +
"PARTITION BY DAY;",
newContext());
}
catch (SqlException e) {
throw new IllegalStateException("Could not create Quest db table '" + nodeTable.name + "'", e);
}
}
private void createClusterTable() {
try {
issue("create table " + clusterTable.name +
" (application string, cluster string, at timestamp, queries_rate float, write_rate float)" +
" timestamp(at)" +
"PARTITION BY DAY;",
newContext());
}
catch (SqlException e) {
throw new IllegalStateException("Could not create Quest db table '" + clusterTable.name + "'", e);
}
}
private ListMap<String, NodeMetricSnapshot> getNodeSnapshots(Instant startTime,
Set<String> hostnames,
SqlExecutionContext context) throws SqlException {
DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC"));
String from = formatter.format(startTime).substring(0, 19) + ".000000Z";
String to = formatter.format(clock.instant()).substring(0, 19) + ".000000Z";
String sql = "select * from " + nodeTable.name + " where at between('" + from + "', '" + to + "');";
try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) {
ListMap<String, NodeMetricSnapshot> snapshots = new ListMap<>();
try (RecordCursor cursor = factory.getCursor(context)) {
Record record = cursor.getRecord();
while (cursor.hasNext()) {
String hostname = record.getStr(0).toString();
if (hostnames.isEmpty() || hostnames.contains(hostname)) {
snapshots.put(hostname,
new NodeMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(1) / 1000),
new Load(record.getFloat(2),
record.getFloat(3),
record.getFloat(4)),
record.getLong(5),
record.getBool(6),
record.getBool(7),
record.getFloat(8)));
}
}
}
return snapshots;
}
}
private ClusterTimeseries getClusterSnapshots(ApplicationId application, ClusterSpec.Id cluster) throws SqlException {
String sql = "select * from " + clusterTable.name;
var context = newContext();
try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) {
List<ClusterMetricSnapshot> snapshots = new ArrayList<>();
try (RecordCursor cursor = factory.getCursor(context)) {
Record record = cursor.getRecord();
while (cursor.hasNext()) {
String applicationIdString = record.getStr(0).toString();
if ( ! application.serializedForm().equals(applicationIdString)) continue;
String clusterId = record.getStr(1).toString();
if (cluster.value().equals(clusterId)) {
snapshots.add(new ClusterMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(2) / 1000),
record.getFloat(3),
record.getFloat(4)));
}
}
}
return new ClusterTimeseries(cluster, snapshots);
}
}
/**
 * Issues an SQL statement against the QuestDb engine.
 * A compiler is borrowed from the pool for the duration of the call —
 * presumably SqlCompiler is not safe for concurrent use, hence the pooling
 * rather than a shared instance. TODO confirm.
 */
private CompiledQuery issue(String sql, SqlExecutionContext context) throws SqlException {
    SqlCompiler sqlCompiler = sqlCompilerPool.alloc();
    try {
        return sqlCompiler.compile(sql, context);
    } finally {
        // Always return the compiler to the pool, also when compile throws
        sqlCompilerPool.free(sqlCompiler);
    }
}
private SqlExecutionContext newContext() {
return new SqlExecutionContextImpl(engine(), 1);
}
/** A questDb table */
private class Table {
private final Object writeLock = new Object();
private final String name;
private final Clock clock;
private final File dir;
private long highestTimestampAdded = 0;
Table(String dataDir, String name, Clock clock) {
this.name = name;
this.clock = clock;
this.dir = new File(dataDir, name);
IOUtils.createDirectory(dir.getPath());
new File(dir + "/_txn_scoreboard").delete();
}
boolean exists() {
return 0 == engine().getStatus(newContext().getCairoSecurityContext(), new Path(), name);
}
TableWriter getWriter() {
return engine().getWriter(newContext().getCairoSecurityContext(), name);
}
void gc() {
synchronized (writeLock) {
Instant oldestToKeep = clock.instant().minus(Duration.ofDays(4));
SqlExecutionContext context = newContext();
int partitions = 0;
try {
List<String> removeList = new ArrayList<>();
for (String dirEntry : dir.list()) {
File partitionDir = new File(dir, dirEntry);
if (!partitionDir.isDirectory()) continue;
partitions++;
DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC"));
Instant partitionDay = Instant.from(formatter.parse(dirEntry.substring(0, 10) + "T00:00:00"));
if (partitionDay.isBefore(oldestToKeep))
removeList.add(dirEntry);
}
if (removeList.size() < partitions && !removeList.isEmpty()) {
issue("alter table " + name + " drop partition list " +
removeList.stream().map(dir -> "'" + dir + "'").collect(Collectors.joining(",")),
context);
}
} catch (SqlException e) {
log.log(Level.WARNING, "Failed to gc old metrics data in " + dir + " table " + name, e);
}
}
}
/**
* Repairs this db on corruption.
*
* @param e the exception indicating corruption
*/
private void repair(Exception e) {
log.log(Level.WARNING, "QuestDb seems corrupted, wiping data and starting over", e);
IOUtils.recursiveDeleteDir(dir);
IOUtils.createDirectory(dir.getPath());
ensureTablesExist();
}
void ensureColumnExists(String column, String columnType) throws SqlException {
if (columnNames().contains(column)) return;
issue("alter table " + name + " add column " + column + " " + columnType, newContext());
}
/**
 * QuestDb requires rows to arrive in timestamp order. Samples up to one minute
 * older than the highest timestamp seen so far are clamped to that timestamp;
 * anything older is discarded (empty is returned).
 */
private Optional<Long> adjustOrDiscard(Instant at) {
    long candidate = at.toEpochMilli();
    if (candidate >= highestTimestampAdded) {
        highestTimestampAdded = candidate;
        return Optional.of(candidate);
    }
    long oldestAcceptable = highestTimestampAdded - 60 * 1000;
    return candidate >= oldestAcceptable ? Optional.of(highestTimestampAdded)
                                         : Optional.empty();
}
/** Queries QuestDb metadata for this table's column names. */
private List<String> columnNames() throws SqlException {
    SqlExecutionContext context = newContext();
    List<String> columns = new ArrayList<>();
    // Both resources in one try: close order (cursor, then factory) is unchanged.
    try (RecordCursorFactory factory = issue("show columns from " + name, context).getRecordCursorFactory();
         RecordCursor cursor = factory.getCursor(context)) {
        Record record = cursor.getRecord();
        while (cursor.hasNext())
            columns.add(record.getStr(0).toString());
    }
    return columns;
}
}
} |
Now we need to check for null here too. | private void ensureClusterTableIsUpdated() {
try {
if (0 == engine.get().getStatus(newContext().getCairoSecurityContext(), new Path(), clusterTable.name)) {
}
} catch (Exception e) {
clusterTable.repair(e);
}
} | if (0 == engine.get().getStatus(newContext().getCairoSecurityContext(), new Path(), clusterTable.name)) { | private void ensureClusterTableIsUpdated() {
try {
if (0 == engine().getStatus(newContext().getCairoSecurityContext(), new Path(), clusterTable.name)) {
}
} catch (Exception e) {
clusterTable.repair(e);
}
} | class QuestMetricsDb extends AbstractComponent implements MetricsDb {
private static final Logger log = Logger.getLogger(QuestMetricsDb.class.getName());
private final Table nodeTable;
private final Table clusterTable;
private final Clock clock;
private final String dataDir;
private final AtomicReference<CairoEngine> engine = new AtomicReference<>();
private final ThreadLocal<SqlCompiler> sqlCompiler;
private final AtomicInteger nullRecords = new AtomicInteger();
@Inject
public QuestMetricsDb() {
this(Defaults.getDefaults().underVespaHome("var/db/vespa/autoscaling"), Clock.systemUTC());
}
public QuestMetricsDb(String dataDir, Clock clock) {
this.clock = clock;
if (dataDir.startsWith(Defaults.getDefaults().vespaHome())
&& ! new File(Defaults.getDefaults().vespaHome()).exists())
dataDir = "data";
String logConfig = dataDir + "/quest-log.conf";
IOUtils.createDirectory(logConfig);
IOUtils.writeFile(new File(logConfig), new byte[0]);
System.setProperty("out", logConfig);
this.dataDir = dataDir;
engine.set(new CairoEngine(new DefaultCairoConfiguration(dataDir)));
sqlCompiler = ThreadLocal.withInitial(() -> new SqlCompiler(engine.get()));
nodeTable = new Table(dataDir, "metrics", clock);
clusterTable = new Table(dataDir, "clusterMetrics", clock);
ensureTablesExist();
}
@Override
public Clock clock() { return clock; }
@Override
public void addNodeMetrics(Collection<Pair<String, NodeMetricSnapshot>> snapshots) {
try {
addNodeMetricsBody(snapshots);
}
catch (CairoException e) {
if (e.getMessage().contains("Cannot read offset")) {
nodeTable.repair(e);
addNodeMetricsBody(snapshots);
}
}
}
private void addNodeMetricsBody(Collection<Pair<String, NodeMetricSnapshot>> snapshots) {
synchronized (nodeTable.writeLock) {
try (TableWriter writer = nodeTable.getWriter()) {
for (var snapshot : snapshots) {
Optional<Long> atMillis = nodeTable.adjustOrDiscard(snapshot.getSecond().at());
if (atMillis.isEmpty()) continue;
TableWriter.Row row = writer.newRow(atMillis.get() * 1000);
row.putStr(0, snapshot.getFirst());
row.putFloat(2, (float) snapshot.getSecond().load().cpu());
row.putFloat(3, (float) snapshot.getSecond().load().memory());
row.putFloat(4, (float) snapshot.getSecond().load().disk());
row.putLong(5, snapshot.getSecond().generation());
row.putBool(6, snapshot.getSecond().inService());
row.putBool(7, snapshot.getSecond().stable());
row.putFloat(8, (float) snapshot.getSecond().queryRate());
row.append();
}
writer.commit();
}
}
}
@Override
public void addClusterMetrics(ApplicationId application, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) {
try {
addClusterMetricsBody(application, snapshots);
}
catch (CairoException e) {
if (e.getMessage().contains("Cannot read offset")) {
clusterTable.repair(e);
addClusterMetricsBody(application, snapshots);
}
}
}
private void addClusterMetricsBody(ApplicationId applicationId, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) {
synchronized (clusterTable.writeLock) {
try (TableWriter writer = clusterTable.getWriter()) {
for (var snapshot : snapshots.entrySet()) {
Optional<Long> atMillis = clusterTable.adjustOrDiscard(snapshot.getValue().at());
if (atMillis.isEmpty()) continue;
TableWriter.Row row = writer.newRow(atMillis.get() * 1000);
row.putStr(0, applicationId.serializedForm());
row.putStr(1, snapshot.getKey().value());
row.putFloat(3, (float) snapshot.getValue().queryRate());
row.putFloat(4, (float) snapshot.getValue().writeRate());
row.append();
}
writer.commit();
}
}
}
@Override
public List<NodeTimeseries> getNodeTimeseries(Duration period, Set<String> hostnames) {
try {
var snapshots = getNodeSnapshots(clock.instant().minus(period), hostnames, newContext());
return snapshots.entrySet().stream()
.map(entry -> new NodeTimeseries(entry.getKey(), entry.getValue()))
.collect(Collectors.toList());
}
catch (SqlException e) {
throw new IllegalStateException("Could not read node timeseries data in Quest stored in " + dataDir, e);
}
}
@Override
public ClusterTimeseries getClusterTimeseries(ApplicationId applicationId, ClusterSpec.Id clusterId) {
try {
return getClusterSnapshots(applicationId, clusterId);
}
catch (SqlException e) {
throw new IllegalStateException("Could not read cluster timeseries data in Quest stored in " + dataDir, e);
}
}
public int getNullRecordsCount() { return nullRecords.get(); }
@Override
public void gc() {
nullRecords.set(0);
nodeTable.gc();
clusterTable.gc();
}
@Override
public void deconstruct() { close(); }
@Override
// Closes the QuestDb engine. getAndSet(null) makes this idempotent and leaves the
// engine reference null so later engine.get() callers see it is gone.
// NOTE(review): only clusterTable.writeLock is held here; a writer synchronized on
// nodeTable.writeLock could still race with this close - confirm the threading model.
public void close() {
synchronized (clusterTable.writeLock) {
CairoEngine myEngine = engine.getAndSet(null);
if (myEngine != null) {
myEngine.close();
}
}
}
/** Creates each table on first use; otherwise brings its schema up to date. */
private void ensureTablesExist() {
    if ( ! nodeTable.exists())
        createNodeTable();
    else
        ensureNodeTableIsUpdated();
    if ( ! clusterTable.exists())
        createClusterTable();
    else
        ensureClusterTableIsUpdated();
}
private void ensureNodeTableIsUpdated() {
// NOTE(review): the try block is empty, so this method currently does nothing and
// the repair() path below is unreachable. Schema-migration calls (e.g.
// ensureColumnExists) presumably belong here - confirm intent.
try {
} catch (Exception e) {
nodeTable.repair(e);
}
}
private void createNodeTable() {
try {
issue("create table " + nodeTable.name +
" (hostname string, at timestamp, cpu_util float, mem_total_util float, disk_util float," +
" application_generation long, inService boolean, stable boolean, queries_rate float)" +
" timestamp(at)" +
"PARTITION BY DAY;",
newContext());
}
catch (SqlException e) {
throw new IllegalStateException("Could not create Quest db table '" + nodeTable.name + "'", e);
}
}
private void createClusterTable() {
try {
issue("create table " + clusterTable.name +
" (application string, cluster string, at timestamp, queries_rate float, write_rate float)" +
" timestamp(at)" +
"PARTITION BY DAY;",
newContext());
}
catch (SqlException e) {
throw new IllegalStateException("Could not create Quest db table '" + clusterTable.name + "'", e);
}
}
private ListMap<String, NodeMetricSnapshot> getNodeSnapshots(Instant startTime,
Set<String> hostnames,
SqlExecutionContext context) throws SqlException {
DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC"));
String from = formatter.format(startTime).substring(0, 19) + ".000000Z";
String to = formatter.format(clock.instant()).substring(0, 19) + ".000000Z";
String sql = "select * from " + nodeTable.name + " where at between('" + from + "', '" + to + "');";
try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) {
ListMap<String, NodeMetricSnapshot> snapshots = new ListMap<>();
try (RecordCursor cursor = factory.getCursor(context)) {
Record record = cursor.getRecord();
while (cursor.hasNext()) {
if (record == null || record.getStr(0) == null) {
nullRecords.incrementAndGet();
continue;
}
String hostname = record.getStr(0).toString();
if (hostnames.isEmpty() || hostnames.contains(hostname)) {
snapshots.put(hostname,
new NodeMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(1) / 1000),
new Load(record.getFloat(2),
record.getFloat(3),
record.getFloat(4)),
record.getLong(5),
record.getBool(6),
record.getBool(7),
record.getFloat(8)));
}
}
}
return snapshots;
}
}
/** Reads all snapshots for the given application cluster from the cluster table. */
private ClusterTimeseries getClusterSnapshots(ApplicationId application, ClusterSpec.Id cluster) throws SqlException {
    String sql = "select * from " + clusterTable.name;
    var context = newContext();
    try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) {
        List<ClusterMetricSnapshot> snapshots = new ArrayList<>();
        try (RecordCursor cursor = factory.getCursor(context)) {
            Record record = cursor.getRecord();
            while (cursor.hasNext()) {
                // Null records/strings have been observed (see getNodeSnapshots):
                // count and skip them instead of failing with an NPE.
                if (record == null || record.getStr(0) == null || record.getStr(1) == null) {
                    nullRecords.incrementAndGet();
                    continue;
                }
                String applicationIdString = record.getStr(0).toString();
                if ( ! application.serializedForm().equals(applicationIdString)) continue;
                String clusterId = record.getStr(1).toString();
                if (cluster.value().equals(clusterId)) {
                    snapshots.add(new ClusterMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(2) / 1000),
                                                            record.getFloat(3),
                                                            record.getFloat(4)));
                }
            }
        }
        return new ClusterTimeseries(cluster, snapshots);
    }
}
/** Issues an SQL statement against the QuestDb engine using this thread's compiler. */
private CompiledQuery issue(String sql, SqlExecutionContext context) throws SqlException {
    SqlCompiler compiler = sqlCompiler.get();
    return compiler.compile(sql, context);
}
private SqlExecutionContext newContext() {
return new SqlExecutionContextImpl(engine.get(), 1);
}
/** A questDb table */
private class Table {
private final Object writeLock = new Object();
private final String name;
private final Clock clock;
private final File dir;
private long highestTimestampAdded = 0;
Table(String dataDir, String name, Clock clock) {
this.name = name;
this.clock = clock;
this.dir = new File(dataDir, name);
IOUtils.createDirectory(dir.getPath());
new File(dir + "/_txn_scoreboard").delete();
}
boolean exists() {
return 0 == engine.get().getStatus(newContext().getCairoSecurityContext(), new Path(), name);
}
TableWriter getWriter() {
return engine.get().getWriter(newContext().getCairoSecurityContext(), name);
}
void gc() {
synchronized (writeLock) {
Instant oldestToKeep = clock.instant().minus(Duration.ofDays(4));
SqlExecutionContext context = newContext();
int partitions = 0;
try {
List<String> removeList = new ArrayList<>();
for (String dirEntry : dir.list()) {
File partitionDir = new File(dir, dirEntry);
if (!partitionDir.isDirectory()) continue;
partitions++;
DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC"));
Instant partitionDay = Instant.from(formatter.parse(dirEntry.substring(0, 10) + "T00:00:00"));
if (partitionDay.isBefore(oldestToKeep))
removeList.add(dirEntry);
}
if (removeList.size() < partitions && !removeList.isEmpty()) {
issue("alter table " + name + " drop partition list " +
removeList.stream().map(dir -> "'" + dir + "'").collect(Collectors.joining(",")),
context);
}
} catch (SqlException e) {
log.log(Level.WARNING, "Failed to gc old metrics data in " + dir + " table " + name, e);
}
}
}
/**
* Repairs this db on corruption.
*
* @param e the exception indicating corruption
*/
private void repair(Exception e) {
log.log(Level.WARNING, "QuestDb seems corrupted, wiping data and starting over", e);
IOUtils.recursiveDeleteDir(dir);
IOUtils.createDirectory(dir.getPath());
ensureTablesExist();
}
void ensureColumnExists(String column, String columnType) throws SqlException {
if (columnNames().contains(column)) return;
issue("alter table " + name + " add column " + column + " " + columnType, newContext());
}
private Optional<Long> adjustOrDiscard(Instant at) {
long timestamp = at.toEpochMilli();
if (timestamp >= highestTimestampAdded) {
highestTimestampAdded = timestamp;
return Optional.of(timestamp);
}
if (timestamp >= highestTimestampAdded - 60 * 1000) return Optional.of(highestTimestampAdded);
return Optional.empty();
}
private List<String> columnNames() throws SqlException {
var context = newContext();
List<String> columns = new ArrayList<>();
try (RecordCursorFactory factory = issue("show columns from " + name, context).getRecordCursorFactory()) {
try (RecordCursor cursor = factory.getCursor(context)) {
Record record = cursor.getRecord();
while (cursor.hasNext()) {
columns.add(record.getStr(0).toString());
}
}
}
return columns;
}
}
} | class QuestMetricsDb extends AbstractComponent implements MetricsDb {
private static final Logger log = Logger.getLogger(QuestMetricsDb.class.getName());
private final Table nodeTable;
private final Table clusterTable;
private final Clock clock;
private final String dataDir;
private final CairoEngine engine;
private final ConcurrentResourcePool<SqlCompiler> sqlCompilerPool;
private final AtomicBoolean closed = new AtomicBoolean(false);
@Inject
public QuestMetricsDb() {
this(Defaults.getDefaults().underVespaHome("var/db/vespa/autoscaling"), Clock.systemUTC());
}
public QuestMetricsDb(String dataDir, Clock clock) {
this.clock = clock;
if (dataDir.startsWith(Defaults.getDefaults().vespaHome())
&& ! new File(Defaults.getDefaults().vespaHome()).exists())
dataDir = "data";
String logConfig = dataDir + "/quest-log.conf";
IOUtils.createDirectory(logConfig);
IOUtils.writeFile(new File(logConfig), new byte[0]);
System.setProperty("out", logConfig);
this.dataDir = dataDir;
engine = new CairoEngine(new DefaultCairoConfiguration(dataDir));
sqlCompilerPool = new ConcurrentResourcePool<>(() -> new SqlCompiler(engine()));
nodeTable = new Table(dataDir, "metrics", clock);
clusterTable = new Table(dataDir, "clusterMetrics", clock);
ensureTablesExist();
}
/** Returns the engine, refusing access once close() has been called. */
private CairoEngine engine() {
    if ( ! closed.get()) return engine;
    throw new IllegalStateException("Attempted to access QuestDb after calling close");
}
@Override
public Clock clock() { return clock; }
@Override
public void addNodeMetrics(Collection<Pair<String, NodeMetricSnapshot>> snapshots) {
try {
addNodeMetricsBody(snapshots);
}
catch (CairoException e) {
if (e.getMessage().contains("Cannot read offset")) {
nodeTable.repair(e);
addNodeMetricsBody(snapshots);
}
}
}
private void addNodeMetricsBody(Collection<Pair<String, NodeMetricSnapshot>> snapshots) {
synchronized (nodeTable.writeLock) {
try (TableWriter writer = nodeTable.getWriter()) {
for (var snapshot : snapshots) {
Optional<Long> atMillis = nodeTable.adjustOrDiscard(snapshot.getSecond().at());
if (atMillis.isEmpty()) continue;
TableWriter.Row row = writer.newRow(atMillis.get() * 1000);
row.putStr(0, snapshot.getFirst());
row.putFloat(2, (float) snapshot.getSecond().load().cpu());
row.putFloat(3, (float) snapshot.getSecond().load().memory());
row.putFloat(4, (float) snapshot.getSecond().load().disk());
row.putLong(5, snapshot.getSecond().generation());
row.putBool(6, snapshot.getSecond().inService());
row.putBool(7, snapshot.getSecond().stable());
row.putFloat(8, (float) snapshot.getSecond().queryRate());
row.append();
}
writer.commit();
}
}
}
@Override
public void addClusterMetrics(ApplicationId application, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) {
try {
addClusterMetricsBody(application, snapshots);
}
catch (CairoException e) {
if (e.getMessage().contains("Cannot read offset")) {
clusterTable.repair(e);
addClusterMetricsBody(application, snapshots);
}
}
}
private void addClusterMetricsBody(ApplicationId applicationId, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) {
synchronized (clusterTable.writeLock) {
try (TableWriter writer = clusterTable.getWriter()) {
for (var snapshot : snapshots.entrySet()) {
Optional<Long> atMillis = clusterTable.adjustOrDiscard(snapshot.getValue().at());
if (atMillis.isEmpty()) continue;
TableWriter.Row row = writer.newRow(atMillis.get() * 1000);
row.putStr(0, applicationId.serializedForm());
row.putStr(1, snapshot.getKey().value());
row.putFloat(3, (float) snapshot.getValue().queryRate());
row.putFloat(4, (float) snapshot.getValue().writeRate());
row.append();
}
writer.commit();
}
}
}
@Override
public List<NodeTimeseries> getNodeTimeseries(Duration period, Set<String> hostnames) {
try {
var snapshots = getNodeSnapshots(clock.instant().minus(period), hostnames, newContext());
return snapshots.entrySet().stream()
.map(entry -> new NodeTimeseries(entry.getKey(), entry.getValue()))
.collect(Collectors.toList());
}
catch (SqlException e) {
throw new IllegalStateException("Could not read node timeseries data in Quest stored in " + dataDir, e);
}
}
@Override
public ClusterTimeseries getClusterTimeseries(ApplicationId applicationId, ClusterSpec.Id clusterId) {
try {
return getClusterSnapshots(applicationId, clusterId);
}
catch (SqlException e) {
throw new IllegalStateException("Could not read cluster timeseries data in Quest stored in " + dataDir, e);
}
}
@Override
public void gc() {
nodeTable.gc();
clusterTable.gc();
}
@Override
public void deconstruct() { close(); }
@Override
public void close() {
// Idempotent: only the first caller gets past this flag; everyone else sees
// closed == true and returns immediately.
if (closed.getAndSet(true)) return;
// Hold both table write locks (node first, then cluster) so no writer is
// mid-flight while the compilers and engine are torn down.
synchronized (nodeTable.writeLock) {
synchronized (clusterTable.writeLock) {
for (SqlCompiler sqlCompiler : sqlCompilerPool)
sqlCompiler.close();
engine.close();
}
}
}
private void ensureTablesExist() {
if (nodeTable.exists())
ensureNodeTableIsUpdated();
else
createNodeTable();
if (clusterTable.exists())
ensureClusterTableIsUpdated();
else
createClusterTable();
}
private void ensureNodeTableIsUpdated() {
try {
} catch (Exception e) {
nodeTable.repair(e);
}
}
private void createNodeTable() {
try {
issue("create table " + nodeTable.name +
" (hostname string, at timestamp, cpu_util float, mem_total_util float, disk_util float," +
" application_generation long, inService boolean, stable boolean, queries_rate float)" +
" timestamp(at)" +
"PARTITION BY DAY;",
newContext());
}
catch (SqlException e) {
throw new IllegalStateException("Could not create Quest db table '" + nodeTable.name + "'", e);
}
}
private void createClusterTable() {
try {
issue("create table " + clusterTable.name +
" (application string, cluster string, at timestamp, queries_rate float, write_rate float)" +
" timestamp(at)" +
"PARTITION BY DAY;",
newContext());
}
catch (SqlException e) {
throw new IllegalStateException("Could not create Quest db table '" + clusterTable.name + "'", e);
}
}
private ListMap<String, NodeMetricSnapshot> getNodeSnapshots(Instant startTime,
Set<String> hostnames,
SqlExecutionContext context) throws SqlException {
DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC"));
String from = formatter.format(startTime).substring(0, 19) + ".000000Z";
String to = formatter.format(clock.instant()).substring(0, 19) + ".000000Z";
String sql = "select * from " + nodeTable.name + " where at between('" + from + "', '" + to + "');";
try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) {
ListMap<String, NodeMetricSnapshot> snapshots = new ListMap<>();
try (RecordCursor cursor = factory.getCursor(context)) {
Record record = cursor.getRecord();
while (cursor.hasNext()) {
String hostname = record.getStr(0).toString();
if (hostnames.isEmpty() || hostnames.contains(hostname)) {
snapshots.put(hostname,
new NodeMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(1) / 1000),
new Load(record.getFloat(2),
record.getFloat(3),
record.getFloat(4)),
record.getLong(5),
record.getBool(6),
record.getBool(7),
record.getFloat(8)));
}
}
}
return snapshots;
}
}
private ClusterTimeseries getClusterSnapshots(ApplicationId application, ClusterSpec.Id cluster) throws SqlException {
String sql = "select * from " + clusterTable.name;
var context = newContext();
try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) {
List<ClusterMetricSnapshot> snapshots = new ArrayList<>();
try (RecordCursor cursor = factory.getCursor(context)) {
Record record = cursor.getRecord();
while (cursor.hasNext()) {
String applicationIdString = record.getStr(0).toString();
if ( ! application.serializedForm().equals(applicationIdString)) continue;
String clusterId = record.getStr(1).toString();
if (cluster.value().equals(clusterId)) {
snapshots.add(new ClusterMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(2) / 1000),
record.getFloat(3),
record.getFloat(4)));
}
}
}
return new ClusterTimeseries(cluster, snapshots);
}
}
/**
 * Issues an SQL statement against the QuestDb engine.
 *
 * @param sql the statement to compile
 * @param context the execution context to compile under
 * @return the compiled query
 * @throws SqlException if QuestDb rejects the statement
 */
private CompiledQuery issue(String sql, SqlExecutionContext context) throws SqlException {
// Borrow a compiler from the pool; the finally guarantees it is returned
// even when compilation throws.
SqlCompiler sqlCompiler = sqlCompilerPool.alloc();
try {
return sqlCompiler.compile(sql, context);
} finally {
sqlCompilerPool.free(sqlCompiler);
}
}
private SqlExecutionContext newContext() {
return new SqlExecutionContextImpl(engine(), 1);
}
/** A questDb table */
private class Table {
private final Object writeLock = new Object();
private final String name;
private final Clock clock;
private final File dir;
private long highestTimestampAdded = 0;
Table(String dataDir, String name, Clock clock) {
this.name = name;
this.clock = clock;
this.dir = new File(dataDir, name);
IOUtils.createDirectory(dir.getPath());
new File(dir + "/_txn_scoreboard").delete();
}
boolean exists() {
return 0 == engine().getStatus(newContext().getCairoSecurityContext(), new Path(), name);
}
TableWriter getWriter() {
return engine().getWriter(newContext().getCairoSecurityContext(), name);
}
void gc() {
synchronized (writeLock) {
Instant oldestToKeep = clock.instant().minus(Duration.ofDays(4));
SqlExecutionContext context = newContext();
int partitions = 0;
try {
List<String> removeList = new ArrayList<>();
for (String dirEntry : dir.list()) {
File partitionDir = new File(dir, dirEntry);
if (!partitionDir.isDirectory()) continue;
partitions++;
DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC"));
Instant partitionDay = Instant.from(formatter.parse(dirEntry.substring(0, 10) + "T00:00:00"));
if (partitionDay.isBefore(oldestToKeep))
removeList.add(dirEntry);
}
if (removeList.size() < partitions && !removeList.isEmpty()) {
issue("alter table " + name + " drop partition list " +
removeList.stream().map(dir -> "'" + dir + "'").collect(Collectors.joining(",")),
context);
}
} catch (SqlException e) {
log.log(Level.WARNING, "Failed to gc old metrics data in " + dir + " table " + name, e);
}
}
}
/**
* Repairs this db on corruption.
*
* @param e the exception indicating corruption
*/
private void repair(Exception e) {
log.log(Level.WARNING, "QuestDb seems corrupted, wiping data and starting over", e);
IOUtils.recursiveDeleteDir(dir);
IOUtils.createDirectory(dir.getPath());
ensureTablesExist();
}
void ensureColumnExists(String column, String columnType) throws SqlException {
if (columnNames().contains(column)) return;
issue("alter table " + name + " add column " + column + " " + columnType, newContext());
}
private Optional<Long> adjustOrDiscard(Instant at) {
long timestamp = at.toEpochMilli();
if (timestamp >= highestTimestampAdded) {
highestTimestampAdded = timestamp;
return Optional.of(timestamp);
}
if (timestamp >= highestTimestampAdded - 60 * 1000) return Optional.of(highestTimestampAdded);
return Optional.empty();
}
private List<String> columnNames() throws SqlException {
var context = newContext();
List<String> columns = new ArrayList<>();
try (RecordCursorFactory factory = issue("show columns from " + name, context).getRecordCursorFactory()) {
try (RecordCursor cursor = factory.getCursor(context)) {
Record record = cursor.getRecord();
while (cursor.hasNext()) {
columns.add(record.getStr(0).toString());
}
}
}
return columns;
}
}
} |
We have an issue that shows up in JNI or very low-level (Unsafe) Java code that we cannot explain. Then it is at least good to verify that we are not accessing objects after they are invalidated. Using an AtomicBoolean is fine; add checks. This was just a suggestion — there might be better ways to do this. I do not know the threading model here, so I cannot say what the best approach is. But detecting use after close, and closing down the SqlCompilers, are the minimum I think. I see that the SqlExecutionContext is Closeable too, but calling close() on it seems dangerous as it seems to close a shared resource (MessageBus). | public void close() {
synchronized (clusterTable.writeLock) {
CairoEngine myEngine = engine.getAndSet(null);
if (myEngine != null) {
myEngine.close();
}
}
} | CairoEngine myEngine = engine.getAndSet(null); | public void close() {
if (closed.getAndSet(true)) return;
synchronized (nodeTable.writeLock) {
synchronized (clusterTable.writeLock) {
for (SqlCompiler sqlCompiler : sqlCompilerPool)
sqlCompiler.close();
engine.close();
}
}
} | class QuestMetricsDb extends AbstractComponent implements MetricsDb {
private static final Logger log = Logger.getLogger(QuestMetricsDb.class.getName());
private final Table nodeTable;
private final Table clusterTable;
private final Clock clock;
private final String dataDir;
private final AtomicReference<CairoEngine> engine = new AtomicReference<>();
private final ThreadLocal<SqlCompiler> sqlCompiler;
private final AtomicInteger nullRecords = new AtomicInteger();
@Inject
public QuestMetricsDb() {
this(Defaults.getDefaults().underVespaHome("var/db/vespa/autoscaling"), Clock.systemUTC());
}
public QuestMetricsDb(String dataDir, Clock clock) {
this.clock = clock;
if (dataDir.startsWith(Defaults.getDefaults().vespaHome())
&& ! new File(Defaults.getDefaults().vespaHome()).exists())
dataDir = "data";
String logConfig = dataDir + "/quest-log.conf";
IOUtils.createDirectory(logConfig);
IOUtils.writeFile(new File(logConfig), new byte[0]);
System.setProperty("out", logConfig);
this.dataDir = dataDir;
engine.set(new CairoEngine(new DefaultCairoConfiguration(dataDir)));
sqlCompiler = ThreadLocal.withInitial(() -> new SqlCompiler(engine.get()));
nodeTable = new Table(dataDir, "metrics", clock);
clusterTable = new Table(dataDir, "clusterMetrics", clock);
ensureTablesExist();
}
@Override
public Clock clock() { return clock; }
@Override
public void addNodeMetrics(Collection<Pair<String, NodeMetricSnapshot>> snapshots) {
try {
addNodeMetricsBody(snapshots);
}
catch (CairoException e) {
if (e.getMessage().contains("Cannot read offset")) {
nodeTable.repair(e);
addNodeMetricsBody(snapshots);
}
}
}
private void addNodeMetricsBody(Collection<Pair<String, NodeMetricSnapshot>> snapshots) {
synchronized (nodeTable.writeLock) {
try (TableWriter writer = nodeTable.getWriter()) {
for (var snapshot : snapshots) {
Optional<Long> atMillis = nodeTable.adjustOrDiscard(snapshot.getSecond().at());
if (atMillis.isEmpty()) continue;
TableWriter.Row row = writer.newRow(atMillis.get() * 1000);
row.putStr(0, snapshot.getFirst());
row.putFloat(2, (float) snapshot.getSecond().load().cpu());
row.putFloat(3, (float) snapshot.getSecond().load().memory());
row.putFloat(4, (float) snapshot.getSecond().load().disk());
row.putLong(5, snapshot.getSecond().generation());
row.putBool(6, snapshot.getSecond().inService());
row.putBool(7, snapshot.getSecond().stable());
row.putFloat(8, (float) snapshot.getSecond().queryRate());
row.append();
}
writer.commit();
}
}
}
@Override
public void addClusterMetrics(ApplicationId application, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) {
try {
addClusterMetricsBody(application, snapshots);
}
catch (CairoException e) {
if (e.getMessage().contains("Cannot read offset")) {
clusterTable.repair(e);
addClusterMetricsBody(application, snapshots);
}
}
}
private void addClusterMetricsBody(ApplicationId applicationId, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) {
synchronized (clusterTable.writeLock) {
try (TableWriter writer = clusterTable.getWriter()) {
for (var snapshot : snapshots.entrySet()) {
Optional<Long> atMillis = clusterTable.adjustOrDiscard(snapshot.getValue().at());
if (atMillis.isEmpty()) continue;
TableWriter.Row row = writer.newRow(atMillis.get() * 1000);
row.putStr(0, applicationId.serializedForm());
row.putStr(1, snapshot.getKey().value());
row.putFloat(3, (float) snapshot.getValue().queryRate());
row.putFloat(4, (float) snapshot.getValue().writeRate());
row.append();
}
writer.commit();
}
}
}
@Override
public List<NodeTimeseries> getNodeTimeseries(Duration period, Set<String> hostnames) {
try {
var snapshots = getNodeSnapshots(clock.instant().minus(period), hostnames, newContext());
return snapshots.entrySet().stream()
.map(entry -> new NodeTimeseries(entry.getKey(), entry.getValue()))
.collect(Collectors.toList());
}
catch (SqlException e) {
throw new IllegalStateException("Could not read node timeseries data in Quest stored in " + dataDir, e);
}
}
@Override
public ClusterTimeseries getClusterTimeseries(ApplicationId applicationId, ClusterSpec.Id clusterId) {
try {
return getClusterSnapshots(applicationId, clusterId);
}
catch (SqlException e) {
throw new IllegalStateException("Could not read cluster timeseries data in Quest stored in " + dataDir, e);
}
}
public int getNullRecordsCount() { return nullRecords.get(); }
@Override
public void gc() {
// Reset the diagnostics counter for the next observation window, then drop
// expired partitions from both tables.
nullRecords.set(0);
nodeTable.gc();
clusterTable.gc();
}
@Override
public void deconstruct() { close(); }
@Override
private void ensureTablesExist() {
if (nodeTable.exists())
ensureNodeTableIsUpdated();
else
createNodeTable();
if (clusterTable.exists())
ensureClusterTableIsUpdated();
else
createClusterTable();
}
private void ensureNodeTableIsUpdated() {
try {
} catch (Exception e) {
nodeTable.repair(e);
}
}
private void ensureClusterTableIsUpdated() {
try {
// The status probe's result is unused (empty if body); its purpose here appears
// to be triggering an exception on a corrupted table, which then drives repair().
// NOTE(review): confirm whether schema updates were meant to go in this branch.
if (0 == engine.get().getStatus(newContext().getCairoSecurityContext(), new Path(), clusterTable.name)) {
}
} catch (Exception e) {
clusterTable.repair(e);
}
}
private void createNodeTable() {
try {
issue("create table " + nodeTable.name +
" (hostname string, at timestamp, cpu_util float, mem_total_util float, disk_util float," +
" application_generation long, inService boolean, stable boolean, queries_rate float)" +
" timestamp(at)" +
"PARTITION BY DAY;",
newContext());
}
catch (SqlException e) {
throw new IllegalStateException("Could not create Quest db table '" + nodeTable.name + "'", e);
}
}
private void createClusterTable() {
try {
issue("create table " + clusterTable.name +
" (application string, cluster string, at timestamp, queries_rate float, write_rate float)" +
" timestamp(at)" +
"PARTITION BY DAY;",
newContext());
}
catch (SqlException e) {
throw new IllegalStateException("Could not create Quest db table '" + clusterTable.name + "'", e);
}
}
private ListMap<String, NodeMetricSnapshot> getNodeSnapshots(Instant startTime,
Set<String> hostnames,
SqlExecutionContext context) throws SqlException {
DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC"));
String from = formatter.format(startTime).substring(0, 19) + ".000000Z";
String to = formatter.format(clock.instant()).substring(0, 19) + ".000000Z";
String sql = "select * from " + nodeTable.name + " where at between('" + from + "', '" + to + "');";
try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) {
ListMap<String, NodeMetricSnapshot> snapshots = new ListMap<>();
try (RecordCursor cursor = factory.getCursor(context)) {
Record record = cursor.getRecord();
while (cursor.hasNext()) {
if (record == null || record.getStr(0) == null) {
nullRecords.incrementAndGet();
continue;
}
String hostname = record.getStr(0).toString();
if (hostnames.isEmpty() || hostnames.contains(hostname)) {
snapshots.put(hostname,
new NodeMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(1) / 1000),
new Load(record.getFloat(2),
record.getFloat(3),
record.getFloat(4)),
record.getLong(5),
record.getBool(6),
record.getBool(7),
record.getFloat(8)));
}
}
}
return snapshots;
}
}
/** Reads all snapshots for the given application cluster from the cluster table. */
private ClusterTimeseries getClusterSnapshots(ApplicationId application, ClusterSpec.Id cluster) throws SqlException {
    String sql = "select * from " + clusterTable.name;
    var context = newContext();
    try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) {
        List<ClusterMetricSnapshot> snapshots = new ArrayList<>();
        try (RecordCursor cursor = factory.getCursor(context)) {
            Record record = cursor.getRecord();
            while (cursor.hasNext()) {
                // Null records/strings have been observed (getNodeSnapshots guards the
                // same way): count and skip them instead of failing with an NPE.
                if (record == null || record.getStr(0) == null || record.getStr(1) == null) {
                    nullRecords.incrementAndGet();
                    continue;
                }
                String applicationIdString = record.getStr(0).toString();
                if ( ! application.serializedForm().equals(applicationIdString)) continue;
                String clusterId = record.getStr(1).toString();
                if (cluster.value().equals(clusterId)) {
                    snapshots.add(new ClusterMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(2) / 1000),
                                                            record.getFloat(3),
                                                            record.getFloat(4)));
                }
            }
        }
        return new ClusterTimeseries(cluster, snapshots);
    }
}
/** Issues an SQL statement against the QuestDb engine */
private CompiledQuery issue(String sql, SqlExecutionContext context) throws SqlException {
return sqlCompiler.get().compile(sql, context);
}
private SqlExecutionContext newContext() {
return new SqlExecutionContextImpl(engine.get(), 1);
}
/** A questDb table */
private class Table {
private final Object writeLock = new Object();
private final String name;
private final Clock clock;
private final File dir;
private long highestTimestampAdded = 0;
Table(String dataDir, String name, Clock clock) {
this.name = name;
this.clock = clock;
this.dir = new File(dataDir, name);
IOUtils.createDirectory(dir.getPath());
new File(dir + "/_txn_scoreboard").delete();
}
boolean exists() {
return 0 == engine.get().getStatus(newContext().getCairoSecurityContext(), new Path(), name);
}
TableWriter getWriter() {
return engine.get().getWriter(newContext().getCairoSecurityContext(), name);
}
void gc() {
synchronized (writeLock) {
Instant oldestToKeep = clock.instant().minus(Duration.ofDays(4));
SqlExecutionContext context = newContext();
int partitions = 0;
try {
List<String> removeList = new ArrayList<>();
for (String dirEntry : dir.list()) {
File partitionDir = new File(dir, dirEntry);
if (!partitionDir.isDirectory()) continue;
partitions++;
DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC"));
Instant partitionDay = Instant.from(formatter.parse(dirEntry.substring(0, 10) + "T00:00:00"));
if (partitionDay.isBefore(oldestToKeep))
removeList.add(dirEntry);
}
if (removeList.size() < partitions && !removeList.isEmpty()) {
issue("alter table " + name + " drop partition list " +
removeList.stream().map(dir -> "'" + dir + "'").collect(Collectors.joining(",")),
context);
}
} catch (SqlException e) {
log.log(Level.WARNING, "Failed to gc old metrics data in " + dir + " table " + name, e);
}
}
}
/**
* Repairs this db on corruption.
*
* @param e the exception indicating corruption
*/
private void repair(Exception e) {
log.log(Level.WARNING, "QuestDb seems corrupted, wiping data and starting over", e);
IOUtils.recursiveDeleteDir(dir);
IOUtils.createDirectory(dir.getPath());
ensureTablesExist();
}
void ensureColumnExists(String column, String columnType) throws SqlException {
if (columnNames().contains(column)) return;
issue("alter table " + name + " add column " + column + " " + columnType, newContext());
}
private Optional<Long> adjustOrDiscard(Instant at) {
long timestamp = at.toEpochMilli();
if (timestamp >= highestTimestampAdded) {
highestTimestampAdded = timestamp;
return Optional.of(timestamp);
}
if (timestamp >= highestTimestampAdded - 60 * 1000) return Optional.of(highestTimestampAdded);
return Optional.empty();
}
private List<String> columnNames() throws SqlException {
var context = newContext();
List<String> columns = new ArrayList<>();
try (RecordCursorFactory factory = issue("show columns from " + name, context).getRecordCursorFactory()) {
try (RecordCursor cursor = factory.getCursor(context)) {
Record record = cursor.getRecord();
while (cursor.hasNext()) {
columns.add(record.getStr(0).toString());
}
}
}
return columns;
}
}
} | class QuestMetricsDb extends AbstractComponent implements MetricsDb {
private static final Logger log = Logger.getLogger(QuestMetricsDb.class.getName());
private final Table nodeTable;
private final Table clusterTable;
private final Clock clock;
private final String dataDir;
private final CairoEngine engine;
private final ConcurrentResourcePool<SqlCompiler> sqlCompilerPool;
private final AtomicBoolean closed = new AtomicBoolean(false);
@Inject
public QuestMetricsDb() {
this(Defaults.getDefaults().underVespaHome("var/db/vespa/autoscaling"), Clock.systemUTC());
}
public QuestMetricsDb(String dataDir, Clock clock) {
this.clock = clock;
if (dataDir.startsWith(Defaults.getDefaults().vespaHome())
&& ! new File(Defaults.getDefaults().vespaHome()).exists())
dataDir = "data";
String logConfig = dataDir + "/quest-log.conf";
IOUtils.createDirectory(logConfig);
IOUtils.writeFile(new File(logConfig), new byte[0]);
System.setProperty("out", logConfig);
this.dataDir = dataDir;
engine = new CairoEngine(new DefaultCairoConfiguration(dataDir));
sqlCompilerPool = new ConcurrentResourcePool<>(() -> new SqlCompiler(engine()));
nodeTable = new Table(dataDir, "metrics", clock);
clusterTable = new Table(dataDir, "clusterMetrics", clock);
ensureTablesExist();
}
private CairoEngine engine() {
if (closed.get())
throw new IllegalStateException("Attempted to access QuestDb after calling close");
return engine;
}
@Override
public Clock clock() { return clock; }
@Override
public void addNodeMetrics(Collection<Pair<String, NodeMetricSnapshot>> snapshots) {
try {
addNodeMetricsBody(snapshots);
}
catch (CairoException e) {
if (e.getMessage().contains("Cannot read offset")) {
nodeTable.repair(e);
addNodeMetricsBody(snapshots);
}
}
}
private void addNodeMetricsBody(Collection<Pair<String, NodeMetricSnapshot>> snapshots) {
synchronized (nodeTable.writeLock) {
try (TableWriter writer = nodeTable.getWriter()) {
for (var snapshot : snapshots) {
Optional<Long> atMillis = nodeTable.adjustOrDiscard(snapshot.getSecond().at());
if (atMillis.isEmpty()) continue;
TableWriter.Row row = writer.newRow(atMillis.get() * 1000);
row.putStr(0, snapshot.getFirst());
row.putFloat(2, (float) snapshot.getSecond().load().cpu());
row.putFloat(3, (float) snapshot.getSecond().load().memory());
row.putFloat(4, (float) snapshot.getSecond().load().disk());
row.putLong(5, snapshot.getSecond().generation());
row.putBool(6, snapshot.getSecond().inService());
row.putBool(7, snapshot.getSecond().stable());
row.putFloat(8, (float) snapshot.getSecond().queryRate());
row.append();
}
writer.commit();
}
}
}
@Override
public void addClusterMetrics(ApplicationId application, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) {
try {
addClusterMetricsBody(application, snapshots);
}
catch (CairoException e) {
if (e.getMessage().contains("Cannot read offset")) {
clusterTable.repair(e);
addClusterMetricsBody(application, snapshots);
}
}
}
private void addClusterMetricsBody(ApplicationId applicationId, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) {
synchronized (clusterTable.writeLock) {
try (TableWriter writer = clusterTable.getWriter()) {
for (var snapshot : snapshots.entrySet()) {
Optional<Long> atMillis = clusterTable.adjustOrDiscard(snapshot.getValue().at());
if (atMillis.isEmpty()) continue;
TableWriter.Row row = writer.newRow(atMillis.get() * 1000);
row.putStr(0, applicationId.serializedForm());
row.putStr(1, snapshot.getKey().value());
row.putFloat(3, (float) snapshot.getValue().queryRate());
row.putFloat(4, (float) snapshot.getValue().writeRate());
row.append();
}
writer.commit();
}
}
}
@Override
public List<NodeTimeseries> getNodeTimeseries(Duration period, Set<String> hostnames) {
try {
var snapshots = getNodeSnapshots(clock.instant().minus(period), hostnames, newContext());
return snapshots.entrySet().stream()
.map(entry -> new NodeTimeseries(entry.getKey(), entry.getValue()))
.collect(Collectors.toList());
}
catch (SqlException e) {
throw new IllegalStateException("Could not read node timeseries data in Quest stored in " + dataDir, e);
}
}
@Override
public ClusterTimeseries getClusterTimeseries(ApplicationId applicationId, ClusterSpec.Id clusterId) {
try {
return getClusterSnapshots(applicationId, clusterId);
}
catch (SqlException e) {
throw new IllegalStateException("Could not read cluster timeseries data in Quest stored in " + dataDir, e);
}
}
@Override
public void gc() {
nodeTable.gc();
clusterTable.gc();
}
@Override
public void deconstruct() { close(); }
@Override
private void ensureTablesExist() {
if (nodeTable.exists())
ensureNodeTableIsUpdated();
else
createNodeTable();
if (clusterTable.exists())
ensureClusterTableIsUpdated();
else
createClusterTable();
}
private void ensureNodeTableIsUpdated() {
try {
} catch (Exception e) {
nodeTable.repair(e);
}
}
private void ensureClusterTableIsUpdated() {
try {
if (0 == engine().getStatus(newContext().getCairoSecurityContext(), new Path(), clusterTable.name)) {
}
} catch (Exception e) {
clusterTable.repair(e);
}
}
private void createNodeTable() {
try {
issue("create table " + nodeTable.name +
" (hostname string, at timestamp, cpu_util float, mem_total_util float, disk_util float," +
" application_generation long, inService boolean, stable boolean, queries_rate float)" +
" timestamp(at)" +
"PARTITION BY DAY;",
newContext());
}
catch (SqlException e) {
throw new IllegalStateException("Could not create Quest db table '" + nodeTable.name + "'", e);
}
}
private void createClusterTable() {
try {
issue("create table " + clusterTable.name +
" (application string, cluster string, at timestamp, queries_rate float, write_rate float)" +
" timestamp(at)" +
"PARTITION BY DAY;",
newContext());
}
catch (SqlException e) {
throw new IllegalStateException("Could not create Quest db table '" + clusterTable.name + "'", e);
}
}
private ListMap<String, NodeMetricSnapshot> getNodeSnapshots(Instant startTime,
Set<String> hostnames,
SqlExecutionContext context) throws SqlException {
DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC"));
String from = formatter.format(startTime).substring(0, 19) + ".000000Z";
String to = formatter.format(clock.instant()).substring(0, 19) + ".000000Z";
String sql = "select * from " + nodeTable.name + " where at between('" + from + "', '" + to + "');";
try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) {
ListMap<String, NodeMetricSnapshot> snapshots = new ListMap<>();
try (RecordCursor cursor = factory.getCursor(context)) {
Record record = cursor.getRecord();
while (cursor.hasNext()) {
String hostname = record.getStr(0).toString();
if (hostnames.isEmpty() || hostnames.contains(hostname)) {
snapshots.put(hostname,
new NodeMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(1) / 1000),
new Load(record.getFloat(2),
record.getFloat(3),
record.getFloat(4)),
record.getLong(5),
record.getBool(6),
record.getBool(7),
record.getFloat(8)));
}
}
}
return snapshots;
}
}
private ClusterTimeseries getClusterSnapshots(ApplicationId application, ClusterSpec.Id cluster) throws SqlException {
String sql = "select * from " + clusterTable.name;
var context = newContext();
try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) {
List<ClusterMetricSnapshot> snapshots = new ArrayList<>();
try (RecordCursor cursor = factory.getCursor(context)) {
Record record = cursor.getRecord();
while (cursor.hasNext()) {
String applicationIdString = record.getStr(0).toString();
if ( ! application.serializedForm().equals(applicationIdString)) continue;
String clusterId = record.getStr(1).toString();
if (cluster.value().equals(clusterId)) {
snapshots.add(new ClusterMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(2) / 1000),
record.getFloat(3),
record.getFloat(4)));
}
}
}
return new ClusterTimeseries(cluster, snapshots);
}
}
/** Issues an SQL statement against the QuestDb engine */
private CompiledQuery issue(String sql, SqlExecutionContext context) throws SqlException {
SqlCompiler sqlCompiler = sqlCompilerPool.alloc();
try {
return sqlCompiler.compile(sql, context);
} finally {
sqlCompilerPool.free(sqlCompiler);
}
}
private SqlExecutionContext newContext() {
return new SqlExecutionContextImpl(engine(), 1);
}
/** A questDb table */
private class Table {
private final Object writeLock = new Object();
private final String name;
private final Clock clock;
private final File dir;
private long highestTimestampAdded = 0;
Table(String dataDir, String name, Clock clock) {
this.name = name;
this.clock = clock;
this.dir = new File(dataDir, name);
IOUtils.createDirectory(dir.getPath());
new File(dir + "/_txn_scoreboard").delete();
}
boolean exists() {
return 0 == engine().getStatus(newContext().getCairoSecurityContext(), new Path(), name);
}
TableWriter getWriter() {
return engine().getWriter(newContext().getCairoSecurityContext(), name);
}
void gc() {
synchronized (writeLock) {
Instant oldestToKeep = clock.instant().minus(Duration.ofDays(4));
SqlExecutionContext context = newContext();
int partitions = 0;
try {
List<String> removeList = new ArrayList<>();
for (String dirEntry : dir.list()) {
File partitionDir = new File(dir, dirEntry);
if (!partitionDir.isDirectory()) continue;
partitions++;
DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC"));
Instant partitionDay = Instant.from(formatter.parse(dirEntry.substring(0, 10) + "T00:00:00"));
if (partitionDay.isBefore(oldestToKeep))
removeList.add(dirEntry);
}
if (removeList.size() < partitions && !removeList.isEmpty()) {
issue("alter table " + name + " drop partition list " +
removeList.stream().map(dir -> "'" + dir + "'").collect(Collectors.joining(",")),
context);
}
} catch (SqlException e) {
log.log(Level.WARNING, "Failed to gc old metrics data in " + dir + " table " + name, e);
}
}
}
/**
* Repairs this db on corruption.
*
* @param e the exception indicating corruption
*/
private void repair(Exception e) {
log.log(Level.WARNING, "QuestDb seems corrupted, wiping data and starting over", e);
IOUtils.recursiveDeleteDir(dir);
IOUtils.createDirectory(dir.getPath());
ensureTablesExist();
}
void ensureColumnExists(String column, String columnType) throws SqlException {
if (columnNames().contains(column)) return;
issue("alter table " + name + " add column " + column + " " + columnType, newContext());
}
private Optional<Long> adjustOrDiscard(Instant at) {
long timestamp = at.toEpochMilli();
if (timestamp >= highestTimestampAdded) {
highestTimestampAdded = timestamp;
return Optional.of(timestamp);
}
if (timestamp >= highestTimestampAdded - 60 * 1000) return Optional.of(highestTimestampAdded);
return Optional.empty();
}
private List<String> columnNames() throws SqlException {
var context = newContext();
List<String> columns = new ArrayList<>();
try (RecordCursorFactory factory = issue("show columns from " + name, context).getRecordCursorFactory()) {
try (RecordCursor cursor = factory.getCursor(context)) {
Record record = cursor.getRecord();
while (cursor.hasNext()) {
columns.add(record.getStr(0).toString());
}
}
}
return columns;
}
}
} |
you need to use if (closed.getAndSet(true)) return to make it atomic. If not there is a race betwene get() and set(true). | public void close() {
if (closed.get()) return;
closed.set(true);
synchronized (nodeTable.writeLock) {
synchronized (clusterTable.writeLock) {
for (SqlCompiler sqlCompiler : sqlCompilerPool)
sqlCompiler.close();
engine.close();
}
}
} | closed.set(true); | public void close() {
if (closed.getAndSet(true)) return;
synchronized (nodeTable.writeLock) {
synchronized (clusterTable.writeLock) {
for (SqlCompiler sqlCompiler : sqlCompilerPool)
sqlCompiler.close();
engine.close();
}
}
} | class QuestMetricsDb extends AbstractComponent implements MetricsDb {
private static final Logger log = Logger.getLogger(QuestMetricsDb.class.getName());
private final Table nodeTable;
private final Table clusterTable;
private final Clock clock;
private final String dataDir;
private final CairoEngine engine;
private final ConcurrentResourcePool<SqlCompiler> sqlCompilerPool;
private final AtomicBoolean closed = new AtomicBoolean(false);
@Inject
public QuestMetricsDb() {
this(Defaults.getDefaults().underVespaHome("var/db/vespa/autoscaling"), Clock.systemUTC());
}
public QuestMetricsDb(String dataDir, Clock clock) {
this.clock = clock;
if (dataDir.startsWith(Defaults.getDefaults().vespaHome())
&& ! new File(Defaults.getDefaults().vespaHome()).exists())
dataDir = "data";
String logConfig = dataDir + "/quest-log.conf";
IOUtils.createDirectory(logConfig);
IOUtils.writeFile(new File(logConfig), new byte[0]);
System.setProperty("out", logConfig);
this.dataDir = dataDir;
engine = new CairoEngine(new DefaultCairoConfiguration(dataDir));
sqlCompilerPool = new ConcurrentResourcePool<>(() -> new SqlCompiler(engine()));
nodeTable = new Table(dataDir, "metrics", clock);
clusterTable = new Table(dataDir, "clusterMetrics", clock);
ensureTablesExist();
}
private CairoEngine engine() {
if (closed.get())
throw new IllegalStateException("Attempted to access QuestDb after calling close");
return engine;
}
@Override
public Clock clock() { return clock; }
@Override
public void addNodeMetrics(Collection<Pair<String, NodeMetricSnapshot>> snapshots) {
try {
addNodeMetricsBody(snapshots);
}
catch (CairoException e) {
if (e.getMessage().contains("Cannot read offset")) {
nodeTable.repair(e);
addNodeMetricsBody(snapshots);
}
}
}
private void addNodeMetricsBody(Collection<Pair<String, NodeMetricSnapshot>> snapshots) {
synchronized (nodeTable.writeLock) {
try (TableWriter writer = nodeTable.getWriter()) {
for (var snapshot : snapshots) {
Optional<Long> atMillis = nodeTable.adjustOrDiscard(snapshot.getSecond().at());
if (atMillis.isEmpty()) continue;
TableWriter.Row row = writer.newRow(atMillis.get() * 1000);
row.putStr(0, snapshot.getFirst());
row.putFloat(2, (float) snapshot.getSecond().load().cpu());
row.putFloat(3, (float) snapshot.getSecond().load().memory());
row.putFloat(4, (float) snapshot.getSecond().load().disk());
row.putLong(5, snapshot.getSecond().generation());
row.putBool(6, snapshot.getSecond().inService());
row.putBool(7, snapshot.getSecond().stable());
row.putFloat(8, (float) snapshot.getSecond().queryRate());
row.append();
}
writer.commit();
}
}
}
@Override
public void addClusterMetrics(ApplicationId application, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) {
try {
addClusterMetricsBody(application, snapshots);
}
catch (CairoException e) {
if (e.getMessage().contains("Cannot read offset")) {
clusterTable.repair(e);
addClusterMetricsBody(application, snapshots);
}
}
}
private void addClusterMetricsBody(ApplicationId applicationId, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) {
synchronized (clusterTable.writeLock) {
try (TableWriter writer = clusterTable.getWriter()) {
for (var snapshot : snapshots.entrySet()) {
Optional<Long> atMillis = clusterTable.adjustOrDiscard(snapshot.getValue().at());
if (atMillis.isEmpty()) continue;
TableWriter.Row row = writer.newRow(atMillis.get() * 1000);
row.putStr(0, applicationId.serializedForm());
row.putStr(1, snapshot.getKey().value());
row.putFloat(3, (float) snapshot.getValue().queryRate());
row.putFloat(4, (float) snapshot.getValue().writeRate());
row.append();
}
writer.commit();
}
}
}
@Override
public List<NodeTimeseries> getNodeTimeseries(Duration period, Set<String> hostnames) {
try {
var snapshots = getNodeSnapshots(clock.instant().minus(period), hostnames, newContext());
return snapshots.entrySet().stream()
.map(entry -> new NodeTimeseries(entry.getKey(), entry.getValue()))
.collect(Collectors.toList());
}
catch (SqlException e) {
throw new IllegalStateException("Could not read node timeseries data in Quest stored in " + dataDir, e);
}
}
@Override
public ClusterTimeseries getClusterTimeseries(ApplicationId applicationId, ClusterSpec.Id clusterId) {
try {
return getClusterSnapshots(applicationId, clusterId);
}
catch (SqlException e) {
throw new IllegalStateException("Could not read cluster timeseries data in Quest stored in " + dataDir, e);
}
}
@Override
public void gc() {
nodeTable.gc();
clusterTable.gc();
}
@Override
public void deconstruct() { close(); }
@Override
private void ensureTablesExist() {
if (nodeTable.exists())
ensureNodeTableIsUpdated();
else
createNodeTable();
if (clusterTable.exists())
ensureClusterTableIsUpdated();
else
createClusterTable();
}
private void ensureNodeTableIsUpdated() {
try {
} catch (Exception e) {
nodeTable.repair(e);
}
}
private void ensureClusterTableIsUpdated() {
try {
if (0 == engine().getStatus(newContext().getCairoSecurityContext(), new Path(), clusterTable.name)) {
}
} catch (Exception e) {
clusterTable.repair(e);
}
}
private void createNodeTable() {
try {
issue("create table " + nodeTable.name +
" (hostname string, at timestamp, cpu_util float, mem_total_util float, disk_util float," +
" application_generation long, inService boolean, stable boolean, queries_rate float)" +
" timestamp(at)" +
"PARTITION BY DAY;",
newContext());
}
catch (SqlException e) {
throw new IllegalStateException("Could not create Quest db table '" + nodeTable.name + "'", e);
}
}
private void createClusterTable() {
try {
issue("create table " + clusterTable.name +
" (application string, cluster string, at timestamp, queries_rate float, write_rate float)" +
" timestamp(at)" +
"PARTITION BY DAY;",
newContext());
}
catch (SqlException e) {
throw new IllegalStateException("Could not create Quest db table '" + clusterTable.name + "'", e);
}
}
private ListMap<String, NodeMetricSnapshot> getNodeSnapshots(Instant startTime,
Set<String> hostnames,
SqlExecutionContext context) throws SqlException {
DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC"));
String from = formatter.format(startTime).substring(0, 19) + ".000000Z";
String to = formatter.format(clock.instant()).substring(0, 19) + ".000000Z";
String sql = "select * from " + nodeTable.name + " where at between('" + from + "', '" + to + "');";
try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) {
ListMap<String, NodeMetricSnapshot> snapshots = new ListMap<>();
try (RecordCursor cursor = factory.getCursor(context)) {
Record record = cursor.getRecord();
while (cursor.hasNext()) {
String hostname = record.getStr(0).toString();
if (hostnames.isEmpty() || hostnames.contains(hostname)) {
snapshots.put(hostname,
new NodeMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(1) / 1000),
new Load(record.getFloat(2),
record.getFloat(3),
record.getFloat(4)),
record.getLong(5),
record.getBool(6),
record.getBool(7),
record.getFloat(8)));
}
}
}
return snapshots;
}
}
private ClusterTimeseries getClusterSnapshots(ApplicationId application, ClusterSpec.Id cluster) throws SqlException {
String sql = "select * from " + clusterTable.name;
var context = newContext();
try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) {
List<ClusterMetricSnapshot> snapshots = new ArrayList<>();
try (RecordCursor cursor = factory.getCursor(context)) {
Record record = cursor.getRecord();
while (cursor.hasNext()) {
String applicationIdString = record.getStr(0).toString();
if ( ! application.serializedForm().equals(applicationIdString)) continue;
String clusterId = record.getStr(1).toString();
if (cluster.value().equals(clusterId)) {
snapshots.add(new ClusterMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(2) / 1000),
record.getFloat(3),
record.getFloat(4)));
}
}
}
return new ClusterTimeseries(cluster, snapshots);
}
}
/** Issues an SQL statement against the QuestDb engine */
private CompiledQuery issue(String sql, SqlExecutionContext context) throws SqlException {
SqlCompiler sqlCompiler = sqlCompilerPool.alloc();
try {
return sqlCompiler.compile(sql, context);
} finally {
sqlCompilerPool.free(sqlCompiler);
}
}
private SqlExecutionContext newContext() {
return new SqlExecutionContextImpl(engine(), 1);
}
/** A questDb table */
private class Table {
private final Object writeLock = new Object();
private final String name;
private final Clock clock;
private final File dir;
private long highestTimestampAdded = 0;
Table(String dataDir, String name, Clock clock) {
this.name = name;
this.clock = clock;
this.dir = new File(dataDir, name);
IOUtils.createDirectory(dir.getPath());
new File(dir + "/_txn_scoreboard").delete();
}
boolean exists() {
return 0 == engine().getStatus(newContext().getCairoSecurityContext(), new Path(), name);
}
TableWriter getWriter() {
return engine().getWriter(newContext().getCairoSecurityContext(), name);
}
void gc() {
synchronized (writeLock) {
Instant oldestToKeep = clock.instant().minus(Duration.ofDays(4));
SqlExecutionContext context = newContext();
int partitions = 0;
try {
List<String> removeList = new ArrayList<>();
for (String dirEntry : dir.list()) {
File partitionDir = new File(dir, dirEntry);
if (!partitionDir.isDirectory()) continue;
partitions++;
DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC"));
Instant partitionDay = Instant.from(formatter.parse(dirEntry.substring(0, 10) + "T00:00:00"));
if (partitionDay.isBefore(oldestToKeep))
removeList.add(dirEntry);
}
if (removeList.size() < partitions && !removeList.isEmpty()) {
issue("alter table " + name + " drop partition list " +
removeList.stream().map(dir -> "'" + dir + "'").collect(Collectors.joining(",")),
context);
}
} catch (SqlException e) {
log.log(Level.WARNING, "Failed to gc old metrics data in " + dir + " table " + name, e);
}
}
}
/**
* Repairs this db on corruption.
*
* @param e the exception indicating corruption
*/
private void repair(Exception e) {
log.log(Level.WARNING, "QuestDb seems corrupted, wiping data and starting over", e);
IOUtils.recursiveDeleteDir(dir);
IOUtils.createDirectory(dir.getPath());
ensureTablesExist();
}
void ensureColumnExists(String column, String columnType) throws SqlException {
if (columnNames().contains(column)) return;
issue("alter table " + name + " add column " + column + " " + columnType, newContext());
}
private Optional<Long> adjustOrDiscard(Instant at) {
long timestamp = at.toEpochMilli();
if (timestamp >= highestTimestampAdded) {
highestTimestampAdded = timestamp;
return Optional.of(timestamp);
}
if (timestamp >= highestTimestampAdded - 60 * 1000) return Optional.of(highestTimestampAdded);
return Optional.empty();
}
private List<String> columnNames() throws SqlException {
var context = newContext();
List<String> columns = new ArrayList<>();
try (RecordCursorFactory factory = issue("show columns from " + name, context).getRecordCursorFactory()) {
try (RecordCursor cursor = factory.getCursor(context)) {
Record record = cursor.getRecord();
while (cursor.hasNext()) {
columns.add(record.getStr(0).toString());
}
}
}
return columns;
}
}
} | class QuestMetricsDb extends AbstractComponent implements MetricsDb {
private static final Logger log = Logger.getLogger(QuestMetricsDb.class.getName());
private final Table nodeTable;
private final Table clusterTable;
private final Clock clock;
private final String dataDir;
private final CairoEngine engine;
private final ConcurrentResourcePool<SqlCompiler> sqlCompilerPool;
private final AtomicBoolean closed = new AtomicBoolean(false);
@Inject
public QuestMetricsDb() {
this(Defaults.getDefaults().underVespaHome("var/db/vespa/autoscaling"), Clock.systemUTC());
}
public QuestMetricsDb(String dataDir, Clock clock) {
this.clock = clock;
if (dataDir.startsWith(Defaults.getDefaults().vespaHome())
&& ! new File(Defaults.getDefaults().vespaHome()).exists())
dataDir = "data";
String logConfig = dataDir + "/quest-log.conf";
IOUtils.createDirectory(logConfig);
IOUtils.writeFile(new File(logConfig), new byte[0]);
System.setProperty("out", logConfig);
this.dataDir = dataDir;
engine = new CairoEngine(new DefaultCairoConfiguration(dataDir));
sqlCompilerPool = new ConcurrentResourcePool<>(() -> new SqlCompiler(engine()));
nodeTable = new Table(dataDir, "metrics", clock);
clusterTable = new Table(dataDir, "clusterMetrics", clock);
ensureTablesExist();
}
private CairoEngine engine() {
if (closed.get())
throw new IllegalStateException("Attempted to access QuestDb after calling close");
return engine;
}
@Override
public Clock clock() { return clock; }
@Override
public void addNodeMetrics(Collection<Pair<String, NodeMetricSnapshot>> snapshots) {
try {
addNodeMetricsBody(snapshots);
}
catch (CairoException e) {
if (e.getMessage().contains("Cannot read offset")) {
nodeTable.repair(e);
addNodeMetricsBody(snapshots);
}
}
}
private void addNodeMetricsBody(Collection<Pair<String, NodeMetricSnapshot>> snapshots) {
synchronized (nodeTable.writeLock) {
try (TableWriter writer = nodeTable.getWriter()) {
for (var snapshot : snapshots) {
Optional<Long> atMillis = nodeTable.adjustOrDiscard(snapshot.getSecond().at());
if (atMillis.isEmpty()) continue;
TableWriter.Row row = writer.newRow(atMillis.get() * 1000);
row.putStr(0, snapshot.getFirst());
row.putFloat(2, (float) snapshot.getSecond().load().cpu());
row.putFloat(3, (float) snapshot.getSecond().load().memory());
row.putFloat(4, (float) snapshot.getSecond().load().disk());
row.putLong(5, snapshot.getSecond().generation());
row.putBool(6, snapshot.getSecond().inService());
row.putBool(7, snapshot.getSecond().stable());
row.putFloat(8, (float) snapshot.getSecond().queryRate());
row.append();
}
writer.commit();
}
}
}
@Override
public void addClusterMetrics(ApplicationId application, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) {
try {
addClusterMetricsBody(application, snapshots);
}
catch (CairoException e) {
if (e.getMessage().contains("Cannot read offset")) {
clusterTable.repair(e);
addClusterMetricsBody(application, snapshots);
}
}
}
private void addClusterMetricsBody(ApplicationId applicationId, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) {
synchronized (clusterTable.writeLock) {
try (TableWriter writer = clusterTable.getWriter()) {
for (var snapshot : snapshots.entrySet()) {
Optional<Long> atMillis = clusterTable.adjustOrDiscard(snapshot.getValue().at());
if (atMillis.isEmpty()) continue;
TableWriter.Row row = writer.newRow(atMillis.get() * 1000);
row.putStr(0, applicationId.serializedForm());
row.putStr(1, snapshot.getKey().value());
row.putFloat(3, (float) snapshot.getValue().queryRate());
row.putFloat(4, (float) snapshot.getValue().writeRate());
row.append();
}
writer.commit();
}
}
}
@Override
public List<NodeTimeseries> getNodeTimeseries(Duration period, Set<String> hostnames) {
try {
var snapshots = getNodeSnapshots(clock.instant().minus(period), hostnames, newContext());
return snapshots.entrySet().stream()
.map(entry -> new NodeTimeseries(entry.getKey(), entry.getValue()))
.collect(Collectors.toList());
}
catch (SqlException e) {
throw new IllegalStateException("Could not read node timeseries data in Quest stored in " + dataDir, e);
}
}
@Override
public ClusterTimeseries getClusterTimeseries(ApplicationId applicationId, ClusterSpec.Id clusterId) {
try {
return getClusterSnapshots(applicationId, clusterId);
}
catch (SqlException e) {
throw new IllegalStateException("Could not read cluster timeseries data in Quest stored in " + dataDir, e);
}
}
@Override
public void gc() {
nodeTable.gc();
clusterTable.gc();
}
@Override
public void deconstruct() { close(); }
@Override
private void ensureTablesExist() {
if (nodeTable.exists())
ensureNodeTableIsUpdated();
else
createNodeTable();
if (clusterTable.exists())
ensureClusterTableIsUpdated();
else
createClusterTable();
}
private void ensureNodeTableIsUpdated() {
try {
} catch (Exception e) {
nodeTable.repair(e);
}
}
private void ensureClusterTableIsUpdated() {
try {
if (0 == engine().getStatus(newContext().getCairoSecurityContext(), new Path(), clusterTable.name)) {
}
} catch (Exception e) {
clusterTable.repair(e);
}
}
private void createNodeTable() {
try {
issue("create table " + nodeTable.name +
" (hostname string, at timestamp, cpu_util float, mem_total_util float, disk_util float," +
" application_generation long, inService boolean, stable boolean, queries_rate float)" +
" timestamp(at)" +
"PARTITION BY DAY;",
newContext());
}
catch (SqlException e) {
throw new IllegalStateException("Could not create Quest db table '" + nodeTable.name + "'", e);
}
}
/**
 * Creates the cluster metrics table, partitioned by day on the designated 'at' timestamp.
 *
 * Fix: added the missing space before PARTITION — the concatenation previously produced
 * "timestamp(at)PARTITION BY DAY;".
 */
private void createClusterTable() {
    try {
        issue("create table " + clusterTable.name +
              " (application string, cluster string, at timestamp, queries_rate float, write_rate float)" +
              " timestamp(at)" +
              " PARTITION BY DAY;",
              newContext());
    }
    catch (SqlException e) {
        throw new IllegalStateException("Could not create Quest db table '" + clusterTable.name + "'", e);
    }
}
/**
 * Reads all node metric snapshots recorded between startTime and now, grouped by hostname.
 *
 * @param hostnames the hostnames to include; an empty set means include all hosts
 */
private ListMap<String, NodeMetricSnapshot> getNodeSnapshots(Instant startTime,
                                                             Set<String> hostnames,
                                                             SqlExecutionContext context) throws SqlException {
    DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC"));
    // Build timestamp literals: truncate to whole seconds and append zero microseconds
    String from = formatter.format(startTime).substring(0, 19) + ".000000Z";
    String to = formatter.format(clock.instant()).substring(0, 19) + ".000000Z";
    String sql = "select * from " + nodeTable.name + " where at between('" + from + "', '" + to + "');";
    try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) {
        ListMap<String, NodeMetricSnapshot> snapshots = new ListMap<>();
        try (RecordCursor cursor = factory.getCursor(context)) {
            Record record = cursor.getRecord();
            while (cursor.hasNext()) {
                String hostname = record.getStr(0).toString();
                // Host filtering happens client-side; empty set means "all hosts"
                if (hostnames.isEmpty() || hostnames.contains(hostname)) {
                    // Column layout matches createNodeTable. The 'at' column is written in
                    // microseconds (see addNodeMetricsBody), hence the /1000 to get millis.
                    snapshots.put(hostname,
                                  new NodeMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(1) / 1000),
                                                         new Load(record.getFloat(2),
                                                                  record.getFloat(3),
                                                                  record.getFloat(4)),
                                                         record.getLong(5),
                                                         record.getBool(6),
                                                         record.getBool(7),
                                                         record.getFloat(8)));
                }
            }
        }
        return snapshots;
    }
}
/**
 * Reads the timeseries for the given application cluster by scanning the whole cluster
 * table and filtering rows client-side.
 */
private ClusterTimeseries getClusterSnapshots(ApplicationId application, ClusterSpec.Id cluster) throws SqlException {
    String sql = "select * from " + clusterTable.name;
    var context = newContext();
    try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) {
        List<ClusterMetricSnapshot> snapshots = new ArrayList<>();
        try (RecordCursor cursor = factory.getCursor(context)) {
            Record record = cursor.getRecord();
            while (cursor.hasNext()) {
                String applicationIdString = record.getStr(0).toString();
                if ( ! application.serializedForm().equals(applicationIdString)) continue;
                String clusterId = record.getStr(1).toString();
                if (cluster.value().equals(clusterId)) {
                    // 'at' (column 2) is stored in microseconds — convert to millis
                    snapshots.add(new ClusterMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(2) / 1000),
                                                            record.getFloat(3),
                                                            record.getFloat(4)));
                }
            }
        }
        return new ClusterTimeseries(cluster, snapshots);
    }
}
/** Compiles and executes the given SQL statement against the QuestDb engine, using a pooled compiler. */
private CompiledQuery issue(String sql, SqlExecutionContext context) throws SqlException {
    SqlCompiler compiler = sqlCompilerPool.alloc();
    try {
        CompiledQuery result = compiler.compile(sql, context);
        return result;
    }
    finally {
        sqlCompilerPool.free(compiler); // always return the compiler to the pool, even on failure
    }
}
// Creates a fresh SQL execution context against the (open) engine.
// The '1' argument is presumably the worker count — confirm against the QuestDb API.
private SqlExecutionContext newContext() {
    return new SqlExecutionContextImpl(engine(), 1);
}
/** A questDb table */
private class Table {
    private final Object writeLock = new Object();  // serializes writes and gc on this table
    private final String name;
    private final Clock clock;
    private final File dir;                         // on-disk directory backing this table
    private long highestTimestampAdded = 0;         // highest 'at' value written so far, in millis
    Table(String dataDir, String name, Clock clock) {
        this.name = name;
        this.clock = clock;
        this.dir = new File(dataDir, name);
        IOUtils.createDirectory(dir.getPath());
        // Delete any leftover scoreboard file — presumably stale state from an unclean
        // shutdown that would otherwise block the writer; confirm against QuestDb behavior.
        new File(dir + "/_txn_scoreboard").delete();
    }
    /** Returns whether this table exists in the engine (status code 0). */
    boolean exists() {
        return 0 == engine().getStatus(newContext().getCairoSecurityContext(), new Path(), name);
    }
    /** Returns a writer for this table. Caller must close it; callers synchronize on writeLock. */
    TableWriter getWriter() {
        return engine().getWriter(newContext().getCairoSecurityContext(), name);
    }
    /** Drops day partitions older than 4 days, while always keeping at least one partition. */
    void gc() {
        synchronized (writeLock) {
            Instant oldestToKeep = clock.instant().minus(Duration.ofDays(4));
            SqlExecutionContext context = newContext();
            int partitions = 0;
            try {
                List<String> removeList = new ArrayList<>();
                for (String dirEntry : dir.list()) {
                    File partitionDir = new File(dir, dirEntry);
                    if (!partitionDir.isDirectory()) continue;
                    partitions++;
                    // Partition directories are named by date; parse the leading yyyy-MM-dd
                    DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC"));
                    Instant partitionDay = Instant.from(formatter.parse(dirEntry.substring(0, 10) + "T00:00:00"));
                    if (partitionDay.isBefore(oldestToKeep))
                        removeList.add(dirEntry);
                }
                // removeList.size() < partitions guarantees at least one partition survives
                if (removeList.size() < partitions && !removeList.isEmpty()) {
                    issue("alter table " + name + " drop partition list " +
                          removeList.stream().map(dir -> "'" + dir + "'").collect(Collectors.joining(",")),
                          context);
                }
            } catch (SqlException e) {
                // gc is best-effort: log and try again on the next invocation
                log.log(Level.WARNING, "Failed to gc old metrics data in " + dir + " table " + name, e);
            }
        }
    }
    /**
     * Repairs this db on corruption by wiping the table directory and recreating the schema.
     *
     * @param e the exception indicating corruption
     */
    private void repair(Exception e) {
        log.log(Level.WARNING, "QuestDb seems corrupted, wiping data and starting over", e);
        IOUtils.recursiveDeleteDir(dir);
        IOUtils.createDirectory(dir.getPath());
        ensureTablesExist();
    }
    /** Adds the given column to this table unless it is already present. */
    void ensureColumnExists(String column, String columnType) throws SqlException {
        if (columnNames().contains(column)) return;
        issue("alter table " + name + " add column " + column + " " + columnType, newContext());
    }
    /**
     * Returns the timestamp (millis) to record for a sample taken at the given instant, or
     * empty if the sample should be discarded. Written timestamps never decrease —
     * presumably because QuestDb requires non-decreasing designated timestamps; confirm.
     */
    private Optional<Long> adjustOrDiscard(Instant at) {
        long timestamp = at.toEpochMilli();
        if (timestamp >= highestTimestampAdded) {
            highestTimestampAdded = timestamp;
            return Optional.of(timestamp);
        }
        // Samples up to a minute behind the newest are snapped forward to it
        if (timestamp >= highestTimestampAdded - 60 * 1000) return Optional.of(highestTimestampAdded);
        // Older samples are dropped
        return Optional.empty();
    }
    /** Lists this table's column names via 'show columns'. */
    private List<String> columnNames() throws SqlException {
        var context = newContext();
        List<String> columns = new ArrayList<>();
        try (RecordCursorFactory factory = issue("show columns from " + name, context).getRecordCursorFactory()) {
            try (RecordCursor cursor = factory.getCursor(context)) {
                Record record = cursor.getRecord();
                while (cursor.hasNext()) {
                    columns.add(record.getStr(0).toString());
                }
            }
        }
        return columns;
    }
}
} |
Thanks! Let's try this then ... | public void close() {
// Atomically claim the close: getAndSet makes exactly one caller proceed past this point.
// The previous get() + set(true) was a check-then-act race allowing two concurrent closers.
if (closed.getAndSet(true)) return;
// Hold both table write locks so no writer is mid-append while we tear down.
synchronized (nodeTable.writeLock) {
    synchronized (clusterTable.writeLock) {
        for (SqlCompiler sqlCompiler : sqlCompilerPool)
            sqlCompiler.close();
        engine.close();
    }
}
} | closed.set(true); | public void close() {
if (closed.getAndSet(true)) return;
synchronized (nodeTable.writeLock) {
synchronized (clusterTable.writeLock) {
for (SqlCompiler sqlCompiler : sqlCompilerPool)
sqlCompiler.close();
engine.close();
}
}
} | class QuestMetricsDb extends AbstractComponent implements MetricsDb {
private static final Logger log = Logger.getLogger(QuestMetricsDb.class.getName());
private final Table nodeTable;
private final Table clusterTable;
private final Clock clock;
private final String dataDir;
private final CairoEngine engine;
private final ConcurrentResourcePool<SqlCompiler> sqlCompilerPool;
private final AtomicBoolean closed = new AtomicBoolean(false);
@Inject
public QuestMetricsDb() {
this(Defaults.getDefaults().underVespaHome("var/db/vespa/autoscaling"), Clock.systemUTC());
}
public QuestMetricsDb(String dataDir, Clock clock) {
this.clock = clock;
if (dataDir.startsWith(Defaults.getDefaults().vespaHome())
&& ! new File(Defaults.getDefaults().vespaHome()).exists())
dataDir = "data";
String logConfig = dataDir + "/quest-log.conf";
IOUtils.createDirectory(logConfig);
IOUtils.writeFile(new File(logConfig), new byte[0]);
System.setProperty("out", logConfig);
this.dataDir = dataDir;
engine = new CairoEngine(new DefaultCairoConfiguration(dataDir));
sqlCompilerPool = new ConcurrentResourcePool<>(() -> new SqlCompiler(engine()));
nodeTable = new Table(dataDir, "metrics", clock);
clusterTable = new Table(dataDir, "clusterMetrics", clock);
ensureTablesExist();
}
/** Returns the engine, failing fast if this db has been closed, to prevent use-after-close. */
private CairoEngine engine() {
    if (closed.get())
        throw new IllegalStateException("Attempted to access QuestDb after calling close");
    return engine;
}
@Override
public Clock clock() { return clock; }
/**
 * Adds the given node metric snapshots, wiping and recreating the node table once
 * if it is found to be corrupted.
 *
 * Fix: exceptions whose message did not match were previously swallowed silently,
 * hiding genuine write failures; they are now rethrown. Also guards against a null message.
 */
@Override
public void addNodeMetrics(Collection<Pair<String, NodeMetricSnapshot>> snapshots) {
    try {
        addNodeMetricsBody(snapshots);
    }
    catch (CairoException e) {
        // "Cannot read offset" is the symptom of on-disk corruption: repair and retry once
        if (e.getMessage() != null && e.getMessage().contains("Cannot read offset")) {
            nodeTable.repair(e);
            addNodeMetricsBody(snapshots);
        }
        else {
            throw e;
        }
    }
}
private void addNodeMetricsBody(Collection<Pair<String, NodeMetricSnapshot>> snapshots) {
synchronized (nodeTable.writeLock) {
try (TableWriter writer = nodeTable.getWriter()) {
for (var snapshot : snapshots) {
Optional<Long> atMillis = nodeTable.adjustOrDiscard(snapshot.getSecond().at());
if (atMillis.isEmpty()) continue;
TableWriter.Row row = writer.newRow(atMillis.get() * 1000);
row.putStr(0, snapshot.getFirst());
row.putFloat(2, (float) snapshot.getSecond().load().cpu());
row.putFloat(3, (float) snapshot.getSecond().load().memory());
row.putFloat(4, (float) snapshot.getSecond().load().disk());
row.putLong(5, snapshot.getSecond().generation());
row.putBool(6, snapshot.getSecond().inService());
row.putBool(7, snapshot.getSecond().stable());
row.putFloat(8, (float) snapshot.getSecond().queryRate());
row.append();
}
writer.commit();
}
}
}
/**
 * Adds the given cluster metric snapshots, wiping and recreating the cluster table once
 * if it is found to be corrupted.
 *
 * Fix: exceptions whose message did not match were previously swallowed silently,
 * hiding genuine write failures; they are now rethrown. Also guards against a null message.
 */
@Override
public void addClusterMetrics(ApplicationId application, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) {
    try {
        addClusterMetricsBody(application, snapshots);
    }
    catch (CairoException e) {
        // "Cannot read offset" is the symptom of on-disk corruption: repair and retry once
        if (e.getMessage() != null && e.getMessage().contains("Cannot read offset")) {
            clusterTable.repair(e);
            addClusterMetricsBody(application, snapshots);
        }
        else {
            throw e;
        }
    }
}
private void addClusterMetricsBody(ApplicationId applicationId, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) {
synchronized (clusterTable.writeLock) {
try (TableWriter writer = clusterTable.getWriter()) {
for (var snapshot : snapshots.entrySet()) {
Optional<Long> atMillis = clusterTable.adjustOrDiscard(snapshot.getValue().at());
if (atMillis.isEmpty()) continue;
TableWriter.Row row = writer.newRow(atMillis.get() * 1000);
row.putStr(0, applicationId.serializedForm());
row.putStr(1, snapshot.getKey().value());
row.putFloat(3, (float) snapshot.getValue().queryRate());
row.putFloat(4, (float) snapshot.getValue().writeRate());
row.append();
}
writer.commit();
}
}
}
@Override
public List<NodeTimeseries> getNodeTimeseries(Duration period, Set<String> hostnames) {
try {
var snapshots = getNodeSnapshots(clock.instant().minus(period), hostnames, newContext());
return snapshots.entrySet().stream()
.map(entry -> new NodeTimeseries(entry.getKey(), entry.getValue()))
.collect(Collectors.toList());
}
catch (SqlException e) {
throw new IllegalStateException("Could not read node timeseries data in Quest stored in " + dataDir, e);
}
}
@Override
public ClusterTimeseries getClusterTimeseries(ApplicationId applicationId, ClusterSpec.Id clusterId) {
try {
return getClusterSnapshots(applicationId, clusterId);
}
catch (SqlException e) {
throw new IllegalStateException("Could not read cluster timeseries data in Quest stored in " + dataDir, e);
}
}
@Override
public void gc() {
nodeTable.gc();
clusterTable.gc();
}
@Override
public void deconstruct() { close(); }
/**
 * Creates the node and cluster tables if they don't exist, and otherwise
 * runs schema migration on the existing tables.
 *
 * Fix: removed the {@code @Override} annotation that was placed on this method —
 * a private method cannot override anything, so the annotation is a compile error.
 */
private void ensureTablesExist() {
    if (nodeTable.exists())
        ensureNodeTableIsUpdated();
    else
        createNodeTable();
    if (clusterTable.exists())
        ensureClusterTableIsUpdated();
    else
        createClusterTable();
}
private void ensureNodeTableIsUpdated() {
try {
} catch (Exception e) {
nodeTable.repair(e);
}
}
private void ensureClusterTableIsUpdated() {
try {
if (0 == engine().getStatus(newContext().getCairoSecurityContext(), new Path(), clusterTable.name)) {
}
} catch (Exception e) {
clusterTable.repair(e);
}
}
/**
 * Creates the node metrics table, partitioned by day on the designated 'at' timestamp.
 *
 * Fix: added the missing space before PARTITION — the concatenation previously produced
 * "timestamp(at)PARTITION BY DAY;".
 */
private void createNodeTable() {
    try {
        issue("create table " + nodeTable.name +
              " (hostname string, at timestamp, cpu_util float, mem_total_util float, disk_util float," +
              " application_generation long, inService boolean, stable boolean, queries_rate float)" +
              " timestamp(at)" +
              " PARTITION BY DAY;",
              newContext());
    }
    catch (SqlException e) {
        throw new IllegalStateException("Could not create Quest db table '" + nodeTable.name + "'", e);
    }
}
/**
 * Creates the cluster metrics table, partitioned by day on the designated 'at' timestamp.
 *
 * Fix: added the missing space before PARTITION — the concatenation previously produced
 * "timestamp(at)PARTITION BY DAY;".
 */
private void createClusterTable() {
    try {
        issue("create table " + clusterTable.name +
              " (application string, cluster string, at timestamp, queries_rate float, write_rate float)" +
              " timestamp(at)" +
              " PARTITION BY DAY;",
              newContext());
    }
    catch (SqlException e) {
        throw new IllegalStateException("Could not create Quest db table '" + clusterTable.name + "'", e);
    }
}
private ListMap<String, NodeMetricSnapshot> getNodeSnapshots(Instant startTime,
Set<String> hostnames,
SqlExecutionContext context) throws SqlException {
DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC"));
String from = formatter.format(startTime).substring(0, 19) + ".000000Z";
String to = formatter.format(clock.instant()).substring(0, 19) + ".000000Z";
String sql = "select * from " + nodeTable.name + " where at between('" + from + "', '" + to + "');";
try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) {
ListMap<String, NodeMetricSnapshot> snapshots = new ListMap<>();
try (RecordCursor cursor = factory.getCursor(context)) {
Record record = cursor.getRecord();
while (cursor.hasNext()) {
String hostname = record.getStr(0).toString();
if (hostnames.isEmpty() || hostnames.contains(hostname)) {
snapshots.put(hostname,
new NodeMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(1) / 1000),
new Load(record.getFloat(2),
record.getFloat(3),
record.getFloat(4)),
record.getLong(5),
record.getBool(6),
record.getBool(7),
record.getFloat(8)));
}
}
}
return snapshots;
}
}
private ClusterTimeseries getClusterSnapshots(ApplicationId application, ClusterSpec.Id cluster) throws SqlException {
String sql = "select * from " + clusterTable.name;
var context = newContext();
try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) {
List<ClusterMetricSnapshot> snapshots = new ArrayList<>();
try (RecordCursor cursor = factory.getCursor(context)) {
Record record = cursor.getRecord();
while (cursor.hasNext()) {
String applicationIdString = record.getStr(0).toString();
if ( ! application.serializedForm().equals(applicationIdString)) continue;
String clusterId = record.getStr(1).toString();
if (cluster.value().equals(clusterId)) {
snapshots.add(new ClusterMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(2) / 1000),
record.getFloat(3),
record.getFloat(4)));
}
}
}
return new ClusterTimeseries(cluster, snapshots);
}
}
/** Issues an SQL statement against the QuestDb engine */
private CompiledQuery issue(String sql, SqlExecutionContext context) throws SqlException {
SqlCompiler sqlCompiler = sqlCompilerPool.alloc();
try {
return sqlCompiler.compile(sql, context);
} finally {
sqlCompilerPool.free(sqlCompiler);
}
}
private SqlExecutionContext newContext() {
return new SqlExecutionContextImpl(engine(), 1);
}
/** A questDb table */
private class Table {
private final Object writeLock = new Object();
private final String name;
private final Clock clock;
private final File dir;
private long highestTimestampAdded = 0;
Table(String dataDir, String name, Clock clock) {
this.name = name;
this.clock = clock;
this.dir = new File(dataDir, name);
IOUtils.createDirectory(dir.getPath());
new File(dir + "/_txn_scoreboard").delete();
}
boolean exists() {
return 0 == engine().getStatus(newContext().getCairoSecurityContext(), new Path(), name);
}
TableWriter getWriter() {
return engine().getWriter(newContext().getCairoSecurityContext(), name);
}
void gc() {
synchronized (writeLock) {
Instant oldestToKeep = clock.instant().minus(Duration.ofDays(4));
SqlExecutionContext context = newContext();
int partitions = 0;
try {
List<String> removeList = new ArrayList<>();
for (String dirEntry : dir.list()) {
File partitionDir = new File(dir, dirEntry);
if (!partitionDir.isDirectory()) continue;
partitions++;
DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC"));
Instant partitionDay = Instant.from(formatter.parse(dirEntry.substring(0, 10) + "T00:00:00"));
if (partitionDay.isBefore(oldestToKeep))
removeList.add(dirEntry);
}
if (removeList.size() < partitions && !removeList.isEmpty()) {
issue("alter table " + name + " drop partition list " +
removeList.stream().map(dir -> "'" + dir + "'").collect(Collectors.joining(",")),
context);
}
} catch (SqlException e) {
log.log(Level.WARNING, "Failed to gc old metrics data in " + dir + " table " + name, e);
}
}
}
/**
* Repairs this db on corruption.
*
* @param e the exception indicating corruption
*/
private void repair(Exception e) {
log.log(Level.WARNING, "QuestDb seems corrupted, wiping data and starting over", e);
IOUtils.recursiveDeleteDir(dir);
IOUtils.createDirectory(dir.getPath());
ensureTablesExist();
}
void ensureColumnExists(String column, String columnType) throws SqlException {
if (columnNames().contains(column)) return;
issue("alter table " + name + " add column " + column + " " + columnType, newContext());
}
private Optional<Long> adjustOrDiscard(Instant at) {
long timestamp = at.toEpochMilli();
if (timestamp >= highestTimestampAdded) {
highestTimestampAdded = timestamp;
return Optional.of(timestamp);
}
if (timestamp >= highestTimestampAdded - 60 * 1000) return Optional.of(highestTimestampAdded);
return Optional.empty();
}
private List<String> columnNames() throws SqlException {
var context = newContext();
List<String> columns = new ArrayList<>();
try (RecordCursorFactory factory = issue("show columns from " + name, context).getRecordCursorFactory()) {
try (RecordCursor cursor = factory.getCursor(context)) {
Record record = cursor.getRecord();
while (cursor.hasNext()) {
columns.add(record.getStr(0).toString());
}
}
}
return columns;
}
}
} | class QuestMetricsDb extends AbstractComponent implements MetricsDb {
private static final Logger log = Logger.getLogger(QuestMetricsDb.class.getName());
private final Table nodeTable;
private final Table clusterTable;
private final Clock clock;
private final String dataDir;
private final CairoEngine engine;
private final ConcurrentResourcePool<SqlCompiler> sqlCompilerPool;
private final AtomicBoolean closed = new AtomicBoolean(false);
@Inject
public QuestMetricsDb() {
this(Defaults.getDefaults().underVespaHome("var/db/vespa/autoscaling"), Clock.systemUTC());
}
public QuestMetricsDb(String dataDir, Clock clock) {
this.clock = clock;
if (dataDir.startsWith(Defaults.getDefaults().vespaHome())
&& ! new File(Defaults.getDefaults().vespaHome()).exists())
dataDir = "data";
String logConfig = dataDir + "/quest-log.conf";
IOUtils.createDirectory(logConfig);
IOUtils.writeFile(new File(logConfig), new byte[0]);
System.setProperty("out", logConfig);
this.dataDir = dataDir;
engine = new CairoEngine(new DefaultCairoConfiguration(dataDir));
sqlCompilerPool = new ConcurrentResourcePool<>(() -> new SqlCompiler(engine()));
nodeTable = new Table(dataDir, "metrics", clock);
clusterTable = new Table(dataDir, "clusterMetrics", clock);
ensureTablesExist();
}
private CairoEngine engine() {
if (closed.get())
throw new IllegalStateException("Attempted to access QuestDb after calling close");
return engine;
}
@Override
public Clock clock() { return clock; }
@Override
public void addNodeMetrics(Collection<Pair<String, NodeMetricSnapshot>> snapshots) {
try {
addNodeMetricsBody(snapshots);
}
catch (CairoException e) {
if (e.getMessage().contains("Cannot read offset")) {
nodeTable.repair(e);
addNodeMetricsBody(snapshots);
}
}
}
private void addNodeMetricsBody(Collection<Pair<String, NodeMetricSnapshot>> snapshots) {
synchronized (nodeTable.writeLock) {
try (TableWriter writer = nodeTable.getWriter()) {
for (var snapshot : snapshots) {
Optional<Long> atMillis = nodeTable.adjustOrDiscard(snapshot.getSecond().at());
if (atMillis.isEmpty()) continue;
TableWriter.Row row = writer.newRow(atMillis.get() * 1000);
row.putStr(0, snapshot.getFirst());
row.putFloat(2, (float) snapshot.getSecond().load().cpu());
row.putFloat(3, (float) snapshot.getSecond().load().memory());
row.putFloat(4, (float) snapshot.getSecond().load().disk());
row.putLong(5, snapshot.getSecond().generation());
row.putBool(6, snapshot.getSecond().inService());
row.putBool(7, snapshot.getSecond().stable());
row.putFloat(8, (float) snapshot.getSecond().queryRate());
row.append();
}
writer.commit();
}
}
}
@Override
public void addClusterMetrics(ApplicationId application, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) {
try {
addClusterMetricsBody(application, snapshots);
}
catch (CairoException e) {
if (e.getMessage().contains("Cannot read offset")) {
clusterTable.repair(e);
addClusterMetricsBody(application, snapshots);
}
}
}
private void addClusterMetricsBody(ApplicationId applicationId, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) {
synchronized (clusterTable.writeLock) {
try (TableWriter writer = clusterTable.getWriter()) {
for (var snapshot : snapshots.entrySet()) {
Optional<Long> atMillis = clusterTable.adjustOrDiscard(snapshot.getValue().at());
if (atMillis.isEmpty()) continue;
TableWriter.Row row = writer.newRow(atMillis.get() * 1000);
row.putStr(0, applicationId.serializedForm());
row.putStr(1, snapshot.getKey().value());
row.putFloat(3, (float) snapshot.getValue().queryRate());
row.putFloat(4, (float) snapshot.getValue().writeRate());
row.append();
}
writer.commit();
}
}
}
@Override
public List<NodeTimeseries> getNodeTimeseries(Duration period, Set<String> hostnames) {
try {
var snapshots = getNodeSnapshots(clock.instant().minus(period), hostnames, newContext());
return snapshots.entrySet().stream()
.map(entry -> new NodeTimeseries(entry.getKey(), entry.getValue()))
.collect(Collectors.toList());
}
catch (SqlException e) {
throw new IllegalStateException("Could not read node timeseries data in Quest stored in " + dataDir, e);
}
}
@Override
public ClusterTimeseries getClusterTimeseries(ApplicationId applicationId, ClusterSpec.Id clusterId) {
try {
return getClusterSnapshots(applicationId, clusterId);
}
catch (SqlException e) {
throw new IllegalStateException("Could not read cluster timeseries data in Quest stored in " + dataDir, e);
}
}
@Override
public void gc() {
nodeTable.gc();
clusterTable.gc();
}
@Override
public void deconstruct() { close(); }
/**
 * Creates the node and cluster tables if they don't exist, and otherwise
 * runs schema migration on the existing tables.
 *
 * Fix: removed the {@code @Override} annotation that was placed on this method —
 * a private method cannot override anything, so the annotation is a compile error.
 */
private void ensureTablesExist() {
    if (nodeTable.exists())
        ensureNodeTableIsUpdated();
    else
        createNodeTable();
    if (clusterTable.exists())
        ensureClusterTableIsUpdated();
    else
        createClusterTable();
}
private void ensureNodeTableIsUpdated() {
try {
} catch (Exception e) {
nodeTable.repair(e);
}
}
private void ensureClusterTableIsUpdated() {
try {
if (0 == engine().getStatus(newContext().getCairoSecurityContext(), new Path(), clusterTable.name)) {
}
} catch (Exception e) {
clusterTable.repair(e);
}
}
/**
 * Creates the node metrics table, partitioned by day on the designated 'at' timestamp.
 *
 * Fix: added the missing space before PARTITION — the concatenation previously produced
 * "timestamp(at)PARTITION BY DAY;".
 */
private void createNodeTable() {
    try {
        issue("create table " + nodeTable.name +
              " (hostname string, at timestamp, cpu_util float, mem_total_util float, disk_util float," +
              " application_generation long, inService boolean, stable boolean, queries_rate float)" +
              " timestamp(at)" +
              " PARTITION BY DAY;",
              newContext());
    }
    catch (SqlException e) {
        throw new IllegalStateException("Could not create Quest db table '" + nodeTable.name + "'", e);
    }
}
/**
 * Creates the cluster metrics table, partitioned by day on the designated 'at' timestamp.
 *
 * Fix: added the missing space before PARTITION — the concatenation previously produced
 * "timestamp(at)PARTITION BY DAY;".
 */
private void createClusterTable() {
    try {
        issue("create table " + clusterTable.name +
              " (application string, cluster string, at timestamp, queries_rate float, write_rate float)" +
              " timestamp(at)" +
              " PARTITION BY DAY;",
              newContext());
    }
    catch (SqlException e) {
        throw new IllegalStateException("Could not create Quest db table '" + clusterTable.name + "'", e);
    }
}
private ListMap<String, NodeMetricSnapshot> getNodeSnapshots(Instant startTime,
Set<String> hostnames,
SqlExecutionContext context) throws SqlException {
DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC"));
String from = formatter.format(startTime).substring(0, 19) + ".000000Z";
String to = formatter.format(clock.instant()).substring(0, 19) + ".000000Z";
String sql = "select * from " + nodeTable.name + " where at between('" + from + "', '" + to + "');";
try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) {
ListMap<String, NodeMetricSnapshot> snapshots = new ListMap<>();
try (RecordCursor cursor = factory.getCursor(context)) {
Record record = cursor.getRecord();
while (cursor.hasNext()) {
String hostname = record.getStr(0).toString();
if (hostnames.isEmpty() || hostnames.contains(hostname)) {
snapshots.put(hostname,
new NodeMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(1) / 1000),
new Load(record.getFloat(2),
record.getFloat(3),
record.getFloat(4)),
record.getLong(5),
record.getBool(6),
record.getBool(7),
record.getFloat(8)));
}
}
}
return snapshots;
}
}
private ClusterTimeseries getClusterSnapshots(ApplicationId application, ClusterSpec.Id cluster) throws SqlException {
String sql = "select * from " + clusterTable.name;
var context = newContext();
try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) {
List<ClusterMetricSnapshot> snapshots = new ArrayList<>();
try (RecordCursor cursor = factory.getCursor(context)) {
Record record = cursor.getRecord();
while (cursor.hasNext()) {
String applicationIdString = record.getStr(0).toString();
if ( ! application.serializedForm().equals(applicationIdString)) continue;
String clusterId = record.getStr(1).toString();
if (cluster.value().equals(clusterId)) {
snapshots.add(new ClusterMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(2) / 1000),
record.getFloat(3),
record.getFloat(4)));
}
}
}
return new ClusterTimeseries(cluster, snapshots);
}
}
/** Issues an SQL statement against the QuestDb engine */
private CompiledQuery issue(String sql, SqlExecutionContext context) throws SqlException {
SqlCompiler sqlCompiler = sqlCompilerPool.alloc();
try {
return sqlCompiler.compile(sql, context);
} finally {
sqlCompilerPool.free(sqlCompiler);
}
}
private SqlExecutionContext newContext() {
return new SqlExecutionContextImpl(engine(), 1);
}
/** A questDb table */
private class Table {
private final Object writeLock = new Object();
private final String name;
private final Clock clock;
private final File dir;
private long highestTimestampAdded = 0;
Table(String dataDir, String name, Clock clock) {
this.name = name;
this.clock = clock;
this.dir = new File(dataDir, name);
IOUtils.createDirectory(dir.getPath());
new File(dir + "/_txn_scoreboard").delete();
}
boolean exists() {
return 0 == engine().getStatus(newContext().getCairoSecurityContext(), new Path(), name);
}
TableWriter getWriter() {
return engine().getWriter(newContext().getCairoSecurityContext(), name);
}
void gc() {
synchronized (writeLock) {
Instant oldestToKeep = clock.instant().minus(Duration.ofDays(4));
SqlExecutionContext context = newContext();
int partitions = 0;
try {
List<String> removeList = new ArrayList<>();
for (String dirEntry : dir.list()) {
File partitionDir = new File(dir, dirEntry);
if (!partitionDir.isDirectory()) continue;
partitions++;
DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC"));
Instant partitionDay = Instant.from(formatter.parse(dirEntry.substring(0, 10) + "T00:00:00"));
if (partitionDay.isBefore(oldestToKeep))
removeList.add(dirEntry);
}
if (removeList.size() < partitions && !removeList.isEmpty()) {
issue("alter table " + name + " drop partition list " +
removeList.stream().map(dir -> "'" + dir + "'").collect(Collectors.joining(",")),
context);
}
} catch (SqlException e) {
log.log(Level.WARNING, "Failed to gc old metrics data in " + dir + " table " + name, e);
}
}
}
/**
* Repairs this db on corruption.
*
* @param e the exception indicating corruption
*/
private void repair(Exception e) {
log.log(Level.WARNING, "QuestDb seems corrupted, wiping data and starting over", e);
IOUtils.recursiveDeleteDir(dir);
IOUtils.createDirectory(dir.getPath());
ensureTablesExist();
}
void ensureColumnExists(String column, String columnType) throws SqlException {
if (columnNames().contains(column)) return;
issue("alter table " + name + " add column " + column + " " + columnType, newContext());
}
private Optional<Long> adjustOrDiscard(Instant at) {
long timestamp = at.toEpochMilli();
if (timestamp >= highestTimestampAdded) {
highestTimestampAdded = timestamp;
return Optional.of(timestamp);
}
if (timestamp >= highestTimestampAdded - 60 * 1000) return Optional.of(highestTimestampAdded);
return Optional.empty();
}
private List<String> columnNames() throws SqlException {
var context = newContext();
List<String> columns = new ArrayList<>();
try (RecordCursorFactory factory = issue("show columns from " + name, context).getRecordCursorFactory()) {
try (RecordCursor cursor = factory.getCursor(context)) {
Record record = cursor.getRecord();
while (cursor.hasNext()) {
columns.add(record.getStr(0).toString());
}
}
}
return columns;
}
}
} |
Want to migrate to the same format as used by ``` /application/v4/tenant/{tenant}/application/{application}/deployment /application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job ``` | static HttpResponse runResponse(Map<RunId, Run> runs, Optional<String> limitStr, URI baseUriForJobType) {
Slime slime = new Slime();
Cursor cursor = slime.setObject();
if (limitStr.isEmpty())
runs.forEach((runid, run) -> runToSlime(cursor.setObject(Long.toString(runid.number())), run, baseUriForJobType));
else {
int limit = limitStr.map(Integer::parseInt).orElse(Integer.MAX_VALUE);
toSlime(cursor.setArray("runs"), runs.values().stream()
.sorted(Comparator.comparing((Run run) -> run.id().number()).reversed())
.collect(Collectors.toUnmodifiableList()), limit, baseUriForJobType);
}
return new SlimeJsonResponse(slime);
} | if (limitStr.isEmpty()) | static HttpResponse runResponse(Map<RunId, Run> runs, Optional<String> limitStr, URI baseUriForJobType) {
Slime slime = new Slime();
Cursor cursor = slime.setObject();
if (limitStr.isEmpty())
runs.forEach((runid, run) -> runToSlime(cursor.setObject(Long.toString(runid.number())), run, baseUriForJobType));
else {
int limit = limitStr.map(Integer::parseInt).orElse(Integer.MAX_VALUE);
toSlime(cursor.setArray("runs"), runs.values().stream()
.sorted(Comparator.comparing((Run run) -> run.id().number()).reversed())
.collect(Collectors.toUnmodifiableList()), limit, baseUriForJobType);
}
return new SlimeJsonResponse(slime);
} | class JobControllerApiHandlerHelper {
/**
 * @return Response with all job types that have recorded runs for the application _and_ the status for the last run of that type
 */
static HttpResponse jobTypeResponse(Controller controller, ApplicationId id, URI baseUriForJobs) {
    Slime slime = new Slime();
    Cursor responseObject = slime.setObject();
    Cursor jobsArray = responseObject.setArray("deployment");
    // Only job types for manually deployed environments are listed
    Arrays.stream(JobType.values())
          .filter(type -> type.environment().isManuallyDeployed())
          .map(devType -> new JobId(id, devType))
          .forEach(job -> {
              Collection<Run> runs = controller.jobController().runs(job).descendingMap().values();
              if (runs.isEmpty())
                  return; // skip job types that have never run
              Cursor jobObject = jobsArray.addObject();
              jobObject.setString("jobName", job.type().jobName());
              // Render at most the 10 most recent runs (runs are in descending order)
              toSlime(jobObject.setArray("runs"), runs, 10, baseUriForJobs);
          });
    return new SlimeJsonResponse(slime);
}
/** Renders a single run — id, status, timing, versions, per-step statuses and a log link — to the given object. */
private static void runToSlime(Cursor runObject, Run run, URI baseUriForJobType) {
    runObject.setLong("id", run.id().number());
    runObject.setString("status", nameOf(run.status()));
    runObject.setLong("start", run.start().toEpochMilli());
    run.end().ifPresent(instant -> runObject.setLong("end", instant.toEpochMilli()));
    versionsToSlime(runObject, run.versions());
    Cursor stepsObject = runObject.setObject("steps");
    run.steps().forEach((step, info) -> stepsObject.setString(step.name(), info.status().name()));
    // Coarser "tasks" view: only deploy, install and test, and only when they have started
    Cursor tasksObject = runObject.setObject("tasks");
    taskStatus(deployReal, run).ifPresent(status -> tasksObject.setString("deploy", status));
    taskStatus(Step.installReal, run).ifPresent(status -> tasksObject.setString("install", status));
    taskStatus(Step.endTests, run).ifPresent(status -> tasksObject.setString("test", status));
    runObject.setString("log", baseUriForJobType.resolve(baseUriForJobType.getPath() + "/run/" + run.id().number()).normalize().toString());
}
/** Returns the status of the task represented by the given step, if it has started. */
private static Optional<String> taskStatus(Step step, Run run) {
    if (run.readySteps().contains(step)) return Optional.of("running");
    var info = run.steps().get(step);
    if (info == null || info.status() == unfinished) return Optional.empty();
    return Optional.of(info.status().name());
}
/** Writes the wanted and, when known, current platform and application versions of a run to the given object. */
private static void versionsToSlime(Cursor runObject, Versions versions) {
    runObject.setString("wantedPlatform", versions.targetPlatform().toString());
    applicationVersionToSlime(runObject.setObject("wantedApplication"), versions.targetApplication());
    versions.sourcePlatform().ifPresent(version -> runObject.setString("currentPlatform", version.toString()));
    versions.sourceApplication().ifPresent(version -> applicationVersionToSlime(runObject.setObject("currentApplication"), version));
}
/**
 * Serializes an application version: its hash always, and build number plus source
 * details only when the version is known.
 */
static void applicationVersionToSlime(Cursor versionObject, ApplicationVersion version) {
versionObject.setString("hash", version.id());
if (version.isUnknown())
return; // unknown versions carry no build or source information
// NOTE(review): getAsLong() without isPresent() — assumes known versions always have a build number; confirm.
versionObject.setLong("build", version.buildNumber().getAsLong());
Cursor sourceObject = versionObject.setObject("source");
version.source().ifPresent(source -> {
sourceObject.setString("gitRepository", source.repository());
sourceObject.setString("gitBranch", source.branch());
sourceObject.setString("gitCommit", source.commit());
});
version.sourceUrl().ifPresent(url -> versionObject.setString("sourceUrl", url));
version.commit().ifPresent(commit -> versionObject.setString("commit", commit));
}
/**
 * Returns the details of a single run: activity flag, status, per-step logs and
 * statuses, convergence summary, and any test report.
 *
 * @param after id of the last log entry the client already has, or null for all entries
 * @return Response with logs from a single run
 */
static HttpResponse runDetailsResponse(JobController jobController, RunId runId, String after) {
Slime slime = new Slime();
Cursor detailsObject = slime.setObject();
Run run = jobController.run(runId)
.orElseThrow(() -> new IllegalStateException("Unknown run '" + runId + "'"));
detailsObject.setBool("active", ! run.hasEnded());
detailsObject.setString("status", nameOf(run.status()));
// Best effort: refresh logs before rendering; a failure here must not fail the request.
try {
jobController.updateTestLog(runId);
jobController.updateVespaLog(runId);
}
catch (RuntimeException ignored) { }
// NOTE(review): a non-numeric 'after' throws NumberFormatException here — consider validating upstream.
RunLog runLog = (after == null ? jobController.details(runId) : jobController.details(runId, Long.parseLong(after)))
.orElseThrow(() -> new NotExistsException(Text.format(
"No run details exist for application: %s, job type: %s, number: %d",
runId.application().toShortString(), runId.type().jobName(), runId.number())));
Cursor logObject = detailsObject.setObject("log");
for (Step step : Step.values()) {
if ( ! runLog.get(step).isEmpty())
toSlime(logObject.setArray(step.name()), runLog.get(step));
}
// lastId lets the client poll incrementally via the 'after' parameter.
runLog.lastId().ifPresent(id -> detailsObject.setLong("lastId", id));
Cursor stepsObject = detailsObject.setObject("steps");
run.steps().forEach((step, info) -> {
Cursor stepCursor = stepsObject.setObject(step.name());
stepCursor.setString("status", info.status().name());
info.startTime().ifPresent(startTime -> stepCursor.setLong("startMillis", startTime.toEpochMilli()));
// Attach convergence to installInitialReal until it succeeds, thereafter to installReal.
run.convergenceSummary().ifPresent(summary -> {
if ( step == installInitialReal && info.status() != succeeded
|| step == installReal && run.stepStatus(installInitialReal).map(status -> status == succeeded).orElse(true))
toSlime(stepCursor.setObject("convergence"), summary);
});
});
Optional<String> testReport = jobController.getTestReport(runId);
testReport.map(SlimeUtils::jsonToSlime)
.map(Slime::get)
.ifPresent(reportCursor -> SlimeUtils.copyObject(reportCursor, detailsObject.setObject("testReport")));
return new SlimeJsonResponse(slime);
}
/** Serializes the node/service convergence counters of a deployment into the given object. */
private static void toSlime(Cursor summaryObject, ConvergenceSummary summary) {
summaryObject.setLong("nodes", summary.nodes());
summaryObject.setLong("down", summary.down());
summaryObject.setLong("needPlatformUpgrade", summary.needPlatformUpgrade());
summaryObject.setLong("upgrading", summary.upgradingPlatform());
summaryObject.setLong("needReboot", summary.needReboot());
summaryObject.setLong("rebooting", summary.rebooting());
summaryObject.setLong("needRestart", summary.needRestart());
summaryObject.setLong("restarting", summary.restarting());
summaryObject.setLong("upgradingOs", summary.upgradingOs());
summaryObject.setLong("upgradingFirmware", summary.upgradingFirmware());
summaryObject.setLong("services", summary.services());
summaryObject.setLong("needNewConfig", summary.needNewConfig());
summaryObject.setLong("retiring", summary.retiring());
}
/** Appends one object per log entry to the given array. */
private static void toSlime(Cursor entryArray, List<LogEntry> entries) {
    for (LogEntry entry : entries)
        toSlime(entryArray.addObject(), entry);
}
/** Serializes a single log entry: timestamp (epoch millis), type, and message. */
private static void toSlime(Cursor entryObject, LogEntry entry) {
entryObject.setLong("at", entry.at().toEpochMilli());
entryObject.setString("type", entry.type().name());
entryObject.setString("message", entry.message());
}
/**
 * Unpack payload and submit to job controller. Defaults instance to 'default' and renders the
 * application version on success.
 *
 * @param projectId id of the build job project submitting this package
 * @return Response with the new application version
 */
static HttpResponse submitResponse(JobController jobController, String tenant, String application,
Optional<SourceRevision> sourceRevision, Optional<String> authorEmail,
Optional<String> sourceUrl, long projectId,
ApplicationPackage applicationPackage, byte[] testPackage) {
ApplicationVersion version = jobController.submit(TenantAndApplicationId.from(tenant, application),
sourceRevision,
authorEmail,
sourceUrl,
projectId,
applicationPackage,
testPackage);
return new MessageResponse(version.toString());
}
/** Aborts any job of the given type. */
static HttpResponse abortJobResponse(JobController jobs, ApplicationId id, JobType type) {
    Slime slime = new Slime();
    Cursor responseObject = slime.setObject();
    // Only the most recent run of a type can still be active.
    Optional<Run> activeRun = jobs.last(id, type).flatMap(last -> jobs.active(last.id()));
    if ( ! activeRun.isPresent()) {
        responseObject.setString("message", "Nothing to abort.");
    }
    else {
        jobs.abort(activeRun.get().id());
        responseObject.setString("message", "Aborting " + activeRun.get().id());
    }
    return new SlimeJsonResponse(slime);
}
/**
 * Maps a run status to its wire name. Every mapping currently equals the enum
 * constant's own name, but the explicit cases keep the wire format stable under
 * enum renames, and the default fails fast on new, unmapped statuses.
 */
private static String nameOf(RunStatus status) {
switch (status) {
case running: return "running";
case aborted: return "aborted";
case error: return "error";
case testFailure: return "testFailure";
case endpointCertificateTimeout: return "endpointCertificateTimeout";
case outOfCapacity: return "outOfCapacity";
case installationFailed: return "installationFailed";
case deploymentFailed: return "deploymentFailed";
case success: return "success";
default: throw new IllegalArgumentException("Unexpected status '" + status + "'");
}
}
/**
 * Renders the full deployment overview for an application: one object per step of the
 * deployment flow, with readiness/blocking timestamps, pending and recent runs, and —
 * for instance steps — the change being deployed and the latest candidate versions.
 *
 * @return Response with all job types that have recorded runs for the application _and_ the status for the last run of that type
 */
static HttpResponse overviewResponse(Controller controller, TenantAndApplicationId id, URI baseUriForDeployments) {
Application application = controller.applications().requireApplication(id);
DeploymentStatus status = controller.jobController().deploymentStatus(application);
Slime slime = new Slime();
Cursor responseObject = slime.setObject();
responseObject.setString("tenant", id.tenant().value());
responseObject.setString("application", id.application().value());
application.projectId().ifPresent(projectId -> responseObject.setLong("projectId", projectId));
Map<JobId, List<Versions>> jobsToRun = status.jobsToRun();
Cursor stepsArray = responseObject.setArray("steps");
VersionStatus versionStatus = controller.readVersionStatus();
// One object per step, in allSteps() order; dependencies are indices into that order.
for (DeploymentStatus.StepStatus stepStatus : status.allSteps()) {
Change change = status.application().require(stepStatus.instance()).change();
Cursor stepObject = stepsArray.addObject();
stepObject.setString("type", stepStatus.type().name());
stepStatus.dependencies().stream()
.map(status.allSteps()::indexOf)
.forEach(stepObject.setArray("dependencies")::addLong);
stepObject.setBool("declared", stepStatus.isDeclared());
stepObject.setString("instance", stepStatus.instance().value());
stepStatus.readyAt(change).ifPresent(ready -> stepObject.setLong("readyAt", ready.toEpochMilli()));
// delayedUntil is only emitted while the ready time is still in the future.
stepStatus.readyAt(change)
.filter(controller.clock().instant()::isBefore)
.ifPresent(until -> stepObject.setLong("delayedUntil", until.toEpochMilli()));
stepStatus.pausedUntil().ifPresent(until -> stepObject.setLong("pausedUntil", until.toEpochMilli()));
stepStatus.coolingDownUntil(change).ifPresent(until -> stepObject.setLong("coolingDownUntil", until.toEpochMilli()));
stepStatus.blockedUntil(Change.of(controller.systemVersion(versionStatus)))
.ifPresent(until -> stepObject.setLong("platformBlockedUntil", until.toEpochMilli()));
application.latestVersion().map(Change::of).flatMap(stepStatus::blockedUntil)
.ifPresent(until -> stepObject.setLong("applicationBlockedUntil", until.toEpochMilli()));
if (stepStatus.type() == DeploymentStatus.StepType.delay)
stepStatus.completedAt(change).ifPresent(completed -> stepObject.setLong("completedAt", completed.toEpochMilli()));
// Instance steps additionally carry the deploying change and latest version candidates.
if (stepStatus.type() == DeploymentStatus.StepType.instance) {
Cursor deployingObject = stepObject.setObject("deploying");
if ( ! change.isEmpty()) {
change.platform().ifPresent(version -> deployingObject.setString("platform", version.toString()));
change.application().ifPresent(version -> toSlime(deployingObject.setObject("application"), version));
}
Cursor latestVersionsObject = stepObject.setObject("latestVersions");
List<ChangeBlocker> blockers = application.deploymentSpec().requireInstance(stepStatus.instance()).changeBlocker();
latestVersionWithCompatibleConfidenceAndNotNewerThanSystem(versionStatus.versions(),
application.deploymentSpec().requireInstance(stepStatus.instance()).upgradePolicy())
.ifPresent(latestPlatform -> {
Cursor latestPlatformObject = latestVersionsObject.setObject("platform");
latestPlatformObject.setString("platform", latestPlatform.versionNumber().toFullString());
latestPlatformObject.setLong("at", latestPlatform.committedAt().toEpochMilli());
// "upgrade" is true when any production deployment is still behind this platform.
latestPlatformObject.setBool("upgrade", application.require(stepStatus.instance()).productionDeployments().values().stream()
.anyMatch(deployment -> deployment.version().isBefore(latestPlatform.versionNumber())));
toSlime(latestPlatformObject.setArray("blockers"), blockers.stream().filter(ChangeBlocker::blocksVersions));
});
application.latestVersion().ifPresent(latestApplication -> {
Cursor latestApplicationObject = latestVersionsObject.setObject("application");
toSlime(latestApplicationObject.setObject("application"), latestApplication);
latestApplicationObject.setLong("at", latestApplication.buildTime().orElse(Instant.EPOCH).toEpochMilli());
latestApplicationObject.setBool("upgrade", application.require(stepStatus.instance()).productionDeployments().values().stream()
.anyMatch(deployment -> deployment.applicationVersion().compareTo(latestApplication) < 0));
toSlime(latestApplicationObject.setArray("blockers"), blockers.stream().filter(ChangeBlocker::blocksRevisions));
});
}
stepStatus.job().ifPresent(job -> {
stepObject.setString("jobName", job.type().jobName());
URI baseUriForJob = baseUriForDeployments.resolve(baseUriForDeployments.getPath() +
"/../instance/" + job.application().instance().value() +
"/job/" + job.type().jobName()).normalize();
stepObject.setString("url", baseUriForJob.toString());
stepObject.setString("environment", job.type().environment().value());
stepObject.setString("region", job.type().zone(controller.system()).value());
if (job.type().isProduction() && job.type().isDeployment()) {
status.deploymentFor(job).ifPresent(deployment -> {
stepObject.setString("currentPlatform", deployment.version().toFullString());
toSlime(stepObject.setObject("currentApplication"), deployment.applicationVersion());
});
}
// NOTE(review): bare Optional.get() — relies on every step's job having a JobStatus; confirm or use orElseThrow.
JobStatus jobStatus = status.jobs().get(job).get();
// Versions still to be run for this job, excluding the one currently being run.
Cursor toRunArray = stepObject.setArray("toRun");
for (Versions versions : jobsToRun.getOrDefault(job, List.of())) {
boolean running = jobStatus.lastTriggered()
.map(run -> jobStatus.isRunning()
&& versions.targetsMatch(run.versions())
&& (job.type().isProduction() || versions.sourcesMatchIfPresent(run.versions())))
.orElse(false);
if (running)
continue;
Cursor runObject = toRunArray.addObject();
toSlime(runObject.setObject("versions"), versions);
}
toSlime(stepObject.setArray("runs"), jobStatus.runs().descendingMap().values(), 10, baseUriForJob);
});
}
return new SlimeJsonResponse(slime);
}
/** Serializes the optional build, compile version, source URL, and commit of an application version. */
private static void toSlime(Cursor versionObject, ApplicationVersion version) {
version.buildNumber().ifPresent(id -> versionObject.setLong("build", id));
version.compileVersion().ifPresent(platform -> versionObject.setString("compileVersion", platform.toFullString()));
version.sourceUrl().ifPresent(url -> versionObject.setString("sourceUrl", url));
version.commit().ifPresent(commit -> versionObject.setString("commit", commit));
}
/** Serializes target and, when known, source platform/application versions. */
private static void toSlime(Cursor versionsObject, Versions versions) {
versionsObject.setString("targetPlatform", versions.targetPlatform().toFullString());
toSlime(versionsObject.setObject("targetApplication"), versions.targetApplication());
versions.sourcePlatform().ifPresent(platform -> versionsObject.setString("sourcePlatform", platform.toFullString()));
versions.sourceApplication().ifPresent(application -> toSlime(versionsObject.setObject("sourceApplication"), application));
}
/** Serializes each change blocker as its window: short English day names, hours, and time zone. */
private static void toSlime(Cursor blockersArray, Stream<ChangeBlocker> blockers) {
blockers.forEach(blocker -> {
Cursor blockerObject = blockersArray.addObject();
blocker.window().days().stream()
.map(day -> day.getDisplayName(TextStyle.SHORT, Locale.ENGLISH))
.forEach(blockerObject.setArray("days")::addString);
blocker.window().hours()
.forEach(blockerObject.setArray("hours")::addLong);
blockerObject.setString("zone", blocker.window().zone().toString());
});
}
/**
 * Picks the newest version, no newer than the current system version, whose confidence
 * satisfies the given upgrade policy; falls back to the system version itself if none
 * qualifies, and to empty if no system version is present at all.
 */
private static Optional<VespaVersion> latestVersionWithCompatibleConfidenceAndNotNewerThanSystem(List<VespaVersion> versions,
                                                                                                 DeploymentSpec.UpgradePolicy policy) {
    // Locate the newest (highest-index) system version.
    int systemIndex = -1;
    for (int k = versions.size() - 1; k >= 0; k--) {
        if (versions.get(k).isSystemVersion()) {
            systemIndex = k;
            break;
        }
    }
    if (systemIndex < 0)
        return Optional.empty();
    // Canaries tolerate broken confidence; everything else requires normal.
    VespaVersion.Confidence required = policy == canary ? broken : normal;
    for (int k = systemIndex; k >= 0; k--)
        if (versions.get(k).confidence().equalOrHigherThan(required))
            return Optional.of(versions.get(k));
    return Optional.of(versions.get(systemIndex));
}
/** Serializes at most {@code limit} of the given runs, in iteration order, into the given array. */
private static void toSlime(Cursor runsArray, Collection<Run> runs, int limit, URI baseUriForJob) {
runs.stream().limit(limit).forEach(run -> {
Cursor runObject = runsArray.addObject();
runObject.setLong("id", run.id().number());
runObject.setString("url", baseUriForJob.resolve(baseUriForJob.getPath() + "/run/" + run.id().number()).toString());
runObject.setLong("start", run.start().toEpochMilli());
run.end().ifPresent(end -> runObject.setLong("end", end.toEpochMilli())); // absent while active
runObject.setString("status", run.status().name());
toSlime(runObject.setObject("versions"), run.versions());
Cursor runStepsArray = runObject.setArray("steps");
run.steps().forEach((step, info) -> {
Cursor runStepObject = runStepsArray.addObject();
runStepObject.setString("name", step.name());
runStepObject.setString("status", info.status().name());
});
});
}
} | class JobControllerApiHandlerHelper {
/**
 * Returns, for each manually deployed (dev/perf) job type with recorded runs, the
 * most recent runs of that job for the given application.
 *
 * @return Response with all job types that have recorded runs for the application _and_ the status for the last run of that type
 */
static HttpResponse jobTypeResponse(Controller controller, ApplicationId id, URI baseUriForJobs) {
Slime slime = new Slime();
Cursor responseObject = slime.setObject();
Cursor jobsArray = responseObject.setArray("deployment");
Arrays.stream(JobType.values())
.filter(type -> type.environment().isManuallyDeployed())
.map(devType -> new JobId(id, devType))
.forEach(job -> {
// descendingMap(): presumably newest runs first — TODO confirm key ordering of runs()
Collection<Run> runs = controller.jobController().runs(job).descendingMap().values();
if (runs.isEmpty())
return; // skip job types this application has never run
Cursor jobObject = jobsArray.addObject();
jobObject.setString("jobName", job.type().jobName());
toSlime(jobObject.setArray("runs"), runs, 10, baseUriForJobs); // cap at 10 runs per job
});
return new SlimeJsonResponse(slime);
}
/**
 * Serializes one run — id, status, timing, versions, per-step status, and coarse task
 * status (deploy/install/test) — into the given run object, plus a link to its log.
 */
private static void runToSlime(Cursor runObject, Run run, URI baseUriForJobType) {
runObject.setLong("id", run.id().number());
runObject.setString("status", nameOf(run.status()));
runObject.setLong("start", run.start().toEpochMilli());
run.end().ifPresent(instant -> runObject.setLong("end", instant.toEpochMilli())); // absent while the run is active
versionsToSlime(runObject, run.versions());
Cursor stepsObject = runObject.setObject("steps");
run.steps().forEach((step, info) -> stepsObject.setString(step.name(), info.status().name()));
// Coarser "task" view: only set for tasks that have started (see taskStatus).
Cursor tasksObject = runObject.setObject("tasks");
taskStatus(deployReal, run).ifPresent(status -> tasksObject.setString("deploy", status));
taskStatus(Step.installReal, run).ifPresent(status -> tasksObject.setString("install", status));
taskStatus(Step.endTests, run).ifPresent(status -> tasksObject.setString("test", status));
runObject.setString("log", baseUriForJobType.resolve(baseUriForJobType.getPath() + "/run/" + run.id().number()).normalize().toString());
}
/** Returns the status of the task represented by the given step, if it has started. */
private static Optional<String> taskStatus(Step step, Run run) {
    // A step that is ready to execute is reported as running.
    if (run.readySteps().contains(step))
        return Optional.of("running");
    // Otherwise report its recorded status, but only once it is no longer unfinished.
    return Optional.ofNullable(run.steps().get(step))
                   .filter(info -> info.status() != unfinished)
                   .map(info -> info.status().name());
}
/** Serializes the wanted (target) and, when known, current (source) platform and application versions of a run. */
private static void versionsToSlime(Cursor runObject, Versions versions) {
runObject.setString("wantedPlatform", versions.targetPlatform().toString());
applicationVersionToSlime(runObject.setObject("wantedApplication"), versions.targetApplication());
versions.sourcePlatform().ifPresent(version -> runObject.setString("currentPlatform", version.toString()));
versions.sourceApplication().ifPresent(version -> applicationVersionToSlime(runObject.setObject("currentApplication"), version));
}
/**
 * Serializes an application version: its hash always, and build number plus source
 * details only when the version is known.
 */
static void applicationVersionToSlime(Cursor versionObject, ApplicationVersion version) {
versionObject.setString("hash", version.id());
if (version.isUnknown())
return; // unknown versions carry no build or source information
// NOTE(review): getAsLong() without isPresent() — assumes known versions always have a build number; confirm.
versionObject.setLong("build", version.buildNumber().getAsLong());
Cursor sourceObject = versionObject.setObject("source");
version.source().ifPresent(source -> {
sourceObject.setString("gitRepository", source.repository());
sourceObject.setString("gitBranch", source.branch());
sourceObject.setString("gitCommit", source.commit());
});
version.sourceUrl().ifPresent(url -> versionObject.setString("sourceUrl", url));
version.commit().ifPresent(commit -> versionObject.setString("commit", commit));
}
/**
 * Returns the details of a single run: activity flag, status, per-step logs and
 * statuses, convergence summary, and any test report.
 *
 * @param after id of the last log entry the client already has, or null for all entries
 * @return Response with logs from a single run
 */
static HttpResponse runDetailsResponse(JobController jobController, RunId runId, String after) {
Slime slime = new Slime();
Cursor detailsObject = slime.setObject();
Run run = jobController.run(runId)
.orElseThrow(() -> new IllegalStateException("Unknown run '" + runId + "'"));
detailsObject.setBool("active", ! run.hasEnded());
detailsObject.setString("status", nameOf(run.status()));
// Best effort: refresh logs before rendering; a failure here must not fail the request.
try {
jobController.updateTestLog(runId);
jobController.updateVespaLog(runId);
}
catch (RuntimeException ignored) { }
// NOTE(review): a non-numeric 'after' throws NumberFormatException here — consider validating upstream.
RunLog runLog = (after == null ? jobController.details(runId) : jobController.details(runId, Long.parseLong(after)))
.orElseThrow(() -> new NotExistsException(Text.format(
"No run details exist for application: %s, job type: %s, number: %d",
runId.application().toShortString(), runId.type().jobName(), runId.number())));
Cursor logObject = detailsObject.setObject("log");
for (Step step : Step.values()) {
if ( ! runLog.get(step).isEmpty())
toSlime(logObject.setArray(step.name()), runLog.get(step));
}
// lastId lets the client poll incrementally via the 'after' parameter.
runLog.lastId().ifPresent(id -> detailsObject.setLong("lastId", id));
Cursor stepsObject = detailsObject.setObject("steps");
run.steps().forEach((step, info) -> {
Cursor stepCursor = stepsObject.setObject(step.name());
stepCursor.setString("status", info.status().name());
info.startTime().ifPresent(startTime -> stepCursor.setLong("startMillis", startTime.toEpochMilli()));
// Attach convergence to installInitialReal until it succeeds, thereafter to installReal.
run.convergenceSummary().ifPresent(summary -> {
if ( step == installInitialReal && info.status() != succeeded
|| step == installReal && run.stepStatus(installInitialReal).map(status -> status == succeeded).orElse(true))
toSlime(stepCursor.setObject("convergence"), summary);
});
});
Optional<String> testReport = jobController.getTestReport(runId);
testReport.map(SlimeUtils::jsonToSlime)
.map(Slime::get)
.ifPresent(reportCursor -> SlimeUtils.copyObject(reportCursor, detailsObject.setObject("testReport")));
return new SlimeJsonResponse(slime);
}
/** Serializes the node/service convergence counters of a deployment into the given object. */
private static void toSlime(Cursor summaryObject, ConvergenceSummary summary) {
summaryObject.setLong("nodes", summary.nodes());
summaryObject.setLong("down", summary.down());
summaryObject.setLong("needPlatformUpgrade", summary.needPlatformUpgrade());
summaryObject.setLong("upgrading", summary.upgradingPlatform());
summaryObject.setLong("needReboot", summary.needReboot());
summaryObject.setLong("rebooting", summary.rebooting());
summaryObject.setLong("needRestart", summary.needRestart());
summaryObject.setLong("restarting", summary.restarting());
summaryObject.setLong("upgradingOs", summary.upgradingOs());
summaryObject.setLong("upgradingFirmware", summary.upgradingFirmware());
summaryObject.setLong("services", summary.services());
summaryObject.setLong("needNewConfig", summary.needNewConfig());
summaryObject.setLong("retiring", summary.retiring());
}
/** Appends one object per log entry to the given array. */
private static void toSlime(Cursor entryArray, List<LogEntry> entries) {
    for (LogEntry entry : entries)
        toSlime(entryArray.addObject(), entry);
}
/** Serializes a single log entry: timestamp (epoch millis), type, and message. */
private static void toSlime(Cursor entryObject, LogEntry entry) {
entryObject.setLong("at", entry.at().toEpochMilli());
entryObject.setString("type", entry.type().name());
entryObject.setString("message", entry.message());
}
/**
 * Unpack payload and submit to job controller. Defaults instance to 'default' and renders the
 * application version on success.
 *
 * @param projectId id of the build job project submitting this package
 * @return Response with the new application version
 */
static HttpResponse submitResponse(JobController jobController, String tenant, String application,
Optional<SourceRevision> sourceRevision, Optional<String> authorEmail,
Optional<String> sourceUrl, long projectId,
ApplicationPackage applicationPackage, byte[] testPackage) {
ApplicationVersion version = jobController.submit(TenantAndApplicationId.from(tenant, application),
sourceRevision,
authorEmail,
sourceUrl,
projectId,
applicationPackage,
testPackage);
return new MessageResponse(version.toString());
}
/** Aborts any job of the given type. */
static HttpResponse abortJobResponse(JobController jobs, ApplicationId id, JobType type) {
    Slime slime = new Slime();
    Cursor responseObject = slime.setObject();
    // Only the most recent run of a type can still be active.
    Optional<Run> activeRun = jobs.last(id, type).flatMap(last -> jobs.active(last.id()));
    if ( ! activeRun.isPresent()) {
        responseObject.setString("message", "Nothing to abort.");
    }
    else {
        jobs.abort(activeRun.get().id());
        responseObject.setString("message", "Aborting " + activeRun.get().id());
    }
    return new SlimeJsonResponse(slime);
}
/**
 * Maps a run status to its wire name. Every mapping currently equals the enum
 * constant's own name, but the explicit cases keep the wire format stable under
 * enum renames, and the default fails fast on new, unmapped statuses.
 */
private static String nameOf(RunStatus status) {
switch (status) {
case running: return "running";
case aborted: return "aborted";
case error: return "error";
case testFailure: return "testFailure";
case endpointCertificateTimeout: return "endpointCertificateTimeout";
case outOfCapacity: return "outOfCapacity";
case installationFailed: return "installationFailed";
case deploymentFailed: return "deploymentFailed";
case success: return "success";
default: throw new IllegalArgumentException("Unexpected status '" + status + "'");
}
}
/**
 * Renders the full deployment overview for an application: one object per step of the
 * deployment flow, with readiness/blocking timestamps, pending and recent runs, and —
 * for instance steps — the change being deployed and the latest candidate versions.
 *
 * @return Response with all job types that have recorded runs for the application _and_ the status for the last run of that type
 */
static HttpResponse overviewResponse(Controller controller, TenantAndApplicationId id, URI baseUriForDeployments) {
Application application = controller.applications().requireApplication(id);
DeploymentStatus status = controller.jobController().deploymentStatus(application);
Slime slime = new Slime();
Cursor responseObject = slime.setObject();
responseObject.setString("tenant", id.tenant().value());
responseObject.setString("application", id.application().value());
application.projectId().ifPresent(projectId -> responseObject.setLong("projectId", projectId));
Map<JobId, List<Versions>> jobsToRun = status.jobsToRun();
Cursor stepsArray = responseObject.setArray("steps");
VersionStatus versionStatus = controller.readVersionStatus();
// One object per step, in allSteps() order; dependencies are indices into that order.
for (DeploymentStatus.StepStatus stepStatus : status.allSteps()) {
Change change = status.application().require(stepStatus.instance()).change();
Cursor stepObject = stepsArray.addObject();
stepObject.setString("type", stepStatus.type().name());
stepStatus.dependencies().stream()
.map(status.allSteps()::indexOf)
.forEach(stepObject.setArray("dependencies")::addLong);
stepObject.setBool("declared", stepStatus.isDeclared());
stepObject.setString("instance", stepStatus.instance().value());
stepStatus.readyAt(change).ifPresent(ready -> stepObject.setLong("readyAt", ready.toEpochMilli()));
// delayedUntil is only emitted while the ready time is still in the future.
stepStatus.readyAt(change)
.filter(controller.clock().instant()::isBefore)
.ifPresent(until -> stepObject.setLong("delayedUntil", until.toEpochMilli()));
stepStatus.pausedUntil().ifPresent(until -> stepObject.setLong("pausedUntil", until.toEpochMilli()));
stepStatus.coolingDownUntil(change).ifPresent(until -> stepObject.setLong("coolingDownUntil", until.toEpochMilli()));
stepStatus.blockedUntil(Change.of(controller.systemVersion(versionStatus)))
.ifPresent(until -> stepObject.setLong("platformBlockedUntil", until.toEpochMilli()));
application.latestVersion().map(Change::of).flatMap(stepStatus::blockedUntil)
.ifPresent(until -> stepObject.setLong("applicationBlockedUntil", until.toEpochMilli()));
if (stepStatus.type() == DeploymentStatus.StepType.delay)
stepStatus.completedAt(change).ifPresent(completed -> stepObject.setLong("completedAt", completed.toEpochMilli()));
// Instance steps additionally carry the deploying change and latest version candidates.
if (stepStatus.type() == DeploymentStatus.StepType.instance) {
Cursor deployingObject = stepObject.setObject("deploying");
if ( ! change.isEmpty()) {
change.platform().ifPresent(version -> deployingObject.setString("platform", version.toString()));
change.application().ifPresent(version -> toSlime(deployingObject.setObject("application"), version));
}
Cursor latestVersionsObject = stepObject.setObject("latestVersions");
List<ChangeBlocker> blockers = application.deploymentSpec().requireInstance(stepStatus.instance()).changeBlocker();
latestVersionWithCompatibleConfidenceAndNotNewerThanSystem(versionStatus.versions(),
application.deploymentSpec().requireInstance(stepStatus.instance()).upgradePolicy())
.ifPresent(latestPlatform -> {
Cursor latestPlatformObject = latestVersionsObject.setObject("platform");
latestPlatformObject.setString("platform", latestPlatform.versionNumber().toFullString());
latestPlatformObject.setLong("at", latestPlatform.committedAt().toEpochMilli());
// "upgrade" is true when any production deployment is still behind this platform.
latestPlatformObject.setBool("upgrade", application.require(stepStatus.instance()).productionDeployments().values().stream()
.anyMatch(deployment -> deployment.version().isBefore(latestPlatform.versionNumber())));
toSlime(latestPlatformObject.setArray("blockers"), blockers.stream().filter(ChangeBlocker::blocksVersions));
});
application.latestVersion().ifPresent(latestApplication -> {
Cursor latestApplicationObject = latestVersionsObject.setObject("application");
toSlime(latestApplicationObject.setObject("application"), latestApplication);
latestApplicationObject.setLong("at", latestApplication.buildTime().orElse(Instant.EPOCH).toEpochMilli());
latestApplicationObject.setBool("upgrade", application.require(stepStatus.instance()).productionDeployments().values().stream()
.anyMatch(deployment -> deployment.applicationVersion().compareTo(latestApplication) < 0));
toSlime(latestApplicationObject.setArray("blockers"), blockers.stream().filter(ChangeBlocker::blocksRevisions));
});
}
stepStatus.job().ifPresent(job -> {
stepObject.setString("jobName", job.type().jobName());
URI baseUriForJob = baseUriForDeployments.resolve(baseUriForDeployments.getPath() +
"/../instance/" + job.application().instance().value() +
"/job/" + job.type().jobName()).normalize();
stepObject.setString("url", baseUriForJob.toString());
stepObject.setString("environment", job.type().environment().value());
stepObject.setString("region", job.type().zone(controller.system()).value());
if (job.type().isProduction() && job.type().isDeployment()) {
status.deploymentFor(job).ifPresent(deployment -> {
stepObject.setString("currentPlatform", deployment.version().toFullString());
toSlime(stepObject.setObject("currentApplication"), deployment.applicationVersion());
});
}
// NOTE(review): bare Optional.get() — relies on every step's job having a JobStatus; confirm or use orElseThrow.
JobStatus jobStatus = status.jobs().get(job).get();
// Versions still to be run for this job, excluding the one currently being run.
Cursor toRunArray = stepObject.setArray("toRun");
for (Versions versions : jobsToRun.getOrDefault(job, List.of())) {
boolean running = jobStatus.lastTriggered()
.map(run -> jobStatus.isRunning()
&& versions.targetsMatch(run.versions())
&& (job.type().isProduction() || versions.sourcesMatchIfPresent(run.versions())))
.orElse(false);
if (running)
continue;
Cursor runObject = toRunArray.addObject();
toSlime(runObject.setObject("versions"), versions);
}
toSlime(stepObject.setArray("runs"), jobStatus.runs().descendingMap().values(), 10, baseUriForJob);
});
}
return new SlimeJsonResponse(slime);
}
/** Serializes the optional build, compile version, source URL, and commit of an application version. */
private static void toSlime(Cursor versionObject, ApplicationVersion version) {
version.buildNumber().ifPresent(id -> versionObject.setLong("build", id));
version.compileVersion().ifPresent(platform -> versionObject.setString("compileVersion", platform.toFullString()));
version.sourceUrl().ifPresent(url -> versionObject.setString("sourceUrl", url));
version.commit().ifPresent(commit -> versionObject.setString("commit", commit));
}
/** Serializes target and, when known, source platform/application versions. */
private static void toSlime(Cursor versionsObject, Versions versions) {
versionsObject.setString("targetPlatform", versions.targetPlatform().toFullString());
toSlime(versionsObject.setObject("targetApplication"), versions.targetApplication());
versions.sourcePlatform().ifPresent(platform -> versionsObject.setString("sourcePlatform", platform.toFullString()));
versions.sourceApplication().ifPresent(application -> toSlime(versionsObject.setObject("sourceApplication"), application));
}
/** Serializes each change blocker as its window: short English day names, hours, and time zone. */
private static void toSlime(Cursor blockersArray, Stream<ChangeBlocker> blockers) {
blockers.forEach(blocker -> {
Cursor blockerObject = blockersArray.addObject();
blocker.window().days().stream()
.map(day -> day.getDisplayName(TextStyle.SHORT, Locale.ENGLISH))
.forEach(blockerObject.setArray("days")::addString);
blocker.window().hours()
.forEach(blockerObject.setArray("hours")::addLong);
blockerObject.setString("zone", blocker.window().zone().toString());
});
}
/**
 * Picks the newest version, no newer than the current system version, whose confidence
 * satisfies the given upgrade policy; falls back to the system version itself if none
 * qualifies, and to empty if no system version is present at all.
 */
private static Optional<VespaVersion> latestVersionWithCompatibleConfidenceAndNotNewerThanSystem(List<VespaVersion> versions,
                                                                                                 DeploymentSpec.UpgradePolicy policy) {
    // Locate the newest (highest-index) system version.
    int systemIndex = -1;
    for (int k = versions.size() - 1; k >= 0; k--) {
        if (versions.get(k).isSystemVersion()) {
            systemIndex = k;
            break;
        }
    }
    if (systemIndex < 0)
        return Optional.empty();
    // Canaries tolerate broken confidence; everything else requires normal.
    VespaVersion.Confidence required = policy == canary ? broken : normal;
    for (int k = systemIndex; k >= 0; k--)
        if (versions.get(k).confidence().equalOrHigherThan(required))
            return Optional.of(versions.get(k));
    return Optional.of(versions.get(systemIndex));
}
/**
 * Serializes at most {@code limit} runs into the given Slime array, in the
 * collection's iteration order. Each run gets its id, a URL under the job's
 * base URI, start/end timestamps in epoch millis (end only when finished),
 * its status, the versions it ran with, and a name/status entry per step.
 */
private static void toSlime(Cursor runsArray, Collection<Run> runs, int limit, URI baseUriForJob) {
    runs.stream().limit(limit).forEach(run -> {
        Cursor runObject = runsArray.addObject();
        long number = run.id().number();
        runObject.setLong("id", number);
        runObject.setString("url", baseUriForJob.resolve(baseUriForJob.getPath() + "/run/" + number).toString());
        runObject.setLong("start", run.start().toEpochMilli());
        run.end().ifPresent(finished -> runObject.setLong("end", finished.toEpochMilli()));
        runObject.setString("status", run.status().name());
        toSlime(runObject.setObject("versions"), run.versions());
        Cursor stepsArray = runObject.setArray("steps");
        run.steps().forEach((step, info) -> {
            Cursor stepObject = stepsArray.addObject();
            stepObject.setString("name", step.name());
            stepObject.setString("status", info.status().name());
        });
    });
}
} |
Won't you get a 403 from the AC filter? | public void testApplicationApi() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
tester.assertResponse(request("/application/v4/", GET).userIdentity(USER_ID),
new File("root.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
new File("tenant-without-applications.json"));
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN_2, USER_ID);
registerContact(1234);
tester.assertResponse(request("/application/v4/tenant/tenant2", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property2\", \"propertyId\":\"1234\"}"),
new File("tenant-without-applications-with-id.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property2\", \"propertyId\":\"1234\"}"),
new File("tenant-without-applications-with-id.json"));
updateContactInformation();
tester.controller().tenants().updateLastLogin(TenantName.from("tenant2"),
List.of(LastLoginInfo.UserLevel.user, LastLoginInfo.UserLevel.administrator), Instant.ofEpochMilli(1234));
tester.assertResponse(request("/application/v4/tenant/tenant2", GET).userIdentity(USER_ID),
new File("tenant2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", GET).userIdentity(USER_ID),
new File("tenant-with-application.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", GET)
.userIdentity(USER_ID)
.properties(Map.of("activeInstances", "true")),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/", GET).userIdentity(USER_ID),
new File("application-list.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/fake-app/instance/", GET).userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Application 'fake-app' does not exist\"}", 404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/", GET).userIdentity(USER_ID),
new File("application-list.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/", GET)
.userIdentity(USER_ID)
.properties(Map.of("recursive", "true",
"production", "true")),
new File("tenant-with-empty-application.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET)
.userIdentity(USER_ID)
.properties(Map.of("recursive", "true",
"production", "true")),
new File("application-without-instances.json"));
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
ApplicationId id = ApplicationId.from("tenant1", "application1", "instance1");
var app1 = deploymentTester.newDeploymentContext(id);
MultiPartStreamer entity = createApplicationDeployData(applicationPackageInstance1, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/production-us-east-3/", POST)
.data(entity)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Direct deployments are only allowed to manually deployed environments.\"}", 400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/production-us-east-3/", POST)
.data(entity)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Deployment started in run 1 of production-us-east-3 for tenant1.application1.instance1. This may take about 15 minutes the first time.\",\"run\":1}");
app1.runJob(JobType.productionUsEast3);
tester.controller().applications().deactivate(app1.instanceId(), ZoneId.from("prod", "us-east-3"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/dev-us-east-1/", POST)
.data(entity)
.userIdentity(USER_ID),
"{\"message\":\"Deployment started in run 1 of dev-us-east-1 for tenant1.application1.instance1. This may take about 15 minutes the first time.\",\"run\":1}");
app1.runJob(JobType.devUsEast1);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/dev-us-east-1/package", GET)
.userIdentity(USER_ID),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1.instance1.dev.us-east-1.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(applicationPackageInstance1.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/otheruser/deploy/dev-us-east-1", POST)
.userIdentity(OTHER_USER_ID)
.data(createApplicationDeployData(applicationPackageInstance1, false)),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/otheruser/environment/dev/region/us-east-1", DELETE)
.userIdentity(OTHER_USER_ID),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser/deploy/dev-us-east-1", POST)
.userIdentity(USER_ID)
.data(createApplicationDeployData(applicationPackageInstance1, false)),
new File("deployment-job-accepted-2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser/environment/dev/region/us-east-1", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Deactivated tenant1.application1.myuser in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted instance tenant1.application1.myuser\"}");
addScrewdriverUserToDeployRole(SCREWDRIVER_ID,
ATHENZ_TENANT_DOMAIN,
id.application());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(applicationPackageInstance1, 123)),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
app1.runJob(JobType.systemTest).runJob(JobType.stagingTest).runJob(JobType.productionUsCentral1);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region("us-west-1")
.region("us-east-3")
.allow(ValidationId.globalEndpointChange)
.build();
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference-2.json"));
ApplicationId id2 = ApplicationId.from("tenant2", "application2", "instance1");
var app2 = deploymentTester.newDeploymentContext(id2);
addScrewdriverUserToDeployRole(SCREWDRIVER_ID,
ATHENZ_TENANT_DOMAIN_2,
id2.application());
deploymentTester.applications().deploymentTrigger().triggerChange(id2, Change.of(Version.fromString("7.0")));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(applicationPackage, 1000)),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
deploymentTester.triggerJobs();
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/job/production-us-west-1", POST)
.data("{\"skipTests\":true}")
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant2.application2.instance1\"}");
app2.runJob(JobType.productionUsWest1);
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/job/production-us-west-1", POST)
.data("{\"reTrigger\":true}")
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant2.application2.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/environment/prod/region/us-west-1", DELETE)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Deactivated tenant2.application2.instance1 in prod.us-west-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.screwdriverIdentity(SCREWDRIVER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", PATCH)
.userIdentity(USER_ID)
.data("{\"majorVersion\":7}"),
"{\"message\":\"Set major version to 7\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/key", POST)
.userIdentity(USER_ID)
.data("{\"key\":\"" + pemPublicKey + "\"}"),
"{\"keys\":[\"-----BEGIN PUBLIC KEY-----\\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\\nz/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\\n-----END PUBLIC KEY-----\\n\"]}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", PATCH)
.userIdentity(USER_ID)
.data("{\"pemDeployKey\":\"" + pemPublicKey + "\"}"),
"{\"message\":\"Added deploy key " + quotedPemPublicKey + "\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2-with-patches.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", PATCH)
.userIdentity(USER_ID)
.data("{\"majorVersion\":null}"),
"{\"message\":\"Set major version to empty\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/compile-version", GET)
.userIdentity(USER_ID),
"{\"compileVersion\":\"6.1.0\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/key", DELETE)
.userIdentity(USER_ID)
.data("{\"key\":\"" + pemPublicKey + "\"}"),
"{\"keys\":[]}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted instance tenant2.application2.default\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant2.application2\"}");
deploymentTester.upgrader().overrideConfidence(Version.fromString("6.1"), VespaVersion.Confidence.broken);
deploymentTester.controllerTester().computeVersionStatus();
setDeploymentMaintainedInfo();
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-central-1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("instance.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("deployment.json"));
addIssues(deploymentTester, TenantAndApplicationId.from("tenant1", "application1"));
tester.assertResponse(request("/application/v4/", GET)
.userIdentity(USER_ID)
.recursive("deployment"),
new File("recursive-root.json"));
tester.assertResponse(request("/application/v4/", GET)
.userIdentity(USER_ID)
.recursive("tenant"),
new File("recursive-until-tenant-root.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/", GET)
.userIdentity(USER_ID)
.recursive("true"),
new File("tenant1-recursive.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID)
.recursive("true"),
new File("instance1-recursive.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/nodes", GET)
.userIdentity(USER_ID),
new File("application-nodes.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/clusters", GET)
.userIdentity(USER_ID),
new File("application-clusters.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/environment/dev/region/us-east-1/instance/default/logs?from=1233&to=3214", GET)
.userIdentity(USER_ID),
"INFO - All good");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/environment/prod/region/controller/instance/default/logs?from=1233&to=3214", GET)
.userIdentity(USER_ID),
"INFO - All good");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/instance/default/environment/dev/region/us-east-1/content/", GET).userIdentity(USER_ID),
"{\"path\":\"/\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/instance/default/environment/dev/region/us-east-1/content/bar/file.json?query=param", GET).userIdentity(USER_ID),
"{\"path\":\"/bar/file.json\"}");
updateMetrics();
tester.assertJsonResponse(request("/application/v4/tenant/tenant2/application/application1/environment/dev/region/us-east-1/instance/default/metrics", GET)
.userIdentity(USER_ID),
new File("proton-metrics.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", DELETE)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Changed deployment from 'application change to 1.0.1-commit1' to 'no change' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", DELETE)
.userIdentity(USER_ID)
.data("{\"cancel\":\"all\"}"),
"{\"message\":\"No deployment in progress for tenant1.application1.instance1 at this time\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", POST)
.userIdentity(USER_ID)
.data("6.1.0"),
"{\"message\":\"Triggered pin to 6.1 for tenant1.application1.instance1\"}");
assertTrue("Action is logged to audit log",
tester.controller().auditLogger().readLog().entries().stream()
.anyMatch(entry -> entry.resource().equals("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin?")));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to 6.1' to 'upgrade to 6.1' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":false}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", POST)
.userIdentity(USER_ID)
.data("6.1"),
"{\"message\":\"Triggered pin to 6.1 for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/platform", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to 6.1' to 'pin to current platform' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to current platform' to 'no change' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1/pause", POST)
.userIdentity(USER_ID),
"{\"message\":\"production-us-west-1 for tenant1.application1.instance1 paused for " + DeploymentTrigger.maxPause + "\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1/pause", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"production-us-west-1 for tenant1.application1.instance1 resumed\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1", POST)
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.properties(Map.of("clusterId", "boo,moo"))
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1, on clusters boo, moo\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.properties(Map.of("clusterId", "boo,moo",
"documentType", "foo,boo"))
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1, on clusters boo, moo, for types foo, boo\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", POST)
.userIdentity(USER_ID),
"{\"message\":\"Enabled reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Disabled reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", GET)
.userIdentity(USER_ID),
"{\"enabled\":true,\"clusters\":[{\"name\":\"cluster\",\"pending\":[{\"type\":\"type\",\"requiredGeneration\":100}],\"ready\":[{\"type\":\"type\",\"readyAtMillis\":345,\"startedAtMillis\":456,\"endedAtMillis\":567,\"state\":\"failed\",\"message\":\"(#`д´)ノ\",\"progress\":0.1}]}]}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}");
addUserToHostedOperatorRole(HostedAthenzIdentities.from(SCREWDRIVER_ID));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/staging/region/us-east-3/instance/instance1/restart", POST)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in staging.us-east-3\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/test/region/us-east-1/instance/instance1/restart", POST)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in test.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1/restart", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.properties(Map.of("hostname", "node-1-tenant-host-prod.us-central-1"))
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}", 200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/dev/region/us-east-1/suspend", POST)
.userIdentity(USER_ID),
"{\"message\":\"Suspended orchestration of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/dev/region/us-east-1/suspend", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Resumed orchestration of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-east-3/suspend", POST)
.userIdentity(USER_ID),
accessDenied, 403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/suspended", GET)
.userIdentity(USER_ID),
new File("suspended.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/service", GET)
.userIdentity(USER_ID),
new File("services.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/service/storagenode-awe3slno6mmq2fye191y324jl/state/v1/", GET)
.userIdentity(USER_ID),
new File("service.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("delete-with-active-deployments.json"), 400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in prod.us-central-1\"}");
tester.controller().jobController().deploy(ApplicationId.from("tenant1", "application1", "default"),
JobType.productionUsCentral1,
Optional.empty(),
applicationPackageDefault);
tester.controller().jobController().deploy(ApplicationId.from("tenant1", "application1", "my-user"),
JobType.devUsEast1,
Optional.empty(),
applicationPackageDefault);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/my-user/job/dev-us-east-1/test-config", GET)
.userIdentity(USER_ID),
new File("test-config-dev.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/my-user/job/production-us-central-1/test-config", GET)
.userIdentity(USER_ID),
new File("test-config.json"));
tester.controller().applications().deactivate(ApplicationId.from("tenant1", "application1", "default"),
ZoneId.from("prod", "us-central-1"));
tester.controller().applications().deactivate(ApplicationId.from("tenant1", "application1", "my-user"),
ZoneId.from("dev", "us-east-1"));
ApplicationPackage packageWithServiceForWrongDomain = new ApplicationPackageBuilder()
.instances("instance1")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from(ATHENZ_TENANT_DOMAIN_2.getName()), AthenzService.from("service"))
.region("us-west-1")
.build();
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN_2, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(packageWithServiceForWrongDomain, 123)),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Athenz domain in deployment.xml: [domain2] must match tenant domain: [domain1]\"}", 400);
ApplicationPackage packageWithService = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from(ATHENZ_TENANT_DOMAIN.getName()), AthenzService.from("service"))
.region("us-central-1")
.parallel("us-west-1", "us-east-3")
.build();
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(packageWithService, 123)),
"{\"message\":\"Application package version: 1.0.2-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET).userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1-build2.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(packageWithService.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
.properties(Map.of("build", "1"))
.userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1-build1.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(applicationPackageInstance1.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.header("X-Content-Hash", "not/the/right/hash")
.data(createApplicationSubmissionData(packageWithService, 123)),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Value of X-Content-Hash header does not match computed content hash\"}", 400);
MultiPartStreamer streamer = createApplicationSubmissionData(packageWithService, 123);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.header("X-Content-Hash", Base64.getEncoder().encodeToString(Signatures.sha256Digest(streamer::data)))
.data(streamer),
"{\"message\":\"Application package version: 1.0.3-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
ApplicationPackage multiInstanceSpec = new ApplicationPackageBuilder()
.instances("instance1,instance2")
.region("us-central-1")
.parallel("us-west-1", "us-east-3")
.endpoint("default", "foo", "us-central-1", "us-west-1", "us-east-3")
.build();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(multiInstanceSpec, 123)),
"{\"message\":\"Application package version: 1.0.4-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
assertEquals(2, tester.controller().applications().deploymentTrigger().triggerReadyJobs());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job", GET)
.userIdentity(USER_ID),
new File("jobs.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deployment", GET)
.userIdentity(USER_ID),
new File("deployment-overview.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/system-test", GET)
.userIdentity(USER_ID),
new File("system-test-job.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/system-test/run/1", GET)
.userIdentity(USER_ID),
new File("system-test-details.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/staging-test", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Aborting run 2 of staging-test for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/", Request.Method.OPTIONS)
.userIdentity(USER_ID),
"");
addNotifications(TenantName.from("tenant1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/notifications", GET).userIdentity(USER_ID),
new File("notifications-tenant1.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/notifications", GET)
.properties(Map.of("application", "app2")).userIdentity(USER_ID),
new File("notifications-tenant1-app2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant1.application1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE).userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
} | "{\"error-code\":\"NOT_FOUND\",\"message\":\"Application 'fake-app' does not exist\"}", 404); | public void testApplicationApi() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
tester.assertResponse(request("/application/v4/", GET).userIdentity(USER_ID),
new File("root.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
new File("tenant-without-applications.json"));
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN_2, USER_ID);
registerContact(1234);
tester.assertResponse(request("/application/v4/tenant/tenant2", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property2\", \"propertyId\":\"1234\"}"),
new File("tenant-without-applications-with-id.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property2\", \"propertyId\":\"1234\"}"),
new File("tenant-without-applications-with-id.json"));
updateContactInformation();
tester.controller().tenants().updateLastLogin(TenantName.from("tenant2"),
List.of(LastLoginInfo.UserLevel.user, LastLoginInfo.UserLevel.administrator), Instant.ofEpochMilli(1234));
tester.assertResponse(request("/application/v4/tenant/tenant2", GET).userIdentity(USER_ID),
new File("tenant2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", GET).userIdentity(USER_ID),
new File("tenant-with-application.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", GET)
.userIdentity(USER_ID)
.properties(Map.of("activeInstances", "true")),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/", GET).userIdentity(USER_ID),
new File("application-list.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/fake-app/instance/", GET).userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Application 'fake-app' does not exist\"}", 404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/", GET).userIdentity(USER_ID),
new File("application-list.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/", GET)
.userIdentity(USER_ID)
.properties(Map.of("recursive", "true",
"production", "true")),
new File("tenant-with-empty-application.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET)
.userIdentity(USER_ID)
.properties(Map.of("recursive", "true",
"production", "true")),
new File("application-without-instances.json"));
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
ApplicationId id = ApplicationId.from("tenant1", "application1", "instance1");
var app1 = deploymentTester.newDeploymentContext(id);
MultiPartStreamer entity = createApplicationDeployData(applicationPackageInstance1, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/production-us-east-3/", POST)
.data(entity)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Direct deployments are only allowed to manually deployed environments.\"}", 400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/production-us-east-3/", POST)
.data(entity)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Deployment started in run 1 of production-us-east-3 for tenant1.application1.instance1. This may take about 15 minutes the first time.\",\"run\":1}");
app1.runJob(JobType.productionUsEast3);
tester.controller().applications().deactivate(app1.instanceId(), ZoneId.from("prod", "us-east-3"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/dev-us-east-1/", POST)
.data(entity)
.userIdentity(USER_ID),
"{\"message\":\"Deployment started in run 1 of dev-us-east-1 for tenant1.application1.instance1. This may take about 15 minutes the first time.\",\"run\":1}");
app1.runJob(JobType.devUsEast1);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/dev-us-east-1/package", GET)
.userIdentity(USER_ID),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1.instance1.dev.us-east-1.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(applicationPackageInstance1.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/otheruser/deploy/dev-us-east-1", POST)
.userIdentity(OTHER_USER_ID)
.data(createApplicationDeployData(applicationPackageInstance1, false)),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/otheruser/environment/dev/region/us-east-1", DELETE)
.userIdentity(OTHER_USER_ID),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser/deploy/dev-us-east-1", POST)
.userIdentity(USER_ID)
.data(createApplicationDeployData(applicationPackageInstance1, false)),
new File("deployment-job-accepted-2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser/environment/dev/region/us-east-1", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Deactivated tenant1.application1.myuser in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted instance tenant1.application1.myuser\"}");
addScrewdriverUserToDeployRole(SCREWDRIVER_ID,
ATHENZ_TENANT_DOMAIN,
id.application());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(applicationPackageInstance1, 123)),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
app1.runJob(JobType.systemTest).runJob(JobType.stagingTest).runJob(JobType.productionUsCentral1);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region("us-west-1")
.region("us-east-3")
.allow(ValidationId.globalEndpointChange)
.build();
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference-2.json"));
ApplicationId id2 = ApplicationId.from("tenant2", "application2", "instance1");
var app2 = deploymentTester.newDeploymentContext(id2);
addScrewdriverUserToDeployRole(SCREWDRIVER_ID,
ATHENZ_TENANT_DOMAIN_2,
id2.application());
deploymentTester.applications().deploymentTrigger().triggerChange(id2, Change.of(Version.fromString("7.0")));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(applicationPackage, 1000)),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
deploymentTester.triggerJobs();
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/job/production-us-west-1", POST)
.data("{\"skipTests\":true}")
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant2.application2.instance1\"}");
app2.runJob(JobType.productionUsWest1);
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/job/production-us-west-1", POST)
.data("{\"reTrigger\":true}")
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant2.application2.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/environment/prod/region/us-west-1", DELETE)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Deactivated tenant2.application2.instance1 in prod.us-west-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.screwdriverIdentity(SCREWDRIVER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", PATCH)
.userIdentity(USER_ID)
.data("{\"majorVersion\":7}"),
"{\"message\":\"Set major version to 7\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/key", POST)
.userIdentity(USER_ID)
.data("{\"key\":\"" + pemPublicKey + "\"}"),
"{\"keys\":[\"-----BEGIN PUBLIC KEY-----\\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\\nz/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\\n-----END PUBLIC KEY-----\\n\"]}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", PATCH)
.userIdentity(USER_ID)
.data("{\"pemDeployKey\":\"" + pemPublicKey + "\"}"),
"{\"message\":\"Added deploy key " + quotedPemPublicKey + "\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2-with-patches.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", PATCH)
.userIdentity(USER_ID)
.data("{\"majorVersion\":null}"),
"{\"message\":\"Set major version to empty\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/compile-version", GET)
.userIdentity(USER_ID),
"{\"compileVersion\":\"6.1.0\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/key", DELETE)
.userIdentity(USER_ID)
.data("{\"key\":\"" + pemPublicKey + "\"}"),
"{\"keys\":[]}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted instance tenant2.application2.default\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant2.application2\"}");
deploymentTester.upgrader().overrideConfidence(Version.fromString("6.1"), VespaVersion.Confidence.broken);
deploymentTester.controllerTester().computeVersionStatus();
setDeploymentMaintainedInfo();
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-central-1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("instance.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("deployment.json"));
addIssues(deploymentTester, TenantAndApplicationId.from("tenant1", "application1"));
tester.assertResponse(request("/application/v4/", GET)
.userIdentity(USER_ID)
.recursive("deployment"),
new File("recursive-root.json"));
tester.assertResponse(request("/application/v4/", GET)
.userIdentity(USER_ID)
.recursive("tenant"),
new File("recursive-until-tenant-root.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/", GET)
.userIdentity(USER_ID)
.recursive("true"),
new File("tenant1-recursive.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID)
.recursive("true"),
new File("instance1-recursive.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/nodes", GET)
.userIdentity(USER_ID),
new File("application-nodes.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/clusters", GET)
.userIdentity(USER_ID),
new File("application-clusters.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/environment/dev/region/us-east-1/instance/default/logs?from=1233&to=3214", GET)
.userIdentity(USER_ID),
"INFO - All good");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/environment/prod/region/controller/instance/default/logs?from=1233&to=3214", GET)
.userIdentity(USER_ID),
"INFO - All good");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/instance/default/environment/dev/region/us-east-1/content/", GET).userIdentity(USER_ID),
"{\"path\":\"/\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/instance/default/environment/dev/region/us-east-1/content/bar/file.json?query=param", GET).userIdentity(USER_ID),
"{\"path\":\"/bar/file.json\"}");
updateMetrics();
tester.assertJsonResponse(request("/application/v4/tenant/tenant2/application/application1/environment/dev/region/us-east-1/instance/default/metrics", GET)
.userIdentity(USER_ID),
new File("proton-metrics.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", DELETE)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Changed deployment from 'application change to 1.0.1-commit1' to 'no change' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", DELETE)
.userIdentity(USER_ID)
.data("{\"cancel\":\"all\"}"),
"{\"message\":\"No deployment in progress for tenant1.application1.instance1 at this time\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", POST)
.userIdentity(USER_ID)
.data("6.1.0"),
"{\"message\":\"Triggered pin to 6.1 for tenant1.application1.instance1\"}");
assertTrue("Action is logged to audit log",
tester.controller().auditLogger().readLog().entries().stream()
.anyMatch(entry -> entry.resource().equals("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin?")));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to 6.1' to 'upgrade to 6.1' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":false}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", POST)
.userIdentity(USER_ID)
.data("6.1"),
"{\"message\":\"Triggered pin to 6.1 for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/platform", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to 6.1' to 'pin to current platform' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to current platform' to 'no change' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1/pause", POST)
.userIdentity(USER_ID),
"{\"message\":\"production-us-west-1 for tenant1.application1.instance1 paused for " + DeploymentTrigger.maxPause + "\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1/pause", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"production-us-west-1 for tenant1.application1.instance1 resumed\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1", POST)
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.properties(Map.of("clusterId", "boo,moo"))
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1, on clusters boo, moo\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.properties(Map.of("clusterId", "boo,moo",
"documentType", "foo,boo"))
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1, on clusters boo, moo, for types foo, boo\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", POST)
.userIdentity(USER_ID),
"{\"message\":\"Enabled reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Disabled reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", GET)
.userIdentity(USER_ID),
"{\"enabled\":true,\"clusters\":[{\"name\":\"cluster\",\"pending\":[{\"type\":\"type\",\"requiredGeneration\":100}],\"ready\":[{\"type\":\"type\",\"readyAtMillis\":345,\"startedAtMillis\":456,\"endedAtMillis\":567,\"state\":\"failed\",\"message\":\"(#`д´)ノ\",\"progress\":0.1}]}]}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}");
addUserToHostedOperatorRole(HostedAthenzIdentities.from(SCREWDRIVER_ID));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/staging/region/us-east-3/instance/instance1/restart", POST)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in staging.us-east-3\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/test/region/us-east-1/instance/instance1/restart", POST)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in test.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1/restart", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.properties(Map.of("hostname", "node-1-tenant-host-prod.us-central-1"))
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}", 200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/dev/region/us-east-1/suspend", POST)
.userIdentity(USER_ID),
"{\"message\":\"Suspended orchestration of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/dev/region/us-east-1/suspend", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Resumed orchestration of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-east-3/suspend", POST)
.userIdentity(USER_ID),
accessDenied, 403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/suspended", GET)
.userIdentity(USER_ID),
new File("suspended.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/service", GET)
.userIdentity(USER_ID),
new File("services.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/service/storagenode-awe3slno6mmq2fye191y324jl/state/v1/", GET)
.userIdentity(USER_ID),
new File("service.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("delete-with-active-deployments.json"), 400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in prod.us-central-1\"}");
tester.controller().jobController().deploy(ApplicationId.from("tenant1", "application1", "default"),
JobType.productionUsCentral1,
Optional.empty(),
applicationPackageDefault);
tester.controller().jobController().deploy(ApplicationId.from("tenant1", "application1", "my-user"),
JobType.devUsEast1,
Optional.empty(),
applicationPackageDefault);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/my-user/job/dev-us-east-1/test-config", GET)
.userIdentity(USER_ID),
new File("test-config-dev.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/my-user/job/production-us-central-1/test-config", GET)
.userIdentity(USER_ID),
new File("test-config.json"));
tester.controller().applications().deactivate(ApplicationId.from("tenant1", "application1", "default"),
ZoneId.from("prod", "us-central-1"));
tester.controller().applications().deactivate(ApplicationId.from("tenant1", "application1", "my-user"),
ZoneId.from("dev", "us-east-1"));
ApplicationPackage packageWithServiceForWrongDomain = new ApplicationPackageBuilder()
.instances("instance1")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from(ATHENZ_TENANT_DOMAIN_2.getName()), AthenzService.from("service"))
.region("us-west-1")
.build();
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN_2, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(packageWithServiceForWrongDomain, 123)),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Athenz domain in deployment.xml: [domain2] must match tenant domain: [domain1]\"}", 400);
ApplicationPackage packageWithService = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from(ATHENZ_TENANT_DOMAIN.getName()), AthenzService.from("service"))
.region("us-central-1")
.parallel("us-west-1", "us-east-3")
.build();
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(packageWithService, 123)),
"{\"message\":\"Application package version: 1.0.2-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET).userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1-build2.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(packageWithService.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
.properties(Map.of("build", "1"))
.userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1-build1.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(applicationPackageInstance1.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.header("X-Content-Hash", "not/the/right/hash")
.data(createApplicationSubmissionData(packageWithService, 123)),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Value of X-Content-Hash header does not match computed content hash\"}", 400);
MultiPartStreamer streamer = createApplicationSubmissionData(packageWithService, 123);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.header("X-Content-Hash", Base64.getEncoder().encodeToString(Signatures.sha256Digest(streamer::data)))
.data(streamer),
"{\"message\":\"Application package version: 1.0.3-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
ApplicationPackage multiInstanceSpec = new ApplicationPackageBuilder()
.instances("instance1,instance2")
.region("us-central-1")
.parallel("us-west-1", "us-east-3")
.endpoint("default", "foo", "us-central-1", "us-west-1", "us-east-3")
.build();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(multiInstanceSpec, 123)),
"{\"message\":\"Application package version: 1.0.4-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
assertEquals(2, tester.controller().applications().deploymentTrigger().triggerReadyJobs());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job", GET)
.userIdentity(USER_ID),
new File("jobs.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deployment", GET)
.userIdentity(USER_ID),
new File("deployment-overview.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/system-test", GET)
.userIdentity(USER_ID).properties(Map.of("limit", "100")),
new File("system-test-job.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/system-test/run/1", GET)
.userIdentity(USER_ID),
new File("system-test-details.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/staging-test", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Aborting run 2 of staging-test for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/", Request.Method.OPTIONS)
.userIdentity(USER_ID),
"");
addNotifications(TenantName.from("tenant1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/notifications", GET).userIdentity(USER_ID),
new File("notifications-tenant1.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/notifications", GET)
.properties(Map.of("application", "app2")).userIdentity(USER_ID),
new File("notifications-tenant1-app2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant1.application1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE).userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
} | class ApplicationApiTest extends ControllerContainerTest {
private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/";
private static final String pemPublicKey = "-----BEGIN PUBLIC KEY-----\n" +
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\n" +
"z/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\n" +
"-----END PUBLIC KEY-----\n";
private static final String quotedPemPublicKey = pemPublicKey.replaceAll("\\n", "\\\\n");
private static final String accessDenied = "{\n \"code\" : 403,\n \"message\" : \"Access denied\"\n}";
private static final ApplicationPackage applicationPackageDefault = new ApplicationPackageBuilder()
.instances("default")
.globalServiceId("foo")
.region("us-central-1")
.region("us-east-3")
.region("us-west-1")
.blockChange(false, true, "mon-fri", "0-8", "UTC")
.build();
private static final ApplicationPackage applicationPackageInstance1 = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region("us-central-1")
.region("us-east-3")
.region("us-west-1")
.blockChange(false, true, "mon-fri", "0-8", "UTC")
.build();
private static final AthenzDomain ATHENZ_TENANT_DOMAIN = new AthenzDomain("domain1");
private static final AthenzDomain ATHENZ_TENANT_DOMAIN_2 = new AthenzDomain("domain2");
private static final ScrewdriverId SCREWDRIVER_ID = new ScrewdriverId("12345");
private static final UserId USER_ID = new UserId("myuser");
private static final UserId OTHER_USER_ID = new UserId("otheruser");
private static final UserId HOSTED_VESPA_OPERATOR = new UserId("johnoperator");
private static final OktaIdentityToken OKTA_IT = new OktaIdentityToken("okta-it");
private static final OktaAccessToken OKTA_AT = new OktaAccessToken("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.he0ErCNloe4J7Id0Ry2SEDg09lKkZkfsRiGsdX_vgEg");
private ContainerTester tester;
private DeploymentTester deploymentTester;
@Before
public void before() {
tester = new ContainerTester(container, responseFiles);
deploymentTester = new DeploymentTester(new ControllerTester(tester));
deploymentTester.controllerTester().computeVersionStatus();
}
@Test
private void addIssues(DeploymentTester tester, TenantAndApplicationId id) {
tester.applications().lockApplicationOrThrow(id, application ->
tester.controller().applications().store(application.withDeploymentIssueId(IssueId.from("123"))
.withOwnershipIssueId(IssueId.from("321"))
.withOwner(User.from("owner-username"))));
}
@Test
public void testRotationOverride() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
var westZone = ZoneId.from("prod", "us-west-1");
var eastZone = ZoneId.from("prod", "us-east-3");
var applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region(westZone.region())
.region(eastZone.region())
.build();
var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
app.submit(applicationPackage).deploy();
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/environment/prod/region/us-west-1/instance/default/global-rotation", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"tenant2.application2 not found\"}",
400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/global-rotation", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"application 'tenant1.application1.instance1' has no deployment in prod.us-central-1\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/global-rotation/override", PUT)
.userIdentity(USER_ID)
.data("{\"reason\":\"unit-test\"}"),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"application 'tenant1.application1.instance1' has no deployment in prod.us-central-1\"}",
404);
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-west-1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.userIdentity(USER_ID),
new File("global-rotation.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", GET)
.userIdentity(USER_ID),
new File("global-rotation-get.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", PUT)
.userIdentity(USER_ID)
.data("{\"reason\":\"unit-test\"}"),
new File("global-rotation-put.json"));
assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.out, GlobalRouting.Agent.tenant);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", DELETE)
.userIdentity(USER_ID)
.data("{\"reason\":\"unit-test\"}"),
new File("global-rotation-delete.json"));
assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.in, GlobalRouting.Agent.tenant);
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", PUT)
.userIdentity(HOSTED_VESPA_OPERATOR)
.data("{\"reason\":\"unit-test\"}"),
new File("global-rotation-put.json"));
assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.out, GlobalRouting.Agent.operator);
}
@Test
public void multiple_endpoints() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.region("us-west-1")
.region("us-east-3")
.region("eu-west-1")
.endpoint("eu", "default", "eu-west-1")
.endpoint("default", "default", "us-west-1", "us-east-3")
.build();
var app = deploymentTester.newDeploymentContext("tenant1", "application1", "instance1");
app.submit(applicationPackage).deploy();
setZoneInRotation("rotation-fqdn-2", ZoneId.from("prod", "us-west-1"));
setZoneInRotation("rotation-fqdn-2", ZoneId.from("prod", "us-east-3"));
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "eu-west-1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"application 'tenant1.application1.instance1' has multiple rotations. Query parameter 'endpointId' must be given\"}",
400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.properties(Map.of("endpointId", "default"))
.userIdentity(USER_ID),
"{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.properties(Map.of("endpointId", "eu"))
.userIdentity(USER_ID),
"{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/eu-west-1/global-rotation", GET)
.properties(Map.of("endpointId", "eu"))
.userIdentity(USER_ID),
"{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
200);
}
@Test
public void testDeployDirectly() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
tester.assertResponse(request("/application/v4/tenant/tenant1", POST).userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"));
addUserToHostedOperatorRole(HostedAthenzIdentities.from(SCREWDRIVER_ID));
MultiPartStreamer noAppEntity = createApplicationDeployData(Optional.empty(), true);
tester.assertResponse(request("/application/v4/tenant/hosted-vespa/application/routing/environment/prod/region/us-central-1/instance/default/deploy", POST)
.data(noAppEntity)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Deployment of system applications during a system upgrade is not allowed\"}",
400);
deploymentTester.controllerTester().upgradeSystem(deploymentTester.controller().readVersionStatus().controllerVersion().get().versionNumber());
tester.assertResponse(request("/application/v4/tenant/hosted-vespa/application/routing/environment/prod/region/us-central-1/instance/default/deploy", POST)
.data(noAppEntity)
.userIdentity(HOSTED_VESPA_OPERATOR),
new File("deploy-result.json"));
}
@Test
public void testMeteringResponses() {
MockMeteringClient mockMeteringClient = tester.serviceRegistry().meteringService();
ResourceAllocation currentSnapshot = new ResourceAllocation(1, 2, 3);
ResourceAllocation thisMonth = new ResourceAllocation(12, 24, 1000);
ResourceAllocation lastMonth = new ResourceAllocation(24, 48, 2000);
ApplicationId applicationId = ApplicationId.from("doesnotexist", "doesnotexist", "default");
Map<ApplicationId, List<ResourceSnapshot>> snapshotHistory = Map.of(applicationId, List.of(
new ResourceSnapshot(applicationId, 1, 2,3, Instant.ofEpochMilli(123), ZoneId.defaultId()),
new ResourceSnapshot(applicationId, 1, 2,3, Instant.ofEpochMilli(246), ZoneId.defaultId()),
new ResourceSnapshot(applicationId, 1, 2,3, Instant.ofEpochMilli(492), ZoneId.defaultId())));
mockMeteringClient.setMeteringData(new MeteringData(thisMonth, lastMonth, currentSnapshot, snapshotHistory));
tester.assertResponse(request("/application/v4/tenant/doesnotexist/application/doesnotexist/metering", GET)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance1-metering.json"));
}
@Test
public void testRemovingAllDeployments() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.region("us-west-1")
.region("us-east-3")
.region("eu-west-1")
.endpoint("eu", "default", "eu-west-1")
.endpoint("default", "default", "us-west-1", "us-east-3")
.build();
deploymentTester.controllerTester().createTenant("tenant1", ATHENZ_TENANT_DOMAIN.getName(), 432L);
var app = deploymentTester.newDeploymentContext("tenant1", "application1", "instance1");
app.submit(applicationPackage).deploy();
tester.controller().jobController().deploy(app.instanceId(), JobType.devUsEast1, Optional.empty(), applicationPackage);
assertEquals(Set.of(ZoneId.from("prod.us-west-1"), ZoneId.from("prod.us-east-3"), ZoneId.from("prod.eu-west-1"), ZoneId.from("dev.us-east-1")),
app.instance().deployments().keySet());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deployment", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"All deployments removed\"}");
assertEquals(Set.of(ZoneId.from("dev.us-east-1")), app.instance().deployments().keySet());
}
@Test
public void testErrorResponses() throws Exception {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"tenant1.application1 not found\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-east/instance/default", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"tenant1.application1 not found\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not create tenant 'tenant2': The Athens domain 'domain1' is already connected to tenant 'tenant1'\"}",
400);
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'tenant1' already exists\"}",
400);
tester.assertResponse(request("/application/v4/tenant/my_tenant_2", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"New tenant or application names must start with a letter, may contain no more than 20 characters, and may only contain lowercase letters, digits or dashes, but no double-dashes.\"}",
400);
tester.assertResponse(request("/application/v4/tenant/hosted-vespa", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'hosted-vespa' already exists\"}",
400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not create 'tenant1.application1.instance1': Instance already exists\"}",
400);
ConfigServerMock configServer = tester.serviceRegistry().configServerMock();
configServer.throwOnNextPrepare(new ConfigServerException(ConfigServerException.ErrorCode.INVALID_APPLICATION_PACKAGE, "Failed to prepare application", "Invalid application package"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET).userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"No application package has been submitted for 'tenant1.application1'\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
.properties(Map.of("build", "42"))
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"No application package found for 'tenant1.application1' with build number 42\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
.properties(Map.of("build", "foobar"))
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Invalid build number: For input string: \\\"foobar\\\"\"}",
400);
MultiPartStreamer entity = createApplicationDeployData(applicationPackageInstance1, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1/deploy", POST)
.data(entity)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Deployment of tenant1.application1.instance1 is not supported through this API\"}", 400);
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not delete tenant 'tenant1': This tenant has active applications\"}",
400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted instance tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Could not delete instance 'tenant1.application1.instance1': Instance not found\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(USER_ID),
accessDenied,
403);
tester.controller().curator().writeTenant(new AthenzTenant(TenantName.from("my_tenant"), ATHENZ_TENANT_DOMAIN,
new Property("property1"), Optional.empty(), Optional.empty(), Instant.EPOCH, LastLoginInfo.EMPTY));
tester.assertResponse(request("/application/v4/tenant/my-tenant", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'my-tenant' already exists\"}",
400);
}
@Test
public void testAuthorization() {
UserId authorizedUser = USER_ID;
UserId unauthorizedUser = new UserId("othertenant");
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"{\n \"message\" : \"Not authenticated\"\n}",
401);
tester.assertResponse(request("/application/v4/tenant/", GET)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"[]",
200);
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.userIdentity(unauthorizedUser),
"{\"error-code\":\"FORBIDDEN\",\"message\":\"The user 'user.othertenant' is not admin in Athenz domain 'domain1'\"}",
403);
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"),
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(unauthorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"),
200);
MultiPartStreamer entity = createApplicationDeployData(applicationPackageDefault, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/default/deploy", POST)
.data(entity)
.userIdentity(USER_ID),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(unauthorizedUser),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/default", POST)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference-default.json"),
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant1.application1\"}",
200);
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.userIdentity(unauthorizedUser),
accessDenied,
403);
createAthenzDomainWithAdmin(new AthenzDomain("domain2"), USER_ID);
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property1\"}")
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant1.json"),
200);
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(unauthorizedUser),
accessDenied,
403);
}
@Test
public void athenz_service_must_be_allowed_to_launch_and_be_under_tenant_domain() {
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("another.domain"), com.yahoo.config.provision.AthenzService.from("service"))
.region("us-west-1")
.build();
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
deploymentTester.controllerTester().createTenant("tenant1", ATHENZ_TENANT_DOMAIN.getName(), 1234L);
var application = deploymentTester.newDeploymentContext("tenant1", "application1", "default");
ScrewdriverId screwdriverId = new ScrewdriverId("123");
addScrewdriverUserToDeployRole(screwdriverId, ATHENZ_TENANT_DOMAIN, application.instanceId().application());
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(new AthenzDomain("another.domain"), "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit/", POST)
.data(createApplicationSubmissionData(applicationPackage, 123))
.screwdriverIdentity(screwdriverId),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Athenz domain in deployment.xml: [another.domain] must match tenant domain: [domain1]\"}",
400);
applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.region("us-west-1")
.build();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit", POST)
.data(createApplicationSubmissionData(applicationPackage, 123))
.screwdriverIdentity(screwdriverId),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Not allowed to launch Athenz service domain1.service\"}",
400);
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit/", POST)
.data(createApplicationSubmissionData(applicationPackage, 123))
.screwdriverIdentity(screwdriverId),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
}
@Test
public void personal_deployment_with_athenz_service_requires_user_is_admin() {
UserId tenantAdmin = new UserId("tenant-admin");
UserId userId = new UserId("new-user");
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, tenantAdmin);
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.build();
createTenantAndApplication();
MultiPartStreamer entity = createApplicationDeployData(applicationPackage, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/new-user/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(userId),
accessDenied,
403);
tester.athenzClientFactory().getSetup()
.domains.get(ATHENZ_TENANT_DOMAIN)
.admin(HostedAthenzIdentities.from(userId));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/new-user/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(userId),
"{\"message\":\"Deployment started in run 1 of dev-us-east-1 for tenant1.application1.new-user. This may take about 15 minutes the first time.\",\"run\":1}");
}
@Test
public void developers_can_deploy_when_privileged() {
UserId tenantAdmin = new UserId("tenant-admin");
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, tenantAdmin);
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
UserId developer = new UserId("developer");
AthenzDomain sandboxDomain = new AthenzDomain("sandbox");
createAthenzDomainWithAdmin(sandboxDomain, developer);
AthenzTenantSpec tenantSpec = new AthenzTenantSpec(TenantName.from("sandbox"),
sandboxDomain,
new Property("vespa"),
Optional.empty());
AthenzCredentials credentials = new AthenzCredentials(
new AthenzPrincipal(new AthenzUser(developer.id())), sandboxDomain, OKTA_IT, OKTA_AT);
tester.controller().tenants().create(tenantSpec, credentials);
tester.controller().applications().createApplication(TenantAndApplicationId.from("sandbox", "myapp"), credentials);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.build();
MultiPartStreamer entity = createApplicationDeployData(applicationPackage, true);
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(developer),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"User user.developer is not allowed to launch service domain1.service. Please reach out to the domain admin.\"}",
400);
AthenzDbMock.Domain domainMock = tester.athenzClientFactory().getSetup().getOrCreateDomain(ATHENZ_TENANT_DOMAIN);
domainMock.withPolicy("user." + developer.id(), "launch", "service.service");
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(developer),
"{\"message\":\"Deployment started in run 1 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":1}",
200);
UserId developer2 = new UserId("developer2");
tester.athenzClientFactory().getSetup().getOrCreateDomain(sandboxDomain).tenantAdmin(new AthenzUser(developer2.id()));
tester.athenzClientFactory().getSetup().getOrCreateDomain(ATHENZ_TENANT_DOMAIN).tenantAdmin(new AthenzUser(developer2.id()));
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(developer2),
"{\"message\":\"Deployment started in run 2 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":2}",
200);
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(applicationPackageInstance1.zippedContent())
.contentType("application/zip")
.userIdentity(developer2),
"{\"message\":\"Deployment started in run 3 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":3}");
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(applicationPackageInstance1.zippedContent())
.contentType("application/gzip")
.userIdentity(developer2),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Expected a multipart or application/zip message, but got Content-Type: application/gzip\"}", 400);
}
@Test
public void applicationWithRoutingPolicy() {
var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
var zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
deploymentTester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone),
List.of(RoutingMethod.exclusive, RoutingMethod.shared));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain"), AthenzService.from("service"))
.compileVersion(RoutingController.DIRECT_ROUTING_MIN_VERSION)
.instances("instance1")
.region(zone.region().value())
.build();
app.submit(applicationPackage).deploy();
app.addInactiveRoutingPolicy(zone);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("instance-with-routing-policy.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("deployment-with-routing-policy.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
.userIdentity(USER_ID)
.properties(Map.of("includeLegacyEndpoints", "true")),
new File("deployment-with-routing-policy-legacy.json"));
((InMemoryFlagSource) tester.controller().flagSource()).withBooleanFlag(Flags.HIDE_SHARED_ROUTING_ENDPOINT.id(), true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("deployment-without-shared-endpoints.json"));
}
@Test
public void support_access() {
var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
var zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
deploymentTester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone),
List.of(RoutingMethod.exclusive, RoutingMethod.shared));
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain"), AthenzService.from("service"))
.compileVersion(RoutingController.DIRECT_ROUTING_MIN_VERSION)
.instances("instance1")
.region(zone.region().value())
.build();
app.submit(applicationPackage).deploy();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", GET)
.userIdentity(USER_ID),
"{\"state\":{\"supportAccess\":\"NOT_ALLOWED\"},\"history\":[],\"grants\":[]}", 200
);
Instant now = tester.controller().clock().instant().truncatedTo(ChronoUnit.SECONDS);
String allowedResponse = "{\"state\":{\"supportAccess\":\"ALLOWED\",\"until\":\"" + serializeInstant(now.plus(7, ChronoUnit.DAYS))
+ "\",\"by\":\"user.myuser\"},\"history\":[{\"state\":\"allowed\",\"at\":\"" + serializeInstant(now)
+ "\",\"until\":\"" + serializeInstant(now.plus(7, ChronoUnit.DAYS))
+ "\",\"by\":\"user.myuser\"}],\"grants\":[]}";
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", POST)
.userIdentity(USER_ID),
allowedResponse, 200
);
X509Certificate support_cert = grantCertificate(now, now.plusSeconds(3600));
String grantPayload= "{\n" +
" \"applicationId\": \"tenant1:application1:instance1\",\n" +
" \"zone\": \"prod.us-west-1\",\n" +
" \"certificate\":\""+X509CertificateUtils.toPem(support_cert)+ "\"\n" +
"}";
tester.assertResponse(request("/controller/v1/access/grants/"+HOSTED_VESPA_OPERATOR.id(), POST)
.data(grantPayload)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Operator user.johnoperator granted access and job production-us-west-1 triggered\"}");
String grantResponse = allowedResponse.replaceAll("\"grants\":\\[]",
"\"grants\":[{\"requestor\":\"user.johnoperator\",\"notBefore\":\"" + serializeInstant(now) + "\",\"notAfter\":\"" + serializeInstant(now.plusSeconds(3600)) + "\"}]");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", GET)
.userIdentity(USER_ID),
grantResponse, 200
);
List<SupportAccessGrant> activeGrants = tester.controller().supportAccess().activeGrantsFor(new DeploymentId(ApplicationId.fromSerializedForm("tenant1:application1:instance1"), zone));
assertEquals(1, activeGrants.size());
app.assertRunning(JobType.productionUsWest1);
String disallowedResponse = grantResponse
.replaceAll("ALLOWED\".*?}", "NOT_ALLOWED\"} | class ApplicationApiTest extends ControllerContainerTest {
// Directory containing the expected-JSON response files referenced via new File(...) in assertions.
private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/";
// Fixed EC public key used where tests need a deploy/PEM key payload.
private static final String pemPublicKey = "-----BEGIN PUBLIC KEY-----\n" +
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\n" +
"z/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\n" +
"-----END PUBLIC KEY-----\n";
// Same key with newlines escaped, for embedding inside JSON string literals.
private static final String quotedPemPublicKey = pemPublicKey.replaceAll("\\n", "\\\\n");
// Canonical 403 body returned by the handler when authorization fails.
private static final String accessDenied = "{\n \"code\" : 403,\n \"message\" : \"Access denied\"\n}";
// Application package for the "default" instance: three prod regions, a global service id,
// and a weekday 0-8 UTC block-change window.
private static final ApplicationPackage applicationPackageDefault = new ApplicationPackageBuilder()
.instances("default")
.globalServiceId("foo")
.region("us-central-1")
.region("us-east-3")
.region("us-west-1")
.blockChange(false, true, "mon-fri", "0-8", "UTC")
.build();
// Same package shape as above, but deployed as instance "instance1".
private static final ApplicationPackage applicationPackageInstance1 = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region("us-central-1")
.region("us-east-3")
.region("us-west-1")
.blockChange(false, true, "mon-fri", "0-8", "UTC")
.build();
// Athenz domains and identities used across tests. HOSTED_VESPA_OPERATOR is granted
// operator privileges where needed; OTHER_USER_ID exercises the unauthorized path.
private static final AthenzDomain ATHENZ_TENANT_DOMAIN = new AthenzDomain("domain1");
private static final AthenzDomain ATHENZ_TENANT_DOMAIN_2 = new AthenzDomain("domain2");
private static final ScrewdriverId SCREWDRIVER_ID = new ScrewdriverId("12345");
private static final UserId USER_ID = new UserId("myuser");
private static final UserId OTHER_USER_ID = new UserId("otheruser");
private static final UserId HOSTED_VESPA_OPERATOR = new UserId("johnoperator");
// Okta tokens attached to requests that require them; the access token is a static dummy JWT.
private static final OktaIdentityToken OKTA_IT = new OktaIdentityToken("okta-it");
private static final OktaAccessToken OKTA_AT = new OktaAccessToken("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.he0ErCNloe4J7Id0Ry2SEDg09lKkZkfsRiGsdX_vgEg");
// Per-test fixtures, re-created in before().
private ContainerTester tester;
private DeploymentTester deploymentTester;
@Before
public void before() {
    // Fresh container tester and deployment tester for every test, so state
    // never leaks between tests; computing the version status up front gives
    // the controller a current system version to work against.
    tester = new ContainerTester(container, responseFiles);
    ControllerTester controllerTester = new ControllerTester(tester);
    deploymentTester = new DeploymentTester(controllerTester);
    deploymentTester.controllerTester().computeVersionStatus();
}
// Helper, not a test: the original carried a stray @Test annotation, which JUnit 4
// rejects on a private method with parameters ("Method addIssues should be public" /
// "should have no parameters") and which would fail initialization of the whole class.
// Attaches a deployment issue, an ownership issue and an owner to the given application
// so tests can verify issue-related fields in API responses.
private void addIssues(DeploymentTester tester, TenantAndApplicationId id) {
    tester.applications().lockApplicationOrThrow(id, application ->
            tester.controller().applications().store(application.withDeploymentIssueId(IssueId.from("123"))
                                                                .withOwnershipIssueId(IssueId.from("321"))
                                                                .withOwner(User.from("owner-username"))));
}
@Test
public void testRotationOverride() {
// Exercises the global-rotation status and override endpoints:
// missing application/deployment errors, GET/PUT/DELETE of the override,
// and that tenant- vs operator-initiated overrides are attributed correctly.
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
var westZone = ZoneId.from("prod", "us-west-1");
var eastZone = ZoneId.from("prod", "us-east-3");
var applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region(westZone.region())
.region(eastZone.region())
.build();
var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
app.submit(applicationPackage).deploy();
// Unknown application -> 400; deployment missing in the queried zone -> 404.
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/environment/prod/region/us-west-1/instance/default/global-rotation", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"tenant2.application2 not found\"}",
400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/global-rotation", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"application 'tenant1.application1.instance1' has no deployment in prod.us-central-1\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/global-rotation/override", PUT)
.userIdentity(USER_ID)
.data("{\"reason\":\"unit-test\"}"),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"application 'tenant1.application1.instance1' has no deployment in prod.us-central-1\"}",
404);
// With the zone in rotation, status and override GET/PUT/DELETE succeed; a tenant PUT
// takes the deployment out of rotation, DELETE puts it back in.
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-west-1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.userIdentity(USER_ID),
new File("global-rotation.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", GET)
.userIdentity(USER_ID),
new File("global-rotation-get.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", PUT)
.userIdentity(USER_ID)
.data("{\"reason\":\"unit-test\"}"),
new File("global-rotation-put.json"));
assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.out, GlobalRouting.Agent.tenant);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", DELETE)
.userIdentity(USER_ID)
.data("{\"reason\":\"unit-test\"}"),
new File("global-rotation-delete.json"));
assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.in, GlobalRouting.Agent.tenant);
// An operator override is attributed to the operator agent, not the tenant.
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", PUT)
.userIdentity(HOSTED_VESPA_OPERATOR)
.data("{\"reason\":\"unit-test\"}"),
new File("global-rotation-put.json"));
assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.out, GlobalRouting.Agent.operator);
}
@Test
public void multiple_endpoints() {
// When an application declares multiple global endpoints, global-rotation queries
// must name the endpoint via the 'endpointId' query parameter.
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.region("us-west-1")
.region("us-east-3")
.region("eu-west-1")
.endpoint("eu", "default", "eu-west-1")
.endpoint("default", "default", "us-west-1", "us-east-3")
.build();
var app = deploymentTester.newDeploymentContext("tenant1", "application1", "instance1");
app.submit(applicationPackage).deploy();
setZoneInRotation("rotation-fqdn-2", ZoneId.from("prod", "us-west-1"));
setZoneInRotation("rotation-fqdn-2", ZoneId.from("prod", "us-east-3"));
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "eu-west-1"));
// Without endpointId the request is ambiguous -> 400.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"application 'tenant1.application1.instance1' has multiple rotations. Query parameter 'endpointId' must be given\"}",
400);
// Each declared endpoint can be queried explicitly in the zones it covers.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.properties(Map.of("endpointId", "default"))
.userIdentity(USER_ID),
"{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.properties(Map.of("endpointId", "eu"))
.userIdentity(USER_ID),
"{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/eu-west-1/global-rotation", GET)
.properties(Map.of("endpointId", "eu"))
.userIdentity(USER_ID),
"{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
200);
}
@Test
public void testDeployDirectly() {
// Operators may deploy system applications directly, but not while a system
// upgrade is in progress.
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
tester.assertResponse(request("/application/v4/tenant/tenant1", POST).userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"));
addUserToHostedOperatorRole(HostedAthenzIdentities.from(SCREWDRIVER_ID));
// Deploying a system application (hosted-vespa/routing) without an application
// package is rejected while the system is upgrading ...
MultiPartStreamer noAppEntity = createApplicationDeployData(Optional.empty(), true);
tester.assertResponse(request("/application/v4/tenant/hosted-vespa/application/routing/environment/prod/region/us-central-1/instance/default/deploy", POST)
.data(noAppEntity)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Deployment of system applications during a system upgrade is not allowed\"}",
400);
// ... and succeeds once the system has converged on the controller version.
deploymentTester.controllerTester().upgradeSystem(deploymentTester.controller().readVersionStatus().controllerVersion().get().versionNumber());
tester.assertResponse(request("/application/v4/tenant/hosted-vespa/application/routing/environment/prod/region/us-central-1/instance/default/deploy", POST)
.data(noAppEntity)
.userIdentity(HOSTED_VESPA_OPERATOR),
new File("deploy-result.json"));
}
@Test
public void testMeteringResponses() {
    // Seed the metering mock with fixed monthly aggregates, a current snapshot and a
    // short per-application history, then verify the metering API renders them as expected.
    ApplicationId appId = ApplicationId.from("doesnotexist", "doesnotexist", "default");
    List<ResourceSnapshot> snapshots = List.of(
            new ResourceSnapshot(appId, 1, 2, 3, Instant.ofEpochMilli(123), ZoneId.defaultId()),
            new ResourceSnapshot(appId, 1, 2, 3, Instant.ofEpochMilli(246), ZoneId.defaultId()),
            new ResourceSnapshot(appId, 1, 2, 3, Instant.ofEpochMilli(492), ZoneId.defaultId()));
    MeteringData meteringData = new MeteringData(new ResourceAllocation(12, 24, 1000),  // this month
                                                 new ResourceAllocation(24, 48, 2000),  // last month
                                                 new ResourceAllocation(1, 2, 3),       // current snapshot
                                                 Map.of(appId, snapshots));
    MockMeteringClient meteringClient = tester.serviceRegistry().meteringService();
    meteringClient.setMeteringData(meteringData);
    tester.assertResponse(request("/application/v4/tenant/doesnotexist/application/doesnotexist/metering", GET)
                                  .userIdentity(USER_ID)
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          new File("instance1-metering.json"));
}
@Test
public void testRemovingAllDeployments() {
// DELETE .../deployment removes all production deployments but leaves dev deployments alone.
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.region("us-west-1")
.region("us-east-3")
.region("eu-west-1")
.endpoint("eu", "default", "eu-west-1")
.endpoint("default", "default", "us-west-1", "us-east-3")
.build();
deploymentTester.controllerTester().createTenant("tenant1", ATHENZ_TENANT_DOMAIN.getName(), 432L);
var app = deploymentTester.newDeploymentContext("tenant1", "application1", "instance1");
app.submit(applicationPackage).deploy();
// Add a dev deployment alongside the three prod deployments.
tester.controller().jobController().deploy(app.instanceId(), JobType.devUsEast1, Optional.empty(), applicationPackage);
assertEquals(Set.of(ZoneId.from("prod.us-west-1"), ZoneId.from("prod.us-east-3"), ZoneId.from("prod.eu-west-1"), ZoneId.from("dev.us-east-1")),
app.instance().deployments().keySet());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deployment", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"All deployments removed\"}");
// Only the dev deployment survives.
assertEquals(Set.of(ZoneId.from("dev.us-east-1")), app.instance().deployments().keySet());
}
@Test
public void testErrorResponses() throws Exception {
// Walks the application/v4 API through its main error paths: missing resources,
// duplicate creation, invalid names, deploy restrictions, and deletion ordering.
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
// PUT (modify) before the tenant exists is denied; GETs on missing resources are 404.
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"tenant1.application1 not found\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-east/instance/default", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"tenant1.application1 not found\"}",
404);
// Tenant creation: first succeeds, then duplicate domain, duplicate name,
// invalid name, and reserved name are all rejected with 400.
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not create tenant 'tenant2': The Athens domain 'domain1' is already connected to tenant 'tenant1'\"}",
400);
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'tenant1' already exists\"}",
400);
tester.assertResponse(request("/application/v4/tenant/my_tenant_2", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"New tenant or application names must start with a letter, may contain no more than 20 characters, and may only contain lowercase letters, digits or dashes, but no double-dashes.\"}",
400);
tester.assertResponse(request("/application/v4/tenant/hosted-vespa", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'hosted-vespa' already exists\"}",
400);
// Instance creation: first succeeds, duplicate is rejected.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not create 'tenant1.application1.instance1': Instance already exists\"}",
400);
// Application package retrieval: nothing submitted yet; unknown and non-numeric
// build numbers are rejected.
ConfigServerMock configServer = tester.serviceRegistry().configServerMock();
configServer.throwOnNextPrepare(new ConfigServerException(ConfigServerException.ErrorCode.INVALID_APPLICATION_PACKAGE, "Failed to prepare application", "Invalid application package"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET).userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"No application package has been submitted for 'tenant1.application1'\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
.properties(Map.of("build", "42"))
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"No application package found for 'tenant1.application1' with build number 42\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
.properties(Map.of("build", "foobar"))
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Invalid build number: For input string: \\\"foobar\\\"\"}",
400);
// Direct deploy through this API is not supported for regular tenant applications.
MultiPartStreamer entity = createApplicationDeployData(applicationPackageInstance1, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1/deploy", POST)
.data(entity)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Deployment of tenant1.application1.instance1 is not supported through this API\"}", 400);
// Deletion ordering: a tenant with applications cannot be deleted; deleting the
// instance twice fails the second time; deleting the tenant without Okta tokens is denied.
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not delete tenant 'tenant1': This tenant has active applications\"}",
400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted instance tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Could not delete instance 'tenant1.application1.instance1': Instance not found\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(USER_ID),
accessDenied,
403);
// A legacy tenant stored with an underscore name conflicts with the dashed form.
tester.controller().curator().writeTenant(new AthenzTenant(TenantName.from("my_tenant"), ATHENZ_TENANT_DOMAIN,
new Property("property1"), Optional.empty(), Optional.empty(), Instant.EPOCH, LastLoginInfo.EMPTY));
tester.assertResponse(request("/application/v4/tenant/my-tenant", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'my-tenant' already exists\"}",
400);
}
@Test
public void testAuthorization() {
// Verifies authentication/authorization on tenant and application operations:
// unauthenticated -> 401, non-admin users -> 403, domain admins succeed.
UserId authorizedUser = USER_ID;
UserId unauthorizedUser = new UserId("othertenant");
// No identity at all -> 401.
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"{\n \"message\" : \"Not authenticated\"\n}",
401);
// Listing tenants requires only authentication, not membership.
tester.assertResponse(request("/application/v4/tenant/", GET)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"[]",
200);
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
// Tenant creation requires being admin of the Athenz domain.
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.userIdentity(unauthorizedUser),
"{\"error-code\":\"FORBIDDEN\",\"message\":\"The user 'user.othertenant' is not admin in Athenz domain 'domain1'\"}",
403);
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"),
200);
// Instance creation and deletion follow the same admin requirement.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(unauthorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"),
200);
// A plain user may not deploy to prod through this API.
MultiPartStreamer entity = createApplicationDeployData(applicationPackageDefault, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/default/deploy", POST)
.data(entity)
.userIdentity(USER_ID),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(unauthorizedUser),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/default", POST)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference-default.json"),
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant1.application1\"}",
200);
// Updating the tenant's Athenz domain also requires admin of the new domain.
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.userIdentity(unauthorizedUser),
accessDenied,
403);
createAthenzDomainWithAdmin(new AthenzDomain("domain2"), USER_ID);
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property1\"}")
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant1.json"),
200);
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(unauthorizedUser),
accessDenied,
403);
}
@Test
public void athenz_service_must_be_allowed_to_launch_and_be_under_tenant_domain() {
// The Athenz identity declared in deployment.xml must (a) belong to the tenant's
// own domain and (b) be launchable by the controller; each violation yields 400.
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("another.domain"), com.yahoo.config.provision.AthenzService.from("service"))
.region("us-west-1")
.build();
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
deploymentTester.controllerTester().createTenant("tenant1", ATHENZ_TENANT_DOMAIN.getName(), 1234L);
var application = deploymentTester.newDeploymentContext("tenant1", "application1", "default");
ScrewdriverId screwdriverId = new ScrewdriverId("123");
addScrewdriverUserToDeployRole(screwdriverId, ATHENZ_TENANT_DOMAIN, application.instanceId().application());
// Launchable, but in a foreign domain -> rejected on the domain check.
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(new AthenzDomain("another.domain"), "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit/", POST)
.data(createApplicationSubmissionData(applicationPackage, 123))
.screwdriverIdentity(screwdriverId),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Athenz domain in deployment.xml: [another.domain] must match tenant domain: [domain1]\"}",
400);
// Right domain, but the controller is not yet allowed to launch the service.
applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.region("us-west-1")
.build();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit", POST)
.data(createApplicationSubmissionData(applicationPackage, 123))
.screwdriverIdentity(screwdriverId),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Not allowed to launch Athenz service domain1.service\"}",
400);
// Both conditions satisfied -> submission succeeds.
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit/", POST)
.data(createApplicationSubmissionData(applicationPackage, 123))
.screwdriverIdentity(screwdriverId),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
}
@Test
public void personal_deployment_with_athenz_service_requires_user_is_admin() {
// A dev deployment that declares an Athenz identity is only allowed for users
// who are admins of that identity's domain.
UserId tenantAdmin = new UserId("tenant-admin");
UserId userId = new UserId("new-user");
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, tenantAdmin);
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.build();
createTenantAndApplication();
MultiPartStreamer entity = createApplicationDeployData(applicationPackage, true);
// Not yet a domain admin -> denied.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/new-user/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(userId),
accessDenied,
403);
// After being made admin of the domain, the same deployment succeeds.
tester.athenzClientFactory().getSetup()
.domains.get(ATHENZ_TENANT_DOMAIN)
.admin(HostedAthenzIdentities.from(userId));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/new-user/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(userId),
"{\"message\":\"Deployment started in run 1 of dev-us-east-1 for tenant1.application1.new-user. This may take about 15 minutes the first time.\",\"run\":1}");
}
@Test
public void developers_can_deploy_when_privileged() {
// Developers may deploy to dev with a foreign-domain Athenz identity only when
// granted launch rights (via policy or tenant-admin membership); also checks
// zip-upload content-type handling.
UserId tenantAdmin = new UserId("tenant-admin");
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, tenantAdmin);
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
UserId developer = new UserId("developer");
AthenzDomain sandboxDomain = new AthenzDomain("sandbox");
createAthenzDomainWithAdmin(sandboxDomain, developer);
AthenzTenantSpec tenantSpec = new AthenzTenantSpec(TenantName.from("sandbox"),
sandboxDomain,
new Property("vespa"),
Optional.empty());
AthenzCredentials credentials = new AthenzCredentials(
new AthenzPrincipal(new AthenzUser(developer.id())), sandboxDomain, OKTA_IT, OKTA_AT);
tester.controller().tenants().create(tenantSpec, credentials);
tester.controller().applications().createApplication(TenantAndApplicationId.from("sandbox", "myapp"), credentials);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.build();
MultiPartStreamer entity = createApplicationDeployData(applicationPackage, true);
// No launch rights on domain1.service yet -> rejected.
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(developer),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"User user.developer is not allowed to launch service domain1.service. Please reach out to the domain admin.\"}",
400);
// Explicit launch policy for the developer -> allowed.
AthenzDbMock.Domain domainMock = tester.athenzClientFactory().getSetup().getOrCreateDomain(ATHENZ_TENANT_DOMAIN);
domainMock.withPolicy("user." + developer.id(), "launch", "service.service");
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(developer),
"{\"message\":\"Deployment started in run 1 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":1}",
200);
// Tenant-admin membership in both domains also grants the right.
UserId developer2 = new UserId("developer2");
tester.athenzClientFactory().getSetup().getOrCreateDomain(sandboxDomain).tenantAdmin(new AthenzUser(developer2.id()));
tester.athenzClientFactory().getSetup().getOrCreateDomain(ATHENZ_TENANT_DOMAIN).tenantAdmin(new AthenzUser(developer2.id()));
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(developer2),
"{\"message\":\"Deployment started in run 2 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":2}",
200);
// A raw application/zip body is accepted; any other non-multipart content type is not.
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(applicationPackageInstance1.zippedContent())
.contentType("application/zip")
.userIdentity(developer2),
"{\"message\":\"Deployment started in run 3 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":3}");
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(applicationPackageInstance1.zippedContent())
.contentType("application/gzip")
.userIdentity(developer2),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Expected a multipart or application/zip message, but got Content-Type: application/gzip\"}", 400);
}
@Test
public void applicationWithRoutingPolicy() {
// Verifies endpoint rendering for a deployment with routing policies, including
// legacy endpoints and hiding of shared-routing endpoints behind a feature flag.
var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
var zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
// Zone supports both exclusive and shared routing methods.
deploymentTester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone),
List.of(RoutingMethod.exclusive, RoutingMethod.shared));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain"), AthenzService.from("service"))
.compileVersion(RoutingController.DIRECT_ROUTING_MIN_VERSION)
.instances("instance1")
.region(zone.region().value())
.build();
app.submit(applicationPackage).deploy();
app.addInactiveRoutingPolicy(zone);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("instance-with-routing-policy.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("deployment-with-routing-policy.json"));
// Legacy endpoints appear only when explicitly requested.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
.userIdentity(USER_ID)
.properties(Map.of("includeLegacyEndpoints", "true")),
new File("deployment-with-routing-policy-legacy.json"));
// With the flag set, shared-routing endpoints are hidden from the response.
((InMemoryFlagSource) tester.controller().flagSource()).withBooleanFlag(Flags.HIDE_SHARED_ROUTING_ENDPOINT.id(), true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("deployment-without-shared-endpoints.json"));
}
@Test
public void support_access() {
var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
var zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
deploymentTester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone),
List.of(RoutingMethod.exclusive, RoutingMethod.shared));
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain"), AthenzService.from("service"))
.compileVersion(RoutingController.DIRECT_ROUTING_MIN_VERSION)
.instances("instance1")
.region(zone.region().value())
.build();
app.submit(applicationPackage).deploy();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", GET)
.userIdentity(USER_ID),
"{\"state\":{\"supportAccess\":\"NOT_ALLOWED\"},\"history\":[],\"grants\":[]}", 200
);
Instant now = tester.controller().clock().instant().truncatedTo(ChronoUnit.SECONDS);
String allowedResponse = "{\"state\":{\"supportAccess\":\"ALLOWED\",\"until\":\"" + serializeInstant(now.plus(7, ChronoUnit.DAYS))
+ "\",\"by\":\"user.myuser\"},\"history\":[{\"state\":\"allowed\",\"at\":\"" + serializeInstant(now)
+ "\",\"until\":\"" + serializeInstant(now.plus(7, ChronoUnit.DAYS))
+ "\",\"by\":\"user.myuser\"}],\"grants\":[]}";
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", POST)
.userIdentity(USER_ID),
allowedResponse, 200
);
X509Certificate support_cert = grantCertificate(now, now.plusSeconds(3600));
String grantPayload= "{\n" +
" \"applicationId\": \"tenant1:application1:instance1\",\n" +
" \"zone\": \"prod.us-west-1\",\n" +
" \"certificate\":\""+X509CertificateUtils.toPem(support_cert)+ "\"\n" +
"}";
tester.assertResponse(request("/controller/v1/access/grants/"+HOSTED_VESPA_OPERATOR.id(), POST)
.data(grantPayload)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Operator user.johnoperator granted access and job production-us-west-1 triggered\"}");
String grantResponse = allowedResponse.replaceAll("\"grants\":\\[]",
"\"grants\":[{\"requestor\":\"user.johnoperator\",\"notBefore\":\"" + serializeInstant(now) + "\",\"notAfter\":\"" + serializeInstant(now.plusSeconds(3600)) + "\"}]");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", GET)
.userIdentity(USER_ID),
grantResponse, 200
);
List<SupportAccessGrant> activeGrants = tester.controller().supportAccess().activeGrantsFor(new DeploymentId(ApplicationId.fromSerializedForm("tenant1:application1:instance1"), zone));
assertEquals(1, activeGrants.size());
app.assertRunning(JobType.productionUsWest1);
String disallowedResponse = grantResponse
.replaceAll("ALLOWED\".*?}", "NOT_ALLOWED\"} |
No, because roles are on tenant level | public void testApplicationApi() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
tester.assertResponse(request("/application/v4/", GET).userIdentity(USER_ID),
new File("root.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
new File("tenant-without-applications.json"));
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN_2, USER_ID);
registerContact(1234);
tester.assertResponse(request("/application/v4/tenant/tenant2", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property2\", \"propertyId\":\"1234\"}"),
new File("tenant-without-applications-with-id.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property2\", \"propertyId\":\"1234\"}"),
new File("tenant-without-applications-with-id.json"));
updateContactInformation();
tester.controller().tenants().updateLastLogin(TenantName.from("tenant2"),
List.of(LastLoginInfo.UserLevel.user, LastLoginInfo.UserLevel.administrator), Instant.ofEpochMilli(1234));
tester.assertResponse(request("/application/v4/tenant/tenant2", GET).userIdentity(USER_ID),
new File("tenant2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", GET).userIdentity(USER_ID),
new File("tenant-with-application.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", GET)
.userIdentity(USER_ID)
.properties(Map.of("activeInstances", "true")),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/", GET).userIdentity(USER_ID),
new File("application-list.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/fake-app/instance/", GET).userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Application 'fake-app' does not exist\"}", 404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/", GET).userIdentity(USER_ID),
new File("application-list.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/", GET)
.userIdentity(USER_ID)
.properties(Map.of("recursive", "true",
"production", "true")),
new File("tenant-with-empty-application.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET)
.userIdentity(USER_ID)
.properties(Map.of("recursive", "true",
"production", "true")),
new File("application-without-instances.json"));
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
ApplicationId id = ApplicationId.from("tenant1", "application1", "instance1");
var app1 = deploymentTester.newDeploymentContext(id);
MultiPartStreamer entity = createApplicationDeployData(applicationPackageInstance1, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/production-us-east-3/", POST)
.data(entity)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Direct deployments are only allowed to manually deployed environments.\"}", 400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/production-us-east-3/", POST)
.data(entity)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Deployment started in run 1 of production-us-east-3 for tenant1.application1.instance1. This may take about 15 minutes the first time.\",\"run\":1}");
app1.runJob(JobType.productionUsEast3);
tester.controller().applications().deactivate(app1.instanceId(), ZoneId.from("prod", "us-east-3"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/dev-us-east-1/", POST)
.data(entity)
.userIdentity(USER_ID),
"{\"message\":\"Deployment started in run 1 of dev-us-east-1 for tenant1.application1.instance1. This may take about 15 minutes the first time.\",\"run\":1}");
app1.runJob(JobType.devUsEast1);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/dev-us-east-1/package", GET)
.userIdentity(USER_ID),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1.instance1.dev.us-east-1.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(applicationPackageInstance1.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/otheruser/deploy/dev-us-east-1", POST)
.userIdentity(OTHER_USER_ID)
.data(createApplicationDeployData(applicationPackageInstance1, false)),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/otheruser/environment/dev/region/us-east-1", DELETE)
.userIdentity(OTHER_USER_ID),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser/deploy/dev-us-east-1", POST)
.userIdentity(USER_ID)
.data(createApplicationDeployData(applicationPackageInstance1, false)),
new File("deployment-job-accepted-2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser/environment/dev/region/us-east-1", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Deactivated tenant1.application1.myuser in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted instance tenant1.application1.myuser\"}");
addScrewdriverUserToDeployRole(SCREWDRIVER_ID,
ATHENZ_TENANT_DOMAIN,
id.application());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(applicationPackageInstance1, 123)),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
app1.runJob(JobType.systemTest).runJob(JobType.stagingTest).runJob(JobType.productionUsCentral1);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region("us-west-1")
.region("us-east-3")
.allow(ValidationId.globalEndpointChange)
.build();
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference-2.json"));
ApplicationId id2 = ApplicationId.from("tenant2", "application2", "instance1");
var app2 = deploymentTester.newDeploymentContext(id2);
addScrewdriverUserToDeployRole(SCREWDRIVER_ID,
ATHENZ_TENANT_DOMAIN_2,
id2.application());
deploymentTester.applications().deploymentTrigger().triggerChange(id2, Change.of(Version.fromString("7.0")));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(applicationPackage, 1000)),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
deploymentTester.triggerJobs();
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/job/production-us-west-1", POST)
.data("{\"skipTests\":true}")
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant2.application2.instance1\"}");
app2.runJob(JobType.productionUsWest1);
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/job/production-us-west-1", POST)
.data("{\"reTrigger\":true}")
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant2.application2.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/environment/prod/region/us-west-1", DELETE)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Deactivated tenant2.application2.instance1 in prod.us-west-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.screwdriverIdentity(SCREWDRIVER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", PATCH)
.userIdentity(USER_ID)
.data("{\"majorVersion\":7}"),
"{\"message\":\"Set major version to 7\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/key", POST)
.userIdentity(USER_ID)
.data("{\"key\":\"" + pemPublicKey + "\"}"),
"{\"keys\":[\"-----BEGIN PUBLIC KEY-----\\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\\nz/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\\n-----END PUBLIC KEY-----\\n\"]}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", PATCH)
.userIdentity(USER_ID)
.data("{\"pemDeployKey\":\"" + pemPublicKey + "\"}"),
"{\"message\":\"Added deploy key " + quotedPemPublicKey + "\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2-with-patches.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", PATCH)
.userIdentity(USER_ID)
.data("{\"majorVersion\":null}"),
"{\"message\":\"Set major version to empty\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/compile-version", GET)
.userIdentity(USER_ID),
"{\"compileVersion\":\"6.1.0\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/key", DELETE)
.userIdentity(USER_ID)
.data("{\"key\":\"" + pemPublicKey + "\"}"),
"{\"keys\":[]}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted instance tenant2.application2.default\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant2.application2\"}");
deploymentTester.upgrader().overrideConfidence(Version.fromString("6.1"), VespaVersion.Confidence.broken);
deploymentTester.controllerTester().computeVersionStatus();
setDeploymentMaintainedInfo();
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-central-1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("instance.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("deployment.json"));
addIssues(deploymentTester, TenantAndApplicationId.from("tenant1", "application1"));
tester.assertResponse(request("/application/v4/", GET)
.userIdentity(USER_ID)
.recursive("deployment"),
new File("recursive-root.json"));
tester.assertResponse(request("/application/v4/", GET)
.userIdentity(USER_ID)
.recursive("tenant"),
new File("recursive-until-tenant-root.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/", GET)
.userIdentity(USER_ID)
.recursive("true"),
new File("tenant1-recursive.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID)
.recursive("true"),
new File("instance1-recursive.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/nodes", GET)
.userIdentity(USER_ID),
new File("application-nodes.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/clusters", GET)
.userIdentity(USER_ID),
new File("application-clusters.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/environment/dev/region/us-east-1/instance/default/logs?from=1233&to=3214", GET)
.userIdentity(USER_ID),
"INFO - All good");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/environment/prod/region/controller/instance/default/logs?from=1233&to=3214", GET)
.userIdentity(USER_ID),
"INFO - All good");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/instance/default/environment/dev/region/us-east-1/content/", GET).userIdentity(USER_ID),
"{\"path\":\"/\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/instance/default/environment/dev/region/us-east-1/content/bar/file.json?query=param", GET).userIdentity(USER_ID),
"{\"path\":\"/bar/file.json\"}");
updateMetrics();
tester.assertJsonResponse(request("/application/v4/tenant/tenant2/application/application1/environment/dev/region/us-east-1/instance/default/metrics", GET)
.userIdentity(USER_ID),
new File("proton-metrics.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", DELETE)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Changed deployment from 'application change to 1.0.1-commit1' to 'no change' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", DELETE)
.userIdentity(USER_ID)
.data("{\"cancel\":\"all\"}"),
"{\"message\":\"No deployment in progress for tenant1.application1.instance1 at this time\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", POST)
.userIdentity(USER_ID)
.data("6.1.0"),
"{\"message\":\"Triggered pin to 6.1 for tenant1.application1.instance1\"}");
assertTrue("Action is logged to audit log",
tester.controller().auditLogger().readLog().entries().stream()
.anyMatch(entry -> entry.resource().equals("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin?")));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to 6.1' to 'upgrade to 6.1' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":false}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", POST)
.userIdentity(USER_ID)
.data("6.1"),
"{\"message\":\"Triggered pin to 6.1 for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/platform", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to 6.1' to 'pin to current platform' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to current platform' to 'no change' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1/pause", POST)
.userIdentity(USER_ID),
"{\"message\":\"production-us-west-1 for tenant1.application1.instance1 paused for " + DeploymentTrigger.maxPause + "\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1/pause", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"production-us-west-1 for tenant1.application1.instance1 resumed\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1", POST)
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.properties(Map.of("clusterId", "boo,moo"))
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1, on clusters boo, moo\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.properties(Map.of("clusterId", "boo,moo",
"documentType", "foo,boo"))
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1, on clusters boo, moo, for types foo, boo\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", POST)
.userIdentity(USER_ID),
"{\"message\":\"Enabled reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Disabled reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", GET)
.userIdentity(USER_ID),
"{\"enabled\":true,\"clusters\":[{\"name\":\"cluster\",\"pending\":[{\"type\":\"type\",\"requiredGeneration\":100}],\"ready\":[{\"type\":\"type\",\"readyAtMillis\":345,\"startedAtMillis\":456,\"endedAtMillis\":567,\"state\":\"failed\",\"message\":\"(#`д´)ノ\",\"progress\":0.1}]}]}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}");
addUserToHostedOperatorRole(HostedAthenzIdentities.from(SCREWDRIVER_ID));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/staging/region/us-east-3/instance/instance1/restart", POST)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in staging.us-east-3\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/test/region/us-east-1/instance/instance1/restart", POST)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in test.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1/restart", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.properties(Map.of("hostname", "node-1-tenant-host-prod.us-central-1"))
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}", 200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/dev/region/us-east-1/suspend", POST)
.userIdentity(USER_ID),
"{\"message\":\"Suspended orchestration of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/dev/region/us-east-1/suspend", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Resumed orchestration of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-east-3/suspend", POST)
.userIdentity(USER_ID),
accessDenied, 403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/suspended", GET)
.userIdentity(USER_ID),
new File("suspended.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/service", GET)
.userIdentity(USER_ID),
new File("services.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/service/storagenode-awe3slno6mmq2fye191y324jl/state/v1/", GET)
.userIdentity(USER_ID),
new File("service.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("delete-with-active-deployments.json"), 400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in prod.us-central-1\"}");
tester.controller().jobController().deploy(ApplicationId.from("tenant1", "application1", "default"),
JobType.productionUsCentral1,
Optional.empty(),
applicationPackageDefault);
tester.controller().jobController().deploy(ApplicationId.from("tenant1", "application1", "my-user"),
JobType.devUsEast1,
Optional.empty(),
applicationPackageDefault);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/my-user/job/dev-us-east-1/test-config", GET)
.userIdentity(USER_ID),
new File("test-config-dev.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/my-user/job/production-us-central-1/test-config", GET)
.userIdentity(USER_ID),
new File("test-config.json"));
tester.controller().applications().deactivate(ApplicationId.from("tenant1", "application1", "default"),
ZoneId.from("prod", "us-central-1"));
tester.controller().applications().deactivate(ApplicationId.from("tenant1", "application1", "my-user"),
ZoneId.from("dev", "us-east-1"));
ApplicationPackage packageWithServiceForWrongDomain = new ApplicationPackageBuilder()
.instances("instance1")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from(ATHENZ_TENANT_DOMAIN_2.getName()), AthenzService.from("service"))
.region("us-west-1")
.build();
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN_2, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(packageWithServiceForWrongDomain, 123)),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Athenz domain in deployment.xml: [domain2] must match tenant domain: [domain1]\"}", 400);
ApplicationPackage packageWithService = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from(ATHENZ_TENANT_DOMAIN.getName()), AthenzService.from("service"))
.region("us-central-1")
.parallel("us-west-1", "us-east-3")
.build();
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(packageWithService, 123)),
"{\"message\":\"Application package version: 1.0.2-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET).userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1-build2.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(packageWithService.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
.properties(Map.of("build", "1"))
.userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1-build1.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(applicationPackageInstance1.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.header("X-Content-Hash", "not/the/right/hash")
.data(createApplicationSubmissionData(packageWithService, 123)),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Value of X-Content-Hash header does not match computed content hash\"}", 400);
MultiPartStreamer streamer = createApplicationSubmissionData(packageWithService, 123);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.header("X-Content-Hash", Base64.getEncoder().encodeToString(Signatures.sha256Digest(streamer::data)))
.data(streamer),
"{\"message\":\"Application package version: 1.0.3-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
ApplicationPackage multiInstanceSpec = new ApplicationPackageBuilder()
.instances("instance1,instance2")
.region("us-central-1")
.parallel("us-west-1", "us-east-3")
.endpoint("default", "foo", "us-central-1", "us-west-1", "us-east-3")
.build();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(multiInstanceSpec, 123)),
"{\"message\":\"Application package version: 1.0.4-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
assertEquals(2, tester.controller().applications().deploymentTrigger().triggerReadyJobs());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job", GET)
.userIdentity(USER_ID),
new File("jobs.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deployment", GET)
.userIdentity(USER_ID),
new File("deployment-overview.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/system-test", GET)
.userIdentity(USER_ID),
new File("system-test-job.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/system-test/run/1", GET)
.userIdentity(USER_ID),
new File("system-test-details.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/staging-test", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Aborting run 2 of staging-test for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/", Request.Method.OPTIONS)
.userIdentity(USER_ID),
"");
addNotifications(TenantName.from("tenant1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/notifications", GET).userIdentity(USER_ID),
new File("notifications-tenant1.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/notifications", GET)
.properties(Map.of("application", "app2")).userIdentity(USER_ID),
new File("notifications-tenant1-app2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant1.application1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE).userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
} | "{\"error-code\":\"NOT_FOUND\",\"message\":\"Application 'fake-app' does not exist\"}", 404); | public void testApplicationApi() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
tester.assertResponse(request("/application/v4/", GET).userIdentity(USER_ID),
new File("root.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
new File("tenant-without-applications.json"));
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN_2, USER_ID);
registerContact(1234);
tester.assertResponse(request("/application/v4/tenant/tenant2", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property2\", \"propertyId\":\"1234\"}"),
new File("tenant-without-applications-with-id.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property2\", \"propertyId\":\"1234\"}"),
new File("tenant-without-applications-with-id.json"));
updateContactInformation();
tester.controller().tenants().updateLastLogin(TenantName.from("tenant2"),
List.of(LastLoginInfo.UserLevel.user, LastLoginInfo.UserLevel.administrator), Instant.ofEpochMilli(1234));
tester.assertResponse(request("/application/v4/tenant/tenant2", GET).userIdentity(USER_ID),
new File("tenant2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", GET).userIdentity(USER_ID),
new File("tenant-with-application.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", GET)
.userIdentity(USER_ID)
.properties(Map.of("activeInstances", "true")),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/", GET).userIdentity(USER_ID),
new File("application-list.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/fake-app/instance/", GET).userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Application 'fake-app' does not exist\"}", 404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/", GET).userIdentity(USER_ID),
new File("application-list.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/", GET)
.userIdentity(USER_ID)
.properties(Map.of("recursive", "true",
"production", "true")),
new File("tenant-with-empty-application.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET)
.userIdentity(USER_ID)
.properties(Map.of("recursive", "true",
"production", "true")),
new File("application-without-instances.json"));
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
ApplicationId id = ApplicationId.from("tenant1", "application1", "instance1");
var app1 = deploymentTester.newDeploymentContext(id);
MultiPartStreamer entity = createApplicationDeployData(applicationPackageInstance1, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/production-us-east-3/", POST)
.data(entity)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Direct deployments are only allowed to manually deployed environments.\"}", 400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/production-us-east-3/", POST)
.data(entity)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Deployment started in run 1 of production-us-east-3 for tenant1.application1.instance1. This may take about 15 minutes the first time.\",\"run\":1}");
app1.runJob(JobType.productionUsEast3);
tester.controller().applications().deactivate(app1.instanceId(), ZoneId.from("prod", "us-east-3"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/dev-us-east-1/", POST)
.data(entity)
.userIdentity(USER_ID),
"{\"message\":\"Deployment started in run 1 of dev-us-east-1 for tenant1.application1.instance1. This may take about 15 minutes the first time.\",\"run\":1}");
app1.runJob(JobType.devUsEast1);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/dev-us-east-1/package", GET)
.userIdentity(USER_ID),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1.instance1.dev.us-east-1.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(applicationPackageInstance1.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/otheruser/deploy/dev-us-east-1", POST)
.userIdentity(OTHER_USER_ID)
.data(createApplicationDeployData(applicationPackageInstance1, false)),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/otheruser/environment/dev/region/us-east-1", DELETE)
.userIdentity(OTHER_USER_ID),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser/deploy/dev-us-east-1", POST)
.userIdentity(USER_ID)
.data(createApplicationDeployData(applicationPackageInstance1, false)),
new File("deployment-job-accepted-2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser/environment/dev/region/us-east-1", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Deactivated tenant1.application1.myuser in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted instance tenant1.application1.myuser\"}");
addScrewdriverUserToDeployRole(SCREWDRIVER_ID,
ATHENZ_TENANT_DOMAIN,
id.application());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(applicationPackageInstance1, 123)),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
app1.runJob(JobType.systemTest).runJob(JobType.stagingTest).runJob(JobType.productionUsCentral1);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region("us-west-1")
.region("us-east-3")
.allow(ValidationId.globalEndpointChange)
.build();
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference-2.json"));
ApplicationId id2 = ApplicationId.from("tenant2", "application2", "instance1");
var app2 = deploymentTester.newDeploymentContext(id2);
addScrewdriverUserToDeployRole(SCREWDRIVER_ID,
ATHENZ_TENANT_DOMAIN_2,
id2.application());
deploymentTester.applications().deploymentTrigger().triggerChange(id2, Change.of(Version.fromString("7.0")));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(applicationPackage, 1000)),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
deploymentTester.triggerJobs();
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/job/production-us-west-1", POST)
.data("{\"skipTests\":true}")
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant2.application2.instance1\"}");
app2.runJob(JobType.productionUsWest1);
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/job/production-us-west-1", POST)
.data("{\"reTrigger\":true}")
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant2.application2.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/environment/prod/region/us-west-1", DELETE)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Deactivated tenant2.application2.instance1 in prod.us-west-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.screwdriverIdentity(SCREWDRIVER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", PATCH)
.userIdentity(USER_ID)
.data("{\"majorVersion\":7}"),
"{\"message\":\"Set major version to 7\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/key", POST)
.userIdentity(USER_ID)
.data("{\"key\":\"" + pemPublicKey + "\"}"),
"{\"keys\":[\"-----BEGIN PUBLIC KEY-----\\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\\nz/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\\n-----END PUBLIC KEY-----\\n\"]}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", PATCH)
.userIdentity(USER_ID)
.data("{\"pemDeployKey\":\"" + pemPublicKey + "\"}"),
"{\"message\":\"Added deploy key " + quotedPemPublicKey + "\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2-with-patches.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", PATCH)
.userIdentity(USER_ID)
.data("{\"majorVersion\":null}"),
"{\"message\":\"Set major version to empty\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/compile-version", GET)
.userIdentity(USER_ID),
"{\"compileVersion\":\"6.1.0\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/key", DELETE)
.userIdentity(USER_ID)
.data("{\"key\":\"" + pemPublicKey + "\"}"),
"{\"keys\":[]}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted instance tenant2.application2.default\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant2.application2\"}");
deploymentTester.upgrader().overrideConfidence(Version.fromString("6.1"), VespaVersion.Confidence.broken);
deploymentTester.controllerTester().computeVersionStatus();
setDeploymentMaintainedInfo();
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-central-1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("instance.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("deployment.json"));
addIssues(deploymentTester, TenantAndApplicationId.from("tenant1", "application1"));
tester.assertResponse(request("/application/v4/", GET)
.userIdentity(USER_ID)
.recursive("deployment"),
new File("recursive-root.json"));
tester.assertResponse(request("/application/v4/", GET)
.userIdentity(USER_ID)
.recursive("tenant"),
new File("recursive-until-tenant-root.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/", GET)
.userIdentity(USER_ID)
.recursive("true"),
new File("tenant1-recursive.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID)
.recursive("true"),
new File("instance1-recursive.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/nodes", GET)
.userIdentity(USER_ID),
new File("application-nodes.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/clusters", GET)
.userIdentity(USER_ID),
new File("application-clusters.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/environment/dev/region/us-east-1/instance/default/logs?from=1233&to=3214", GET)
.userIdentity(USER_ID),
"INFO - All good");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/environment/prod/region/controller/instance/default/logs?from=1233&to=3214", GET)
.userIdentity(USER_ID),
"INFO - All good");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/instance/default/environment/dev/region/us-east-1/content/", GET).userIdentity(USER_ID),
"{\"path\":\"/\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/instance/default/environment/dev/region/us-east-1/content/bar/file.json?query=param", GET).userIdentity(USER_ID),
"{\"path\":\"/bar/file.json\"}");
updateMetrics();
tester.assertJsonResponse(request("/application/v4/tenant/tenant2/application/application1/environment/dev/region/us-east-1/instance/default/metrics", GET)
.userIdentity(USER_ID),
new File("proton-metrics.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", DELETE)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Changed deployment from 'application change to 1.0.1-commit1' to 'no change' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", DELETE)
.userIdentity(USER_ID)
.data("{\"cancel\":\"all\"}"),
"{\"message\":\"No deployment in progress for tenant1.application1.instance1 at this time\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", POST)
.userIdentity(USER_ID)
.data("6.1.0"),
"{\"message\":\"Triggered pin to 6.1 for tenant1.application1.instance1\"}");
assertTrue("Action is logged to audit log",
tester.controller().auditLogger().readLog().entries().stream()
.anyMatch(entry -> entry.resource().equals("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin?")));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to 6.1' to 'upgrade to 6.1' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":false}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", POST)
.userIdentity(USER_ID)
.data("6.1"),
"{\"message\":\"Triggered pin to 6.1 for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/platform", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to 6.1' to 'pin to current platform' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to current platform' to 'no change' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1/pause", POST)
.userIdentity(USER_ID),
"{\"message\":\"production-us-west-1 for tenant1.application1.instance1 paused for " + DeploymentTrigger.maxPause + "\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1/pause", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"production-us-west-1 for tenant1.application1.instance1 resumed\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1", POST)
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.properties(Map.of("clusterId", "boo,moo"))
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1, on clusters boo, moo\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.properties(Map.of("clusterId", "boo,moo",
"documentType", "foo,boo"))
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1, on clusters boo, moo, for types foo, boo\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", POST)
.userIdentity(USER_ID),
"{\"message\":\"Enabled reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Disabled reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", GET)
.userIdentity(USER_ID),
"{\"enabled\":true,\"clusters\":[{\"name\":\"cluster\",\"pending\":[{\"type\":\"type\",\"requiredGeneration\":100}],\"ready\":[{\"type\":\"type\",\"readyAtMillis\":345,\"startedAtMillis\":456,\"endedAtMillis\":567,\"state\":\"failed\",\"message\":\"(#`д´)ノ\",\"progress\":0.1}]}]}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}");
addUserToHostedOperatorRole(HostedAthenzIdentities.from(SCREWDRIVER_ID));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/staging/region/us-east-3/instance/instance1/restart", POST)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in staging.us-east-3\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/test/region/us-east-1/instance/instance1/restart", POST)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in test.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1/restart", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.properties(Map.of("hostname", "node-1-tenant-host-prod.us-central-1"))
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}", 200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/dev/region/us-east-1/suspend", POST)
.userIdentity(USER_ID),
"{\"message\":\"Suspended orchestration of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/dev/region/us-east-1/suspend", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Resumed orchestration of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-east-3/suspend", POST)
.userIdentity(USER_ID),
accessDenied, 403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/suspended", GET)
.userIdentity(USER_ID),
new File("suspended.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/service", GET)
.userIdentity(USER_ID),
new File("services.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/service/storagenode-awe3slno6mmq2fye191y324jl/state/v1/", GET)
.userIdentity(USER_ID),
new File("service.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("delete-with-active-deployments.json"), 400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in prod.us-central-1\"}");
tester.controller().jobController().deploy(ApplicationId.from("tenant1", "application1", "default"),
JobType.productionUsCentral1,
Optional.empty(),
applicationPackageDefault);
tester.controller().jobController().deploy(ApplicationId.from("tenant1", "application1", "my-user"),
JobType.devUsEast1,
Optional.empty(),
applicationPackageDefault);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/my-user/job/dev-us-east-1/test-config", GET)
.userIdentity(USER_ID),
new File("test-config-dev.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/my-user/job/production-us-central-1/test-config", GET)
.userIdentity(USER_ID),
new File("test-config.json"));
tester.controller().applications().deactivate(ApplicationId.from("tenant1", "application1", "default"),
ZoneId.from("prod", "us-central-1"));
tester.controller().applications().deactivate(ApplicationId.from("tenant1", "application1", "my-user"),
ZoneId.from("dev", "us-east-1"));
ApplicationPackage packageWithServiceForWrongDomain = new ApplicationPackageBuilder()
.instances("instance1")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from(ATHENZ_TENANT_DOMAIN_2.getName()), AthenzService.from("service"))
.region("us-west-1")
.build();
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN_2, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(packageWithServiceForWrongDomain, 123)),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Athenz domain in deployment.xml: [domain2] must match tenant domain: [domain1]\"}", 400);
ApplicationPackage packageWithService = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from(ATHENZ_TENANT_DOMAIN.getName()), AthenzService.from("service"))
.region("us-central-1")
.parallel("us-west-1", "us-east-3")
.build();
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(packageWithService, 123)),
"{\"message\":\"Application package version: 1.0.2-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET).userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1-build2.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(packageWithService.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
.properties(Map.of("build", "1"))
.userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1-build1.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(applicationPackageInstance1.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.header("X-Content-Hash", "not/the/right/hash")
.data(createApplicationSubmissionData(packageWithService, 123)),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Value of X-Content-Hash header does not match computed content hash\"}", 400);
MultiPartStreamer streamer = createApplicationSubmissionData(packageWithService, 123);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.header("X-Content-Hash", Base64.getEncoder().encodeToString(Signatures.sha256Digest(streamer::data)))
.data(streamer),
"{\"message\":\"Application package version: 1.0.3-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
ApplicationPackage multiInstanceSpec = new ApplicationPackageBuilder()
.instances("instance1,instance2")
.region("us-central-1")
.parallel("us-west-1", "us-east-3")
.endpoint("default", "foo", "us-central-1", "us-west-1", "us-east-3")
.build();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(multiInstanceSpec, 123)),
"{\"message\":\"Application package version: 1.0.4-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
assertEquals(2, tester.controller().applications().deploymentTrigger().triggerReadyJobs());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job", GET)
.userIdentity(USER_ID),
new File("jobs.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deployment", GET)
.userIdentity(USER_ID),
new File("deployment-overview.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/system-test", GET)
.userIdentity(USER_ID).properties(Map.of("limit", "100")),
new File("system-test-job.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/system-test/run/1", GET)
.userIdentity(USER_ID),
new File("system-test-details.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/staging-test", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Aborting run 2 of staging-test for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/", Request.Method.OPTIONS)
.userIdentity(USER_ID),
"");
addNotifications(TenantName.from("tenant1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/notifications", GET).userIdentity(USER_ID),
new File("notifications-tenant1.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/notifications", GET)
.properties(Map.of("application", "app2")).userIdentity(USER_ID),
new File("notifications-tenant1-app2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant1.application1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE).userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
} | class ApplicationApiTest extends ControllerContainerTest {
private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/";
private static final String pemPublicKey = "-----BEGIN PUBLIC KEY-----\n" +
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\n" +
"z/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\n" +
"-----END PUBLIC KEY-----\n";
private static final String quotedPemPublicKey = pemPublicKey.replaceAll("\\n", "\\\\n");
private static final String accessDenied = "{\n \"code\" : 403,\n \"message\" : \"Access denied\"\n}";
private static final ApplicationPackage applicationPackageDefault = new ApplicationPackageBuilder()
.instances("default")
.globalServiceId("foo")
.region("us-central-1")
.region("us-east-3")
.region("us-west-1")
.blockChange(false, true, "mon-fri", "0-8", "UTC")
.build();
private static final ApplicationPackage applicationPackageInstance1 = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region("us-central-1")
.region("us-east-3")
.region("us-west-1")
.blockChange(false, true, "mon-fri", "0-8", "UTC")
.build();
private static final AthenzDomain ATHENZ_TENANT_DOMAIN = new AthenzDomain("domain1");
private static final AthenzDomain ATHENZ_TENANT_DOMAIN_2 = new AthenzDomain("domain2");
private static final ScrewdriverId SCREWDRIVER_ID = new ScrewdriverId("12345");
private static final UserId USER_ID = new UserId("myuser");
private static final UserId OTHER_USER_ID = new UserId("otheruser");
private static final UserId HOSTED_VESPA_OPERATOR = new UserId("johnoperator");
private static final OktaIdentityToken OKTA_IT = new OktaIdentityToken("okta-it");
private static final OktaAccessToken OKTA_AT = new OktaAccessToken("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.he0ErCNloe4J7Id0Ry2SEDg09lKkZkfsRiGsdX_vgEg");
private ContainerTester tester;
private DeploymentTester deploymentTester;
@Before
public void before() {
tester = new ContainerTester(container, responseFiles);
deploymentTester = new DeploymentTester(new ControllerTester(tester));
deploymentTester.controllerTester().computeVersionStatus();
}
@Test
private void addIssues(DeploymentTester tester, TenantAndApplicationId id) {
tester.applications().lockApplicationOrThrow(id, application ->
tester.controller().applications().store(application.withDeploymentIssueId(IssueId.from("123"))
.withOwnershipIssueId(IssueId.from("321"))
.withOwner(User.from("owner-username"))));
}
@Test
public void testRotationOverride() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
var westZone = ZoneId.from("prod", "us-west-1");
var eastZone = ZoneId.from("prod", "us-east-3");
var applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region(westZone.region())
.region(eastZone.region())
.build();
var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
app.submit(applicationPackage).deploy();
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/environment/prod/region/us-west-1/instance/default/global-rotation", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"tenant2.application2 not found\"}",
400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/global-rotation", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"application 'tenant1.application1.instance1' has no deployment in prod.us-central-1\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/global-rotation/override", PUT)
.userIdentity(USER_ID)
.data("{\"reason\":\"unit-test\"}"),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"application 'tenant1.application1.instance1' has no deployment in prod.us-central-1\"}",
404);
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-west-1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.userIdentity(USER_ID),
new File("global-rotation.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", GET)
.userIdentity(USER_ID),
new File("global-rotation-get.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", PUT)
.userIdentity(USER_ID)
.data("{\"reason\":\"unit-test\"}"),
new File("global-rotation-put.json"));
assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.out, GlobalRouting.Agent.tenant);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", DELETE)
.userIdentity(USER_ID)
.data("{\"reason\":\"unit-test\"}"),
new File("global-rotation-delete.json"));
assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.in, GlobalRouting.Agent.tenant);
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", PUT)
.userIdentity(HOSTED_VESPA_OPERATOR)
.data("{\"reason\":\"unit-test\"}"),
new File("global-rotation-put.json"));
assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.out, GlobalRouting.Agent.operator);
}
@Test
public void multiple_endpoints() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.region("us-west-1")
.region("us-east-3")
.region("eu-west-1")
.endpoint("eu", "default", "eu-west-1")
.endpoint("default", "default", "us-west-1", "us-east-3")
.build();
var app = deploymentTester.newDeploymentContext("tenant1", "application1", "instance1");
app.submit(applicationPackage).deploy();
setZoneInRotation("rotation-fqdn-2", ZoneId.from("prod", "us-west-1"));
setZoneInRotation("rotation-fqdn-2", ZoneId.from("prod", "us-east-3"));
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "eu-west-1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"application 'tenant1.application1.instance1' has multiple rotations. Query parameter 'endpointId' must be given\"}",
400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.properties(Map.of("endpointId", "default"))
.userIdentity(USER_ID),
"{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.properties(Map.of("endpointId", "eu"))
.userIdentity(USER_ID),
"{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/eu-west-1/global-rotation", GET)
.properties(Map.of("endpointId", "eu"))
.userIdentity(USER_ID),
"{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
200);
}
@Test
public void testDeployDirectly() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
tester.assertResponse(request("/application/v4/tenant/tenant1", POST).userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"));
addUserToHostedOperatorRole(HostedAthenzIdentities.from(SCREWDRIVER_ID));
MultiPartStreamer noAppEntity = createApplicationDeployData(Optional.empty(), true);
tester.assertResponse(request("/application/v4/tenant/hosted-vespa/application/routing/environment/prod/region/us-central-1/instance/default/deploy", POST)
.data(noAppEntity)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Deployment of system applications during a system upgrade is not allowed\"}",
400);
deploymentTester.controllerTester().upgradeSystem(deploymentTester.controller().readVersionStatus().controllerVersion().get().versionNumber());
tester.assertResponse(request("/application/v4/tenant/hosted-vespa/application/routing/environment/prod/region/us-central-1/instance/default/deploy", POST)
.data(noAppEntity)
.userIdentity(HOSTED_VESPA_OPERATOR),
new File("deploy-result.json"));
}
@Test
public void testMeteringResponses() {
MockMeteringClient mockMeteringClient = tester.serviceRegistry().meteringService();
ResourceAllocation currentSnapshot = new ResourceAllocation(1, 2, 3);
ResourceAllocation thisMonth = new ResourceAllocation(12, 24, 1000);
ResourceAllocation lastMonth = new ResourceAllocation(24, 48, 2000);
ApplicationId applicationId = ApplicationId.from("doesnotexist", "doesnotexist", "default");
Map<ApplicationId, List<ResourceSnapshot>> snapshotHistory = Map.of(applicationId, List.of(
new ResourceSnapshot(applicationId, 1, 2,3, Instant.ofEpochMilli(123), ZoneId.defaultId()),
new ResourceSnapshot(applicationId, 1, 2,3, Instant.ofEpochMilli(246), ZoneId.defaultId()),
new ResourceSnapshot(applicationId, 1, 2,3, Instant.ofEpochMilli(492), ZoneId.defaultId())));
mockMeteringClient.setMeteringData(new MeteringData(thisMonth, lastMonth, currentSnapshot, snapshotHistory));
tester.assertResponse(request("/application/v4/tenant/doesnotexist/application/doesnotexist/metering", GET)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance1-metering.json"));
}
@Test
public void testRemovingAllDeployments() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.region("us-west-1")
.region("us-east-3")
.region("eu-west-1")
.endpoint("eu", "default", "eu-west-1")
.endpoint("default", "default", "us-west-1", "us-east-3")
.build();
deploymentTester.controllerTester().createTenant("tenant1", ATHENZ_TENANT_DOMAIN.getName(), 432L);
var app = deploymentTester.newDeploymentContext("tenant1", "application1", "instance1");
app.submit(applicationPackage).deploy();
tester.controller().jobController().deploy(app.instanceId(), JobType.devUsEast1, Optional.empty(), applicationPackage);
assertEquals(Set.of(ZoneId.from("prod.us-west-1"), ZoneId.from("prod.us-east-3"), ZoneId.from("prod.eu-west-1"), ZoneId.from("dev.us-east-1")),
app.instance().deployments().keySet());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deployment", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"All deployments removed\"}");
assertEquals(Set.of(ZoneId.from("dev.us-east-1")), app.instance().deployments().keySet());
}
@Test
public void testErrorResponses() throws Exception {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"tenant1.application1 not found\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-east/instance/default", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"tenant1.application1 not found\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not create tenant 'tenant2': The Athens domain 'domain1' is already connected to tenant 'tenant1'\"}",
400);
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'tenant1' already exists\"}",
400);
tester.assertResponse(request("/application/v4/tenant/my_tenant_2", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"New tenant or application names must start with a letter, may contain no more than 20 characters, and may only contain lowercase letters, digits or dashes, but no double-dashes.\"}",
400);
tester.assertResponse(request("/application/v4/tenant/hosted-vespa", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'hosted-vespa' already exists\"}",
400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not create 'tenant1.application1.instance1': Instance already exists\"}",
400);
ConfigServerMock configServer = tester.serviceRegistry().configServerMock();
configServer.throwOnNextPrepare(new ConfigServerException(ConfigServerException.ErrorCode.INVALID_APPLICATION_PACKAGE, "Failed to prepare application", "Invalid application package"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET).userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"No application package has been submitted for 'tenant1.application1'\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
.properties(Map.of("build", "42"))
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"No application package found for 'tenant1.application1' with build number 42\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
.properties(Map.of("build", "foobar"))
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Invalid build number: For input string: \\\"foobar\\\"\"}",
400);
MultiPartStreamer entity = createApplicationDeployData(applicationPackageInstance1, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1/deploy", POST)
.data(entity)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Deployment of tenant1.application1.instance1 is not supported through this API\"}", 400);
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not delete tenant 'tenant1': This tenant has active applications\"}",
400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted instance tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Could not delete instance 'tenant1.application1.instance1': Instance not found\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(USER_ID),
accessDenied,
403);
tester.controller().curator().writeTenant(new AthenzTenant(TenantName.from("my_tenant"), ATHENZ_TENANT_DOMAIN,
new Property("property1"), Optional.empty(), Optional.empty(), Instant.EPOCH, LastLoginInfo.EMPTY));
tester.assertResponse(request("/application/v4/tenant/my-tenant", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'my-tenant' already exists\"}",
400);
}
@Test
public void testAuthorization() {
UserId authorizedUser = USER_ID;
UserId unauthorizedUser = new UserId("othertenant");
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"{\n \"message\" : \"Not authenticated\"\n}",
401);
tester.assertResponse(request("/application/v4/tenant/", GET)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"[]",
200);
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.userIdentity(unauthorizedUser),
"{\"error-code\":\"FORBIDDEN\",\"message\":\"The user 'user.othertenant' is not admin in Athenz domain 'domain1'\"}",
403);
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"),
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(unauthorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"),
200);
MultiPartStreamer entity = createApplicationDeployData(applicationPackageDefault, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/default/deploy", POST)
.data(entity)
.userIdentity(USER_ID),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(unauthorizedUser),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/default", POST)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference-default.json"),
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant1.application1\"}",
200);
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.userIdentity(unauthorizedUser),
accessDenied,
403);
createAthenzDomainWithAdmin(new AthenzDomain("domain2"), USER_ID);
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property1\"}")
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant1.json"),
200);
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(unauthorizedUser),
accessDenied,
403);
}
@Test
public void athenz_service_must_be_allowed_to_launch_and_be_under_tenant_domain() {
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("another.domain"), com.yahoo.config.provision.AthenzService.from("service"))
.region("us-west-1")
.build();
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
deploymentTester.controllerTester().createTenant("tenant1", ATHENZ_TENANT_DOMAIN.getName(), 1234L);
var application = deploymentTester.newDeploymentContext("tenant1", "application1", "default");
ScrewdriverId screwdriverId = new ScrewdriverId("123");
addScrewdriverUserToDeployRole(screwdriverId, ATHENZ_TENANT_DOMAIN, application.instanceId().application());
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(new AthenzDomain("another.domain"), "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit/", POST)
.data(createApplicationSubmissionData(applicationPackage, 123))
.screwdriverIdentity(screwdriverId),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Athenz domain in deployment.xml: [another.domain] must match tenant domain: [domain1]\"}",
400);
applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.region("us-west-1")
.build();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit", POST)
.data(createApplicationSubmissionData(applicationPackage, 123))
.screwdriverIdentity(screwdriverId),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Not allowed to launch Athenz service domain1.service\"}",
400);
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit/", POST)
.data(createApplicationSubmissionData(applicationPackage, 123))
.screwdriverIdentity(screwdriverId),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
}
@Test
public void personal_deployment_with_athenz_service_requires_user_is_admin() {
UserId tenantAdmin = new UserId("tenant-admin");
UserId userId = new UserId("new-user");
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, tenantAdmin);
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.build();
createTenantAndApplication();
MultiPartStreamer entity = createApplicationDeployData(applicationPackage, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/new-user/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(userId),
accessDenied,
403);
tester.athenzClientFactory().getSetup()
.domains.get(ATHENZ_TENANT_DOMAIN)
.admin(HostedAthenzIdentities.from(userId));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/new-user/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(userId),
"{\"message\":\"Deployment started in run 1 of dev-us-east-1 for tenant1.application1.new-user. This may take about 15 minutes the first time.\",\"run\":1}");
}
@Test
public void developers_can_deploy_when_privileged() {
UserId tenantAdmin = new UserId("tenant-admin");
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, tenantAdmin);
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
UserId developer = new UserId("developer");
AthenzDomain sandboxDomain = new AthenzDomain("sandbox");
createAthenzDomainWithAdmin(sandboxDomain, developer);
AthenzTenantSpec tenantSpec = new AthenzTenantSpec(TenantName.from("sandbox"),
sandboxDomain,
new Property("vespa"),
Optional.empty());
AthenzCredentials credentials = new AthenzCredentials(
new AthenzPrincipal(new AthenzUser(developer.id())), sandboxDomain, OKTA_IT, OKTA_AT);
tester.controller().tenants().create(tenantSpec, credentials);
tester.controller().applications().createApplication(TenantAndApplicationId.from("sandbox", "myapp"), credentials);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.build();
MultiPartStreamer entity = createApplicationDeployData(applicationPackage, true);
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(developer),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"User user.developer is not allowed to launch service domain1.service. Please reach out to the domain admin.\"}",
400);
AthenzDbMock.Domain domainMock = tester.athenzClientFactory().getSetup().getOrCreateDomain(ATHENZ_TENANT_DOMAIN);
domainMock.withPolicy("user." + developer.id(), "launch", "service.service");
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(developer),
"{\"message\":\"Deployment started in run 1 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":1}",
200);
UserId developer2 = new UserId("developer2");
tester.athenzClientFactory().getSetup().getOrCreateDomain(sandboxDomain).tenantAdmin(new AthenzUser(developer2.id()));
tester.athenzClientFactory().getSetup().getOrCreateDomain(ATHENZ_TENANT_DOMAIN).tenantAdmin(new AthenzUser(developer2.id()));
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(developer2),
"{\"message\":\"Deployment started in run 2 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":2}",
200);
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(applicationPackageInstance1.zippedContent())
.contentType("application/zip")
.userIdentity(developer2),
"{\"message\":\"Deployment started in run 3 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":3}");
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(applicationPackageInstance1.zippedContent())
.contentType("application/gzip")
.userIdentity(developer2),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Expected a multipart or application/zip message, but got Content-Type: application/gzip\"}", 400);
}
@Test
public void applicationWithRoutingPolicy() {
var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
var zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
deploymentTester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone),
List.of(RoutingMethod.exclusive, RoutingMethod.shared));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain"), AthenzService.from("service"))
.compileVersion(RoutingController.DIRECT_ROUTING_MIN_VERSION)
.instances("instance1")
.region(zone.region().value())
.build();
app.submit(applicationPackage).deploy();
app.addInactiveRoutingPolicy(zone);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("instance-with-routing-policy.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("deployment-with-routing-policy.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
.userIdentity(USER_ID)
.properties(Map.of("includeLegacyEndpoints", "true")),
new File("deployment-with-routing-policy-legacy.json"));
((InMemoryFlagSource) tester.controller().flagSource()).withBooleanFlag(Flags.HIDE_SHARED_ROUTING_ENDPOINT.id(), true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("deployment-without-shared-endpoints.json"));
}
@Test
public void support_access() {
var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
var zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
deploymentTester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone),
List.of(RoutingMethod.exclusive, RoutingMethod.shared));
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain"), AthenzService.from("service"))
.compileVersion(RoutingController.DIRECT_ROUTING_MIN_VERSION)
.instances("instance1")
.region(zone.region().value())
.build();
app.submit(applicationPackage).deploy();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", GET)
.userIdentity(USER_ID),
"{\"state\":{\"supportAccess\":\"NOT_ALLOWED\"},\"history\":[],\"grants\":[]}", 200
);
Instant now = tester.controller().clock().instant().truncatedTo(ChronoUnit.SECONDS);
String allowedResponse = "{\"state\":{\"supportAccess\":\"ALLOWED\",\"until\":\"" + serializeInstant(now.plus(7, ChronoUnit.DAYS))
+ "\",\"by\":\"user.myuser\"},\"history\":[{\"state\":\"allowed\",\"at\":\"" + serializeInstant(now)
+ "\",\"until\":\"" + serializeInstant(now.plus(7, ChronoUnit.DAYS))
+ "\",\"by\":\"user.myuser\"}],\"grants\":[]}";
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", POST)
.userIdentity(USER_ID),
allowedResponse, 200
);
X509Certificate support_cert = grantCertificate(now, now.plusSeconds(3600));
String grantPayload= "{\n" +
" \"applicationId\": \"tenant1:application1:instance1\",\n" +
" \"zone\": \"prod.us-west-1\",\n" +
" \"certificate\":\""+X509CertificateUtils.toPem(support_cert)+ "\"\n" +
"}";
tester.assertResponse(request("/controller/v1/access/grants/"+HOSTED_VESPA_OPERATOR.id(), POST)
.data(grantPayload)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Operator user.johnoperator granted access and job production-us-west-1 triggered\"}");
String grantResponse = allowedResponse.replaceAll("\"grants\":\\[]",
"\"grants\":[{\"requestor\":\"user.johnoperator\",\"notBefore\":\"" + serializeInstant(now) + "\",\"notAfter\":\"" + serializeInstant(now.plusSeconds(3600)) + "\"}]");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", GET)
.userIdentity(USER_ID),
grantResponse, 200
);
List<SupportAccessGrant> activeGrants = tester.controller().supportAccess().activeGrantsFor(new DeploymentId(ApplicationId.fromSerializedForm("tenant1:application1:instance1"), zone));
assertEquals(1, activeGrants.size());
app.assertRunning(JobType.productionUsWest1);
String disallowedResponse = grantResponse
.replaceAll("ALLOWED\".*?}", "NOT_ALLOWED\"} | class ApplicationApiTest extends ControllerContainerTest {
// Directory holding the canned JSON response files compared against by assertResponse(..., File).
private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/";
// Fixed EC public key in PEM form, used when registering/serializing application keys in tests.
private static final String pemPublicKey = "-----BEGIN PUBLIC KEY-----\n" +
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\n" +
"z/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\n" +
"-----END PUBLIC KEY-----\n";
// Same key with newlines escaped, as it appears embedded inside JSON string literals.
private static final String quotedPemPublicKey = pemPublicKey.replaceAll("\\n", "\\\\n");
// Standard 403 response body returned by the API for unauthorized requests.
private static final String accessDenied = "{\n \"code\" : 403,\n \"message\" : \"Access denied\"\n}";
// Application package for the "default" instance: global service id, three prod regions, and a block-change window.
private static final ApplicationPackage applicationPackageDefault = new ApplicationPackageBuilder()
.instances("default")
.globalServiceId("foo")
.region("us-central-1")
.region("us-east-3")
.region("us-west-1")
.blockChange(false, true, "mon-fri", "0-8", "UTC")
.build();
// Same package shape as above, but targeting the "instance1" instance.
private static final ApplicationPackage applicationPackageInstance1 = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region("us-central-1")
.region("us-east-3")
.region("us-west-1")
.blockChange(false, true, "mon-fri", "0-8", "UTC")
.build();
// Athenz domains and identities shared across the tests below.
private static final AthenzDomain ATHENZ_TENANT_DOMAIN = new AthenzDomain("domain1");
private static final AthenzDomain ATHENZ_TENANT_DOMAIN_2 = new AthenzDomain("domain2");
private static final ScrewdriverId SCREWDRIVER_ID = new ScrewdriverId("12345");
private static final UserId USER_ID = new UserId("myuser");
private static final UserId OTHER_USER_ID = new UserId("otheruser");
private static final UserId HOSTED_VESPA_OPERATOR = new UserId("johnoperator");
// Okta tokens accepted by the test container's auth filter (the access token is a canned JWT).
private static final OktaIdentityToken OKTA_IT = new OktaIdentityToken("okta-it");
private static final OktaAccessToken OKTA_AT = new OktaAccessToken("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.he0ErCNloe4J7Id0Ry2SEDg09lKkZkfsRiGsdX_vgEg");
// Per-test harnesses, re-created in before().
private ContainerTester tester;
private DeploymentTester deploymentTester;
@Before
public void before() {
    // Fresh container and deployment harness per test; compute version status so upgrades work.
    tester = new ContainerTester(container, responseFiles);
    ControllerTester controllerTester = new ControllerTester(tester);
    deploymentTester = new DeploymentTester(controllerTester);
    deploymentTester.controllerTester().computeVersionStatus();
}
/**
 * Test helper (not a test case): stores deployment and ownership issue ids plus an owner
 * on the given application, so responses that include issue/owner data can be exercised.
 *
 * Fix: removed a stray {@code @Test} annotation. JUnit 4 requires {@code @Test} methods to be
 * public, void and parameterless; annotating this private, parameterized helper causes the
 * whole test class to fail validation at initialization.
 */
private void addIssues(DeploymentTester tester, TenantAndApplicationId id) {
    tester.applications().lockApplicationOrThrow(id, application ->
            tester.controller().applications().store(application.withDeploymentIssueId(IssueId.from("123"))
                                                                .withOwnershipIssueId(IssueId.from("321"))
                                                                .withOwner(User.from("owner-username"))));
}
// Verifies the global-rotation status/override endpoints: error cases for unknown application
// and missing deployment, then GET/PUT/DELETE of the override for both tenant and operator roles.
@Test
public void testRotationOverride() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
var westZone = ZoneId.from("prod", "us-west-1");
var eastZone = ZoneId.from("prod", "us-east-3");
var applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region(westZone.region())
.region(eastZone.region())
.build();
// Deploy so the rotation endpoints have real deployments to act on.
var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
app.submit(applicationPackage).deploy();
// Unknown application -> 400.
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/environment/prod/region/us-west-1/instance/default/global-rotation", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"tenant2.application2 not found\"}",
400);
// Known application but no deployment in the requested zone -> 404, for both GET and PUT.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/global-rotation", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"application 'tenant1.application1.instance1' has no deployment in prod.us-central-1\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/global-rotation/override", PUT)
.userIdentity(USER_ID)
.data("{\"reason\":\"unit-test\"}"),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"application 'tenant1.application1.instance1' has no deployment in prod.us-central-1\"}",
404);
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-west-1"));
// Happy-path status and override round trip as the tenant user.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.userIdentity(USER_ID),
new File("global-rotation.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", GET)
.userIdentity(USER_ID),
new File("global-rotation-get.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", PUT)
.userIdentity(USER_ID)
.data("{\"reason\":\"unit-test\"}"),
new File("global-rotation-put.json"));
// PUT by the tenant sets the deployment out of rotation, attributed to the tenant agent.
assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.out, GlobalRouting.Agent.tenant);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", DELETE)
.userIdentity(USER_ID)
.data("{\"reason\":\"unit-test\"}"),
new File("global-rotation-delete.json"));
// DELETE restores routing to 'in'.
assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.in, GlobalRouting.Agent.tenant);
// The same PUT by a hosted operator is attributed to the operator agent.
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", PUT)
.userIdentity(HOSTED_VESPA_OPERATOR)
.data("{\"reason\":\"unit-test\"}"),
new File("global-rotation-put.json"));
assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.out, GlobalRouting.Agent.operator);
}
// When an application declares several endpoints, global-rotation queries must name one
// via the 'endpointId' query parameter; omitting it is a 400.
@Test
public void multiple_endpoints() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.region("us-west-1")
.region("us-east-3")
.region("eu-west-1")
.endpoint("eu", "default", "eu-west-1")
.endpoint("default", "default", "us-west-1", "us-east-3")
.build();
var app = deploymentTester.newDeploymentContext("tenant1", "application1", "instance1");
app.submit(applicationPackage).deploy();
setZoneInRotation("rotation-fqdn-2", ZoneId.from("prod", "us-west-1"));
setZoneInRotation("rotation-fqdn-2", ZoneId.from("prod", "us-east-3"));
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "eu-west-1"));
// No endpointId given while multiple rotations exist -> 400.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"application 'tenant1.application1.instance1' has multiple rotations. Query parameter 'endpointId' must be given\"}",
400);
// Each declared endpoint can be queried explicitly.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.properties(Map.of("endpointId", "default"))
.userIdentity(USER_ID),
"{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.properties(Map.of("endpointId", "eu"))
.userIdentity(USER_ID),
"{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/eu-west-1/global-rotation", GET)
.properties(Map.of("endpointId", "eu"))
.userIdentity(USER_ID),
"{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
200);
}
// Direct deployment of a system application by a hosted operator: rejected during a system
// upgrade, allowed once the system is on the controller version.
@Test
public void testDeployDirectly() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
tester.assertResponse(request("/application/v4/tenant/tenant1", POST).userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"));
addUserToHostedOperatorRole(HostedAthenzIdentities.from(SCREWDRIVER_ID));
// No application package in the request body: deploy the latest known package.
MultiPartStreamer noAppEntity = createApplicationDeployData(Optional.empty(), true);
// System is mid-upgrade here, so deploying a system application is refused.
tester.assertResponse(request("/application/v4/tenant/hosted-vespa/application/routing/environment/prod/region/us-central-1/instance/default/deploy", POST)
.data(noAppEntity)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Deployment of system applications during a system upgrade is not allowed\"}",
400);
// Finish the system upgrade to the controller version, after which the deploy succeeds.
deploymentTester.controllerTester().upgradeSystem(deploymentTester.controller().readVersionStatus().controllerVersion().get().versionNumber());
tester.assertResponse(request("/application/v4/tenant/hosted-vespa/application/routing/environment/prod/region/us-central-1/instance/default/deploy", POST)
.data(noAppEntity)
.userIdentity(HOSTED_VESPA_OPERATOR),
new File("deploy-result.json"));
}
// Metering endpoint serialization: feed canned metering data through the mock client and
// compare the rendered response against the stored file.
@Test
public void testMeteringResponses() {
MockMeteringClient mockMeteringClient = tester.serviceRegistry().meteringService();
ResourceAllocation currentSnapshot = new ResourceAllocation(1, 2, 3);
ResourceAllocation thisMonth = new ResourceAllocation(12, 24, 1000);
ResourceAllocation lastMonth = new ResourceAllocation(24, 48, 2000);
ApplicationId applicationId = ApplicationId.from("doesnotexist", "doesnotexist", "default");
// Three snapshots at doubling timestamps to exercise the history serialization.
Map<ApplicationId, List<ResourceSnapshot>> snapshotHistory = Map.of(applicationId, List.of(
new ResourceSnapshot(applicationId, 1, 2,3, Instant.ofEpochMilli(123), ZoneId.defaultId()),
new ResourceSnapshot(applicationId, 1, 2,3, Instant.ofEpochMilli(246), ZoneId.defaultId()),
new ResourceSnapshot(applicationId, 1, 2,3, Instant.ofEpochMilli(492), ZoneId.defaultId())));
mockMeteringClient.setMeteringData(new MeteringData(thisMonth, lastMonth, currentSnapshot, snapshotHistory));
tester.assertResponse(request("/application/v4/tenant/doesnotexist/application/doesnotexist/metering", GET)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance1-metering.json"));
}
// DELETE .../deployment removes all production deployments but leaves dev deployments intact.
@Test
public void testRemovingAllDeployments() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.region("us-west-1")
.region("us-east-3")
.region("eu-west-1")
.endpoint("eu", "default", "eu-west-1")
.endpoint("default", "default", "us-west-1", "us-east-3")
.build();
deploymentTester.controllerTester().createTenant("tenant1", ATHENZ_TENANT_DOMAIN.getName(), 432L);
var app = deploymentTester.newDeploymentContext("tenant1", "application1", "instance1");
app.submit(applicationPackage).deploy();
// Add a dev deployment alongside the three prod deployments.
tester.controller().jobController().deploy(app.instanceId(), JobType.devUsEast1, Optional.empty(), applicationPackage);
assertEquals(Set.of(ZoneId.from("prod.us-west-1"), ZoneId.from("prod.us-east-3"), ZoneId.from("prod.eu-west-1"), ZoneId.from("dev.us-east-1")),
app.instance().deployments().keySet());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deployment", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"All deployments removed\"}");
// Only the dev deployment survives.
assertEquals(Set.of(ZoneId.from("dev.us-east-1")), app.instance().deployments().keySet());
}
// Exercises the API's error responses: 403/404 on missing or unauthorized resources,
// 400 on duplicates, invalid names and invalid payloads, and deletion ordering constraints.
// Assertions are order-dependent (earlier requests create state later ones depend on).
@Test
public void testErrorResponses() throws Exception {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
// PUT on a non-existent tenant -> 403 (no access before the tenant exists).
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
accessDenied,
403);
// GETs on non-existent tenant/application -> 404.
tester.assertResponse(request("/application/v4/tenant/tenant1", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"tenant1.application1 not found\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-east/instance/default", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"tenant1.application1 not found\"}",
404);
// Create the tenant successfully; then cover duplicate-domain, duplicate-name and
// invalid-name creation attempts.
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not create tenant 'tenant2': The Athens domain 'domain1' is already connected to tenant 'tenant1'\"}",
400);
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'tenant1' already exists\"}",
400);
tester.assertResponse(request("/application/v4/tenant/my_tenant_2", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"New tenant or application names must start with a letter, may contain no more than 20 characters, and may only contain lowercase letters, digits or dashes, but no double-dashes.\"}",
400);
tester.assertResponse(request("/application/v4/tenant/hosted-vespa", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'hosted-vespa' already exists\"}",
400);
// Instance creation: first succeeds, duplicate -> 400.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not create 'tenant1.application1.instance1': Instance already exists\"}",
400);
ConfigServerMock configServer = tester.serviceRegistry().configServerMock();
configServer.throwOnNextPrepare(new ConfigServerException(ConfigServerException.ErrorCode.INVALID_APPLICATION_PACKAGE, "Failed to prepare application", "Invalid application package"));
// Package download: none submitted, unknown build number, and non-numeric build number.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET).userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"No application package has been submitted for 'tenant1.application1'\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
.properties(Map.of("build", "42"))
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"No application package found for 'tenant1.application1' with build number 42\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
.properties(Map.of("build", "foobar"))
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Invalid build number: For input string: \\\"foobar\\\"\"}",
400);
// Direct deploy through this API is not supported for regular applications.
MultiPartStreamer entity = createApplicationDeployData(applicationPackageInstance1, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1/deploy", POST)
.data(entity)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Deployment of tenant1.application1.instance1 is not supported through this API\"}", 400);
// Tenant deletion is blocked while it still has applications; delete the instance first.
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not delete tenant 'tenant1': This tenant has active applications\"}",
400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted instance tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Could not delete instance 'tenant1.application1.instance1': Instance not found\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
// Deleting again without Okta tokens -> 403.
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(USER_ID),
accessDenied,
403);
// A legacy tenant name with underscores maps to the dashed form, which then counts as taken.
tester.controller().curator().writeTenant(new AthenzTenant(TenantName.from("my_tenant"), ATHENZ_TENANT_DOMAIN,
new Property("property1"), Optional.empty(), Optional.empty(), Instant.EPOCH, LastLoginInfo.EMPTY));
tester.assertResponse(request("/application/v4/tenant/my-tenant", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'my-tenant' already exists\"}",
400);
}
// Authorization matrix: unauthenticated -> 401; users who are not Athenz domain admins are
// rejected (403) on tenant/application mutations; authorized users succeed.
@Test
public void testAuthorization() {
UserId authorizedUser = USER_ID;
UserId unauthorizedUser = new UserId("othertenant");
// No identity at all -> 401.
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"{\n \"message\" : \"Not authenticated\"\n}",
401);
// Listing tenants is open to any authenticated user.
tester.assertResponse(request("/application/v4/tenant/", GET)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"[]",
200);
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
// Tenant creation requires domain admin rights.
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.userIdentity(unauthorizedUser),
"{\"error-code\":\"FORBIDDEN\",\"message\":\"The user 'user.othertenant' is not admin in Athenz domain 'domain1'\"}",
403);
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"),
200);
// Instance creation: denied for the non-admin, allowed for the admin.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(unauthorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"),
200);
// Production deploy requires a deploy role, which this user lacks.
MultiPartStreamer entity = createApplicationDeployData(applicationPackageDefault, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/default/deploy", POST)
.data(entity)
.userIdentity(USER_ID),
accessDenied,
403);
// Application deletion: denied for the non-admin, allowed for the admin.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(unauthorizedUser),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/default", POST)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference-default.json"),
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant1.application1\"}",
200);
// Tenant updates (e.g. changing the Athenz domain): admin-only.
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.userIdentity(unauthorizedUser),
accessDenied,
403);
createAthenzDomainWithAdmin(new AthenzDomain("domain2"), USER_ID)
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property1\"}")
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant1.json"),
200);
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(unauthorizedUser),
accessDenied,
403);
}
// Submission validation of the Athenz identity in deployment.xml: the service's domain must
// equal the tenant's domain, and the controller must be allowed to launch that service.
@Test
public void athenz_service_must_be_allowed_to_launch_and_be_under_tenant_domain() {
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("another.domain"), com.yahoo.config.provision.AthenzService.from("service"))
.region("us-west-1")
.build();
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
deploymentTester.controllerTester().createTenant("tenant1", ATHENZ_TENANT_DOMAIN.getName(), 1234L);
var application = deploymentTester.newDeploymentContext("tenant1", "application1", "default");
ScrewdriverId screwdriverId = new ScrewdriverId("123");
addScrewdriverUserToDeployRole(screwdriverId, ATHENZ_TENANT_DOMAIN, application.instanceId().application());
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(new AthenzDomain("another.domain"), "service"));
// Service domain differs from the tenant domain -> 400, even though launch is allowed.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit/", POST)
.data(createApplicationSubmissionData(applicationPackage, 123))
.screwdriverIdentity(screwdriverId),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Athenz domain in deployment.xml: [another.domain] must match tenant domain: [domain1]\"}",
400);
// Correct domain, but launch of the service has not been allowed yet -> 400.
applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.region("us-west-1")
.build();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit", POST)
.data(createApplicationSubmissionData(applicationPackage, 123))
.screwdriverIdentity(screwdriverId),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Not allowed to launch Athenz service domain1.service\"}",
400);
// After allowing launch under the tenant domain, the submission succeeds.
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit/", POST)
.data(createApplicationSubmissionData(applicationPackage, 123))
.screwdriverIdentity(screwdriverId),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
}
// Dev deployments that declare an Athenz service require the deploying user to be an admin
// of the tenant domain: first attempt -> 403, then succeeds after granting admin.
@Test
public void personal_deployment_with_athenz_service_requires_user_is_admin() {
UserId tenantAdmin = new UserId("tenant-admin");
UserId userId = new UserId("new-user");
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, tenantAdmin);
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.build();
createTenantAndApplication();
MultiPartStreamer entity = createApplicationDeployData(applicationPackage, true);
// 'new-user' is not a domain admin yet -> 403.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/new-user/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(userId),
accessDenied,
403);
// Grant admin in the tenant domain, after which the personal dev deployment starts.
tester.athenzClientFactory().getSetup()
.domains.get(ATHENZ_TENANT_DOMAIN)
.admin(HostedAthenzIdentities.from(userId));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/new-user/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(userId),
"{\"message\":\"Deployment started in run 1 of dev-us-east-1 for tenant1.application1.new-user. This may take about 15 minutes the first time.\",\"run\":1}");
}
// Developers deploying to dev with a foreign-domain Athenz identity need an explicit launch
// policy (or tenant-admin status) in that domain; also covers zip vs. unsupported content types.
@Test
public void developers_can_deploy_when_privileged() {
// Set up a tenant domain admin, and a separate 'sandbox' tenant owned by a developer.
UserId tenantAdmin = new UserId("tenant-admin");
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, tenantAdmin);
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
UserId developer = new UserId("developer");
AthenzDomain sandboxDomain = new AthenzDomain("sandbox");
createAthenzDomainWithAdmin(sandboxDomain, developer);
AthenzTenantSpec tenantSpec = new AthenzTenantSpec(TenantName.from("sandbox"),
sandboxDomain,
new Property("vespa"),
Optional.empty());
AthenzCredentials credentials = new AthenzCredentials(
new AthenzPrincipal(new AthenzUser(developer.id())), sandboxDomain, OKTA_IT, OKTA_AT);
tester.controller().tenants().create(tenantSpec, credentials);
tester.controller().applications().createApplication(TenantAndApplicationId.from("sandbox", "myapp"), credentials);
// The package's Athenz identity lives in 'domain1', not the developer's domain.
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.build();
MultiPartStreamer entity = createApplicationDeployData(applicationPackage, true);
// Developer has no launch rights in domain1 -> 400 with guidance.
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(developer),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"User user.developer is not allowed to launch service domain1.service. Please reach out to the domain admin.\"}",
400);
// Grant an explicit launch policy for this developer; deploy then succeeds.
AthenzDbMock.Domain domainMock = tester.athenzClientFactory().getSetup().getOrCreateDomain(ATHENZ_TENANT_DOMAIN);
domainMock.withPolicy("user." + developer.id(), "launch", "service.service");
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(developer),
"{\"message\":\"Deployment started in run 1 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":1}",
200);
// A second developer who is tenant admin in both domains may deploy without a policy.
UserId developer2 = new UserId("developer2");
tester.athenzClientFactory().getSetup().getOrCreateDomain(sandboxDomain).tenantAdmin(new AthenzUser(developer2.id()));
tester.athenzClientFactory().getSetup().getOrCreateDomain(ATHENZ_TENANT_DOMAIN).tenantAdmin(new AthenzUser(developer2.id()));
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(developer2),
"{\"message\":\"Deployment started in run 2 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":2}",
200);
// Raw application/zip bodies are accepted; application/gzip is rejected with 400.
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(applicationPackageInstance1.zippedContent())
.contentType("application/zip")
.userIdentity(developer2),
"{\"message\":\"Deployment started in run 3 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":3}");
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(applicationPackageInstance1.zippedContent())
.contentType("application/gzip")
.userIdentity(developer2),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Expected a multipart or application/zip message, but got Content-Type: application/gzip\"}", 400);
}
// Rendering of instance/deployment responses when routing policies exist, including legacy
// endpoints and the flag that hides shared routing endpoints.
@Test
public void applicationWithRoutingPolicy() {
var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
var zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
// Zone supports both exclusive and shared routing methods.
deploymentTester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone),
List.of(RoutingMethod.exclusive, RoutingMethod.shared));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain"), AthenzService.from("service"))
.compileVersion(RoutingController.DIRECT_ROUTING_MIN_VERSION)
.instances("instance1")
.region(zone.region().value())
.build();
app.submit(applicationPackage).deploy();
// Add an inactive policy so both active and inactive policies are serialized.
app.addInactiveRoutingPolicy(zone);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("instance-with-routing-policy.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("deployment-with-routing-policy.json"));
// 'includeLegacyEndpoints' adds the legacy endpoint entries to the response.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
.userIdentity(USER_ID)
.properties(Map.of("includeLegacyEndpoints", "true")),
new File("deployment-with-routing-policy-legacy.json"));
// With the hide-shared-routing flag on, shared endpoints are omitted.
((InMemoryFlagSource) tester.controller().flagSource()).withBooleanFlag(Flags.HIDE_SHARED_ROUTING_ENDPOINT.id(), true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("deployment-without-shared-endpoints.json"));
}
@Test
public void support_access() {
var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
var zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
deploymentTester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone),
List.of(RoutingMethod.exclusive, RoutingMethod.shared));
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain"), AthenzService.from("service"))
.compileVersion(RoutingController.DIRECT_ROUTING_MIN_VERSION)
.instances("instance1")
.region(zone.region().value())
.build();
app.submit(applicationPackage).deploy();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", GET)
.userIdentity(USER_ID),
"{\"state\":{\"supportAccess\":\"NOT_ALLOWED\"},\"history\":[],\"grants\":[]}", 200
);
Instant now = tester.controller().clock().instant().truncatedTo(ChronoUnit.SECONDS);
String allowedResponse = "{\"state\":{\"supportAccess\":\"ALLOWED\",\"until\":\"" + serializeInstant(now.plus(7, ChronoUnit.DAYS))
+ "\",\"by\":\"user.myuser\"},\"history\":[{\"state\":\"allowed\",\"at\":\"" + serializeInstant(now)
+ "\",\"until\":\"" + serializeInstant(now.plus(7, ChronoUnit.DAYS))
+ "\",\"by\":\"user.myuser\"}],\"grants\":[]}";
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", POST)
.userIdentity(USER_ID),
allowedResponse, 200
);
X509Certificate support_cert = grantCertificate(now, now.plusSeconds(3600));
String grantPayload= "{\n" +
" \"applicationId\": \"tenant1:application1:instance1\",\n" +
" \"zone\": \"prod.us-west-1\",\n" +
" \"certificate\":\""+X509CertificateUtils.toPem(support_cert)+ "\"\n" +
"}";
tester.assertResponse(request("/controller/v1/access/grants/"+HOSTED_VESPA_OPERATOR.id(), POST)
.data(grantPayload)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Operator user.johnoperator granted access and job production-us-west-1 triggered\"}");
String grantResponse = allowedResponse.replaceAll("\"grants\":\\[]",
"\"grants\":[{\"requestor\":\"user.johnoperator\",\"notBefore\":\"" + serializeInstant(now) + "\",\"notAfter\":\"" + serializeInstant(now.plusSeconds(3600)) + "\"}]");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", GET)
.userIdentity(USER_ID),
grantResponse, 200
);
List<SupportAccessGrant> activeGrants = tester.controller().supportAccess().activeGrantsFor(new DeploymentId(ApplicationId.fromSerializedForm("tenant1:application1:instance1"), zone));
assertEquals(1, activeGrants.size());
app.assertRunning(JobType.productionUsWest1);
String disallowedResponse = grantResponse
.replaceAll("ALLOWED\".*?}", "NOT_ALLOWED\"} |
Great! | public void testApplicationApi() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
tester.assertResponse(request("/application/v4/", GET).userIdentity(USER_ID),
new File("root.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
new File("tenant-without-applications.json"));
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN_2, USER_ID);
registerContact(1234);
tester.assertResponse(request("/application/v4/tenant/tenant2", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property2\", \"propertyId\":\"1234\"}"),
new File("tenant-without-applications-with-id.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property2\", \"propertyId\":\"1234\"}"),
new File("tenant-without-applications-with-id.json"));
updateContactInformation();
tester.controller().tenants().updateLastLogin(TenantName.from("tenant2"),
List.of(LastLoginInfo.UserLevel.user, LastLoginInfo.UserLevel.administrator), Instant.ofEpochMilli(1234));
tester.assertResponse(request("/application/v4/tenant/tenant2", GET).userIdentity(USER_ID),
new File("tenant2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", GET).userIdentity(USER_ID),
new File("tenant-with-application.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", GET)
.userIdentity(USER_ID)
.properties(Map.of("activeInstances", "true")),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/", GET).userIdentity(USER_ID),
new File("application-list.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/fake-app/instance/", GET).userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Application 'fake-app' does not exist\"}", 404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/", GET).userIdentity(USER_ID),
new File("application-list.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/", GET)
.userIdentity(USER_ID)
.properties(Map.of("recursive", "true",
"production", "true")),
new File("tenant-with-empty-application.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET)
.userIdentity(USER_ID)
.properties(Map.of("recursive", "true",
"production", "true")),
new File("application-without-instances.json"));
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
ApplicationId id = ApplicationId.from("tenant1", "application1", "instance1");
var app1 = deploymentTester.newDeploymentContext(id);
MultiPartStreamer entity = createApplicationDeployData(applicationPackageInstance1, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/production-us-east-3/", POST)
.data(entity)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Direct deployments are only allowed to manually deployed environments.\"}", 400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/production-us-east-3/", POST)
.data(entity)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Deployment started in run 1 of production-us-east-3 for tenant1.application1.instance1. This may take about 15 minutes the first time.\",\"run\":1}");
app1.runJob(JobType.productionUsEast3);
tester.controller().applications().deactivate(app1.instanceId(), ZoneId.from("prod", "us-east-3"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/dev-us-east-1/", POST)
.data(entity)
.userIdentity(USER_ID),
"{\"message\":\"Deployment started in run 1 of dev-us-east-1 for tenant1.application1.instance1. This may take about 15 minutes the first time.\",\"run\":1}");
app1.runJob(JobType.devUsEast1);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/dev-us-east-1/package", GET)
.userIdentity(USER_ID),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1.instance1.dev.us-east-1.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(applicationPackageInstance1.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/otheruser/deploy/dev-us-east-1", POST)
.userIdentity(OTHER_USER_ID)
.data(createApplicationDeployData(applicationPackageInstance1, false)),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/otheruser/environment/dev/region/us-east-1", DELETE)
.userIdentity(OTHER_USER_ID),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser/deploy/dev-us-east-1", POST)
.userIdentity(USER_ID)
.data(createApplicationDeployData(applicationPackageInstance1, false)),
new File("deployment-job-accepted-2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser/environment/dev/region/us-east-1", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Deactivated tenant1.application1.myuser in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted instance tenant1.application1.myuser\"}");
addScrewdriverUserToDeployRole(SCREWDRIVER_ID,
ATHENZ_TENANT_DOMAIN,
id.application());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(applicationPackageInstance1, 123)),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
app1.runJob(JobType.systemTest).runJob(JobType.stagingTest).runJob(JobType.productionUsCentral1);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region("us-west-1")
.region("us-east-3")
.allow(ValidationId.globalEndpointChange)
.build();
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference-2.json"));
ApplicationId id2 = ApplicationId.from("tenant2", "application2", "instance1");
var app2 = deploymentTester.newDeploymentContext(id2);
addScrewdriverUserToDeployRole(SCREWDRIVER_ID,
ATHENZ_TENANT_DOMAIN_2,
id2.application());
deploymentTester.applications().deploymentTrigger().triggerChange(id2, Change.of(Version.fromString("7.0")));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(applicationPackage, 1000)),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
deploymentTester.triggerJobs();
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/job/production-us-west-1", POST)
.data("{\"skipTests\":true}")
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant2.application2.instance1\"}");
app2.runJob(JobType.productionUsWest1);
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/job/production-us-west-1", POST)
.data("{\"reTrigger\":true}")
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant2.application2.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/environment/prod/region/us-west-1", DELETE)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Deactivated tenant2.application2.instance1 in prod.us-west-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.screwdriverIdentity(SCREWDRIVER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", PATCH)
.userIdentity(USER_ID)
.data("{\"majorVersion\":7}"),
"{\"message\":\"Set major version to 7\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/key", POST)
.userIdentity(USER_ID)
.data("{\"key\":\"" + pemPublicKey + "\"}"),
"{\"keys\":[\"-----BEGIN PUBLIC KEY-----\\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\\nz/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\\n-----END PUBLIC KEY-----\\n\"]}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", PATCH)
.userIdentity(USER_ID)
.data("{\"pemDeployKey\":\"" + pemPublicKey + "\"}"),
"{\"message\":\"Added deploy key " + quotedPemPublicKey + "\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2-with-patches.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", PATCH)
.userIdentity(USER_ID)
.data("{\"majorVersion\":null}"),
"{\"message\":\"Set major version to empty\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/compile-version", GET)
.userIdentity(USER_ID),
"{\"compileVersion\":\"6.1.0\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/key", DELETE)
.userIdentity(USER_ID)
.data("{\"key\":\"" + pemPublicKey + "\"}"),
"{\"keys\":[]}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted instance tenant2.application2.default\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant2.application2\"}");
deploymentTester.upgrader().overrideConfidence(Version.fromString("6.1"), VespaVersion.Confidence.broken);
deploymentTester.controllerTester().computeVersionStatus();
setDeploymentMaintainedInfo();
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-central-1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("instance.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("deployment.json"));
addIssues(deploymentTester, TenantAndApplicationId.from("tenant1", "application1"));
tester.assertResponse(request("/application/v4/", GET)
.userIdentity(USER_ID)
.recursive("deployment"),
new File("recursive-root.json"));
tester.assertResponse(request("/application/v4/", GET)
.userIdentity(USER_ID)
.recursive("tenant"),
new File("recursive-until-tenant-root.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/", GET)
.userIdentity(USER_ID)
.recursive("true"),
new File("tenant1-recursive.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID)
.recursive("true"),
new File("instance1-recursive.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/nodes", GET)
.userIdentity(USER_ID),
new File("application-nodes.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/clusters", GET)
.userIdentity(USER_ID),
new File("application-clusters.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/environment/dev/region/us-east-1/instance/default/logs?from=1233&to=3214", GET)
.userIdentity(USER_ID),
"INFO - All good");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/environment/prod/region/controller/instance/default/logs?from=1233&to=3214", GET)
.userIdentity(USER_ID),
"INFO - All good");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/instance/default/environment/dev/region/us-east-1/content/", GET).userIdentity(USER_ID),
"{\"path\":\"/\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/instance/default/environment/dev/region/us-east-1/content/bar/file.json?query=param", GET).userIdentity(USER_ID),
"{\"path\":\"/bar/file.json\"}");
updateMetrics();
tester.assertJsonResponse(request("/application/v4/tenant/tenant2/application/application1/environment/dev/region/us-east-1/instance/default/metrics", GET)
.userIdentity(USER_ID),
new File("proton-metrics.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", DELETE)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Changed deployment from 'application change to 1.0.1-commit1' to 'no change' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", DELETE)
.userIdentity(USER_ID)
.data("{\"cancel\":\"all\"}"),
"{\"message\":\"No deployment in progress for tenant1.application1.instance1 at this time\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", POST)
.userIdentity(USER_ID)
.data("6.1.0"),
"{\"message\":\"Triggered pin to 6.1 for tenant1.application1.instance1\"}");
assertTrue("Action is logged to audit log",
tester.controller().auditLogger().readLog().entries().stream()
.anyMatch(entry -> entry.resource().equals("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin?")));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to 6.1' to 'upgrade to 6.1' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":false}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", POST)
.userIdentity(USER_ID)
.data("6.1"),
"{\"message\":\"Triggered pin to 6.1 for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/platform", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to 6.1' to 'pin to current platform' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to current platform' to 'no change' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1/pause", POST)
.userIdentity(USER_ID),
"{\"message\":\"production-us-west-1 for tenant1.application1.instance1 paused for " + DeploymentTrigger.maxPause + "\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1/pause", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"production-us-west-1 for tenant1.application1.instance1 resumed\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1", POST)
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.properties(Map.of("clusterId", "boo,moo"))
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1, on clusters boo, moo\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.properties(Map.of("clusterId", "boo,moo",
"documentType", "foo,boo"))
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1, on clusters boo, moo, for types foo, boo\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", POST)
.userIdentity(USER_ID),
"{\"message\":\"Enabled reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Disabled reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", GET)
.userIdentity(USER_ID),
"{\"enabled\":true,\"clusters\":[{\"name\":\"cluster\",\"pending\":[{\"type\":\"type\",\"requiredGeneration\":100}],\"ready\":[{\"type\":\"type\",\"readyAtMillis\":345,\"startedAtMillis\":456,\"endedAtMillis\":567,\"state\":\"failed\",\"message\":\"(#`д´)ノ\",\"progress\":0.1}]}]}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}");
addUserToHostedOperatorRole(HostedAthenzIdentities.from(SCREWDRIVER_ID));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/staging/region/us-east-3/instance/instance1/restart", POST)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in staging.us-east-3\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/test/region/us-east-1/instance/instance1/restart", POST)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in test.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1/restart", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.properties(Map.of("hostname", "node-1-tenant-host-prod.us-central-1"))
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}", 200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/dev/region/us-east-1/suspend", POST)
.userIdentity(USER_ID),
"{\"message\":\"Suspended orchestration of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/dev/region/us-east-1/suspend", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Resumed orchestration of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-east-3/suspend", POST)
.userIdentity(USER_ID),
accessDenied, 403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/suspended", GET)
.userIdentity(USER_ID),
new File("suspended.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/service", GET)
.userIdentity(USER_ID),
new File("services.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/service/storagenode-awe3slno6mmq2fye191y324jl/state/v1/", GET)
.userIdentity(USER_ID),
new File("service.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("delete-with-active-deployments.json"), 400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in prod.us-central-1\"}");
tester.controller().jobController().deploy(ApplicationId.from("tenant1", "application1", "default"),
JobType.productionUsCentral1,
Optional.empty(),
applicationPackageDefault);
tester.controller().jobController().deploy(ApplicationId.from("tenant1", "application1", "my-user"),
JobType.devUsEast1,
Optional.empty(),
applicationPackageDefault);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/my-user/job/dev-us-east-1/test-config", GET)
.userIdentity(USER_ID),
new File("test-config-dev.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/my-user/job/production-us-central-1/test-config", GET)
.userIdentity(USER_ID),
new File("test-config.json"));
tester.controller().applications().deactivate(ApplicationId.from("tenant1", "application1", "default"),
ZoneId.from("prod", "us-central-1"));
tester.controller().applications().deactivate(ApplicationId.from("tenant1", "application1", "my-user"),
ZoneId.from("dev", "us-east-1"));
ApplicationPackage packageWithServiceForWrongDomain = new ApplicationPackageBuilder()
.instances("instance1")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from(ATHENZ_TENANT_DOMAIN_2.getName()), AthenzService.from("service"))
.region("us-west-1")
.build();
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN_2, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(packageWithServiceForWrongDomain, 123)),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Athenz domain in deployment.xml: [domain2] must match tenant domain: [domain1]\"}", 400);
ApplicationPackage packageWithService = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from(ATHENZ_TENANT_DOMAIN.getName()), AthenzService.from("service"))
.region("us-central-1")
.parallel("us-west-1", "us-east-3")
.build();
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(packageWithService, 123)),
"{\"message\":\"Application package version: 1.0.2-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET).userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1-build2.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(packageWithService.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
.properties(Map.of("build", "1"))
.userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1-build1.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(applicationPackageInstance1.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.header("X-Content-Hash", "not/the/right/hash")
.data(createApplicationSubmissionData(packageWithService, 123)),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Value of X-Content-Hash header does not match computed content hash\"}", 400);
MultiPartStreamer streamer = createApplicationSubmissionData(packageWithService, 123);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.header("X-Content-Hash", Base64.getEncoder().encodeToString(Signatures.sha256Digest(streamer::data)))
.data(streamer),
"{\"message\":\"Application package version: 1.0.3-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
ApplicationPackage multiInstanceSpec = new ApplicationPackageBuilder()
.instances("instance1,instance2")
.region("us-central-1")
.parallel("us-west-1", "us-east-3")
.endpoint("default", "foo", "us-central-1", "us-west-1", "us-east-3")
.build();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(multiInstanceSpec, 123)),
"{\"message\":\"Application package version: 1.0.4-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
assertEquals(2, tester.controller().applications().deploymentTrigger().triggerReadyJobs());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job", GET)
.userIdentity(USER_ID),
new File("jobs.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deployment", GET)
.userIdentity(USER_ID),
new File("deployment-overview.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/system-test", GET)
.userIdentity(USER_ID),
new File("system-test-job.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/system-test/run/1", GET)
.userIdentity(USER_ID),
new File("system-test-details.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/staging-test", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Aborting run 2 of staging-test for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/", Request.Method.OPTIONS)
.userIdentity(USER_ID),
"");
addNotifications(TenantName.from("tenant1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/notifications", GET).userIdentity(USER_ID),
new File("notifications-tenant1.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/notifications", GET)
.properties(Map.of("application", "app2")).userIdentity(USER_ID),
new File("notifications-tenant1-app2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant1.application1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE).userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
} | "{\"error-code\":\"NOT_FOUND\",\"message\":\"Application 'fake-app' does not exist\"}", 404); | public void testApplicationApi() {
// End-to-end walk through the /application/v4 REST API: tenant CRUD, instance CRUD,
// deployments, submissions, job control, pinning, reindexing, restarts, suspension,
// logs/metrics/content proxying, notifications, and finally teardown.
// NOTE(review): the statements are strictly order-dependent — each request mutates
// controller state that later assertions depend on; do not reorder.

// --- Tenant creation and update (tenant1 backed by Athenz domain1) ---
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
// Root listing before any tenants beyond the defaults exist.
tester.assertResponse(request("/application/v4/", GET).userIdentity(USER_ID),
new File("root.json"));
// POST creates tenant1; PUT with the same payload is an idempotent update.
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
new File("tenant-without-applications.json"));
// --- Second tenant (tenant2) with a property id and registered contact ---
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN_2, USER_ID);
registerContact(1234);
tester.assertResponse(request("/application/v4/tenant/tenant2", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property2\", \"propertyId\":\"1234\"}"),
new File("tenant-without-applications-with-id.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property2\", \"propertyId\":\"1234\"}"),
new File("tenant-without-applications-with-id.json"));
// Contact info and last-login timestamps should surface in the tenant2 GET response.
updateContactInformation();
tester.controller().tenants().updateLastLogin(TenantName.from("tenant2"),
List.of(LastLoginInfo.UserLevel.user, LastLoginInfo.UserLevel.administrator), Instant.ofEpochMilli(1234));
tester.assertResponse(request("/application/v4/tenant/tenant2", GET).userIdentity(USER_ID),
new File("tenant2.json"));
// --- Application/instance creation under tenant1 ---
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", GET).userIdentity(USER_ID),
new File("tenant-with-application.json"));
// activeInstances=true filters out the instance since nothing is deployed yet.
tester.assertResponse(request("/application/v4/tenant/tenant1", GET)
.userIdentity(USER_ID)
.properties(Map.of("activeInstances", "true")),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/", GET).userIdentity(USER_ID),
new File("application-list.json"));
// Unknown application name yields 404.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/fake-app/instance/", GET).userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Application 'fake-app' does not exist\"}", 404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/", GET).userIdentity(USER_ID),
new File("application-list.json"));
// Recursive listings restricted to production deployments (none exist yet).
tester.assertResponse(request("/application/v4/tenant/tenant1/", GET)
.userIdentity(USER_ID)
.properties(Map.of("recursive", "true",
"production", "true")),
new File("tenant-with-empty-application.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET)
.userIdentity(USER_ID)
.properties(Map.of("recursive", "true",
"production", "true")),
new File("application-without-instances.json"));
// --- Direct deployments: forbidden to prod for plain users, allowed for operators and to dev ---
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
ApplicationId id = ApplicationId.from("tenant1", "application1", "instance1");
var app1 = deploymentTester.newDeploymentContext(id);
MultiPartStreamer entity = createApplicationDeployData(applicationPackageInstance1, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/production-us-east-3/", POST)
.data(entity)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Direct deployments are only allowed to manually deployed environments.\"}", 400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/production-us-east-3/", POST)
.data(entity)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Deployment started in run 1 of production-us-east-3 for tenant1.application1.instance1. This may take about 15 minutes the first time.\",\"run\":1}");
app1.runJob(JobType.productionUsEast3);
// Clean up the operator-forced prod deployment before continuing.
tester.controller().applications().deactivate(app1.instanceId(), ZoneId.from("prod", "us-east-3"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/dev-us-east-1/", POST)
.data(entity)
.userIdentity(USER_ID),
"{\"message\":\"Deployment started in run 1 of dev-us-east-1 for tenant1.application1.instance1. This may take about 15 minutes the first time.\",\"run\":1}");
app1.runJob(JobType.devUsEast1);
// Download the deployed dev package and verify content + attachment header.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/dev-us-east-1/package", GET)
.userIdentity(USER_ID),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1.instance1.dev.us-east-1.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(applicationPackageInstance1.zippedContent(), response.getBody());
},
200);
// --- Authorization: a different user may not deploy/deactivate another user's instance ---
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/otheruser/deploy/dev-us-east-1", POST)
.userIdentity(OTHER_USER_ID)
.data(createApplicationDeployData(applicationPackageInstance1, false)),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/otheruser/environment/dev/region/us-east-1", DELETE)
.userIdentity(OTHER_USER_ID),
accessDenied,
403);
// The owning user can deploy to, deactivate and delete their own dev instance.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser/deploy/dev-us-east-1", POST)
.userIdentity(USER_ID)
.data(createApplicationDeployData(applicationPackageInstance1, false)),
new File("deployment-job-accepted-2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser/environment/dev/region/us-east-1", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Deactivated tenant1.application1.myuser in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted instance tenant1.application1.myuser\"}");
// --- First package submission for tenant1.application1 (build 1.0.1) ---
addScrewdriverUserToDeployRole(SCREWDRIVER_ID,
ATHENZ_TENANT_DOMAIN,
id.application());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(applicationPackageInstance1, 123)),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
app1.runJob(JobType.systemTest).runJob(JobType.stagingTest).runJob(JobType.productionUsCentral1);
// --- Second application (tenant2.application2) with a global service id ---
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region("us-west-1")
.region("us-east-3")
.allow(ValidationId.globalEndpointChange)
.build();
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference-2.json"));
ApplicationId id2 = ApplicationId.from("tenant2", "application2", "instance1");
var app2 = deploymentTester.newDeploymentContext(id2);
addScrewdriverUserToDeployRole(SCREWDRIVER_ID,
ATHENZ_TENANT_DOMAIN_2,
id2.application());
// Queue a platform upgrade so the submission below has a pending change to work against.
deploymentTester.applications().deploymentTrigger().triggerChange(id2, Change.of(Version.fromString("7.0")));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(applicationPackage, 1000)),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
deploymentTester.triggerJobs();
// Manual job triggering: skipTests, then reTrigger once the job has run.
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/job/production-us-west-1", POST)
.data("{\"skipTests\":true}")
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant2.application2.instance1\"}");
app2.runJob(JobType.productionUsWest1);
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/job/production-us-west-1", POST)
.data("{\"reTrigger\":true}")
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant2.application2.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/environment/prod/region/us-west-1", DELETE)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Deactivated tenant2.application2.instance1 in prod.us-west-1\"}");
// GET works for both the end user and the build-system identity.
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.screwdriverIdentity(SCREWDRIVER_ID),
new File("application2.json"));
// --- PATCH application fields and manage deploy keys ---
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", PATCH)
.userIdentity(USER_ID)
.data("{\"majorVersion\":7}"),
"{\"message\":\"Set major version to 7\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/key", POST)
.userIdentity(USER_ID)
.data("{\"key\":\"" + pemPublicKey + "\"}"),
"{\"keys\":[\"-----BEGIN PUBLIC KEY-----\\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\\nz/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\\n-----END PUBLIC KEY-----\\n\"]}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", PATCH)
.userIdentity(USER_ID)
.data("{\"pemDeployKey\":\"" + pemPublicKey + "\"}"),
"{\"message\":\"Added deploy key " + quotedPemPublicKey + "\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2-with-patches.json"));
// Patching majorVersion to null clears it again.
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", PATCH)
.userIdentity(USER_ID)
.data("{\"majorVersion\":null}"),
"{\"message\":\"Set major version to empty\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/compile-version", GET)
.userIdentity(USER_ID),
"{\"compileVersion\":\"6.1.0\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/key", DELETE)
.userIdentity(USER_ID)
.data("{\"key\":\"" + pemPublicKey + "\"}"),
"{\"keys\":[]}");
// With all patches reverted, the response matches the original application2.json.
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2.json"));
// --- Tear down application2 entirely ---
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted instance tenant2.application2.default\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant2.application2\"}");
// --- Instance/deployment views with broken confidence and a rotation in place ---
deploymentTester.upgrader().overrideConfidence(Version.fromString("6.1"), VespaVersion.Confidence.broken);
deploymentTester.controllerTester().computeVersionStatus();
setDeploymentMaintainedInfo();
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-central-1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("instance.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("deployment.json"));
// --- Recursive listings at each level of the resource hierarchy ---
addIssues(deploymentTester, TenantAndApplicationId.from("tenant1", "application1"));
tester.assertResponse(request("/application/v4/", GET)
.userIdentity(USER_ID)
.recursive("deployment"),
new File("recursive-root.json"));
tester.assertResponse(request("/application/v4/", GET)
.userIdentity(USER_ID)
.recursive("tenant"),
new File("recursive-until-tenant-root.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/", GET)
.userIdentity(USER_ID)
.recursive("true"),
new File("tenant1-recursive.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID)
.recursive("true"),
new File("instance1-recursive.json"));
// Node and cluster views of a production deployment.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/nodes", GET)
.userIdentity(USER_ID),
new File("application-nodes.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/clusters", GET)
.userIdentity(USER_ID),
new File("application-clusters.json"));
// --- Log and content proxying to the config server ---
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/environment/dev/region/us-east-1/instance/default/logs?from=1233&to=3214", GET)
.userIdentity(USER_ID),
"INFO - All good");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/environment/prod/region/controller/instance/default/logs?from=1233&to=3214", GET)
.userIdentity(USER_ID),
"INFO - All good");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/instance/default/environment/dev/region/us-east-1/content/", GET).userIdentity(USER_ID),
"{\"path\":\"/\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/instance/default/environment/dev/region/us-east-1/content/bar/file.json?query=param", GET).userIdentity(USER_ID),
"{\"path\":\"/bar/file.json\"}");
// --- Metrics endpoint ---
updateMetrics();
tester.assertJsonResponse(request("/application/v4/tenant/tenant2/application/application1/environment/dev/region/us-east-1/instance/default/metrics", GET)
.userIdentity(USER_ID),
new File("proton-metrics.json"));
// --- Deployment change control: cancel, pin/unpin platform version ---
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", DELETE)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Changed deployment from 'application change to 1.0.1-commit1' to 'no change' for tenant1.application1.instance1\"}");
// Cancelling again is a no-op with an informative message.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", DELETE)
.userIdentity(USER_ID)
.data("{\"cancel\":\"all\"}"),
"{\"message\":\"No deployment in progress for tenant1.application1.instance1 at this time\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", POST)
.userIdentity(USER_ID)
.data("6.1.0"),
"{\"message\":\"Triggered pin to 6.1 for tenant1.application1.instance1\"}");
// The pin action must show up in the audit log.
assertTrue("Action is logged to audit log",
tester.controller().auditLogger().readLog().entries().stream()
.anyMatch(entry -> entry.resource().equals("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin?")));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
// Removing the pin keeps the platform upgrade itself.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to 6.1' to 'upgrade to 6.1' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":false}");
// Re-pin, then drop the platform part so only the pin (to current platform) remains.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", POST)
.userIdentity(USER_ID)
.data("6.1"),
"{\"message\":\"Triggered pin to 6.1 for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/platform", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to 6.1' to 'pin to current platform' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to current platform' to 'no change' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{}");
// --- Job pause/resume and manual trigger ---
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1/pause", POST)
.userIdentity(USER_ID),
"{\"message\":\"production-us-west-1 for tenant1.application1.instance1 paused for " + DeploymentTrigger.maxPause + "\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1/pause", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"production-us-west-1 for tenant1.application1.instance1 resumed\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1", POST)
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant1.application1.instance1\"}");
// --- Reindexing: request (all / by cluster / by cluster+type), enable, disable, status ---
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.properties(Map.of("clusterId", "boo,moo"))
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1, on clusters boo, moo\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.properties(Map.of("clusterId", "boo,moo",
"documentType", "foo,boo"))
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1, on clusters boo, moo, for types foo, boo\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", POST)
.userIdentity(USER_ID),
"{\"message\":\"Enabled reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Disabled reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", GET)
.userIdentity(USER_ID),
"{\"enabled\":true,\"clusters\":[{\"name\":\"cluster\",\"pending\":[{\"type\":\"type\",\"requiredGeneration\":100}],\"ready\":[{\"type\":\"type\",\"readyAtMillis\":345,\"startedAtMillis\":456,\"endedAtMillis\":567,\"state\":\"failed\",\"message\":\"(#`д´)ノ\",\"progress\":0.1}]}]}");
// --- Restart requests across environments and identities (incl. single-host restart) ---
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}");
addUserToHostedOperatorRole(HostedAthenzIdentities.from(SCREWDRIVER_ID));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/staging/region/us-east-3/instance/instance1/restart", POST)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in staging.us-east-3\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/test/region/us-east-1/instance/instance1/restart", POST)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in test.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1/restart", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.properties(Map.of("hostname", "node-1-tenant-host-prod.us-central-1"))
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}", 200);
// --- Orchestration suspend/resume: allowed in dev, denied in prod for plain users ---
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/dev/region/us-east-1/suspend", POST)
.userIdentity(USER_ID),
"{\"message\":\"Suspended orchestration of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/dev/region/us-east-1/suspend", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Resumed orchestration of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-east-3/suspend", POST)
.userIdentity(USER_ID),
accessDenied, 403);
// --- Service views ---
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/suspended", GET)
.userIdentity(USER_ID),
new File("suspended.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/service", GET)
.userIdentity(USER_ID),
new File("services.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/service/storagenode-awe3slno6mmq2fye191y324jl/state/v1/", GET)
.userIdentity(USER_ID),
new File("service.json"));
// --- Deleting an instance with active deployments fails; deactivate first ---
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("delete-with-active-deployments.json"), 400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in prod.us-central-1\"}");
// Deactivating an already-deactivated deployment is idempotent.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in prod.us-central-1\"}");
// --- Test-config endpoints for prod and dev deployments ---
tester.controller().jobController().deploy(ApplicationId.from("tenant1", "application1", "default"),
JobType.productionUsCentral1,
Optional.empty(),
applicationPackageDefault);
tester.controller().jobController().deploy(ApplicationId.from("tenant1", "application1", "my-user"),
JobType.devUsEast1,
Optional.empty(),
applicationPackageDefault);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/my-user/job/dev-us-east-1/test-config", GET)
.userIdentity(USER_ID),
new File("test-config-dev.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/my-user/job/production-us-central-1/test-config", GET)
.userIdentity(USER_ID),
new File("test-config.json"));
tester.controller().applications().deactivate(ApplicationId.from("tenant1", "application1", "default"),
ZoneId.from("prod", "us-central-1"));
tester.controller().applications().deactivate(ApplicationId.from("tenant1", "application1", "my-user"),
ZoneId.from("dev", "us-east-1"));
// --- Submission validation: Athenz identity domain must match the tenant's domain ---
ApplicationPackage packageWithServiceForWrongDomain = new ApplicationPackageBuilder()
.instances("instance1")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from(ATHENZ_TENANT_DOMAIN_2.getName()), AthenzService.from("service"))
.region("us-west-1")
.build();
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN_2, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(packageWithServiceForWrongDomain, 123)),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Athenz domain in deployment.xml: [domain2] must match tenant domain: [domain1]\"}", 400);
// Correct domain: submission succeeds as build 1.0.2.
ApplicationPackage packageWithService = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from(ATHENZ_TENANT_DOMAIN.getName()), AthenzService.from("service"))
.region("us-central-1")
.parallel("us-west-1", "us-east-3")
.build();
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(packageWithService, 123)),
"{\"message\":\"Application package version: 1.0.2-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
// Download the latest submitted package (build 2), then a specific build (build 1).
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET).userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1-build2.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(packageWithService.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
.properties(Map.of("build", "1"))
.userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1-build1.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(applicationPackageInstance1.zippedContent(), response.getBody());
},
200);
// --- X-Content-Hash validation: wrong hash rejected, correct hash accepted (build 1.0.3) ---
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.header("X-Content-Hash", "not/the/right/hash")
.data(createApplicationSubmissionData(packageWithService, 123)),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Value of X-Content-Hash header does not match computed content hash\"}", 400);
MultiPartStreamer streamer = createApplicationSubmissionData(packageWithService, 123);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.header("X-Content-Hash", Base64.getEncoder().encodeToString(Signatures.sha256Digest(streamer::data)))
.data(streamer),
"{\"message\":\"Application package version: 1.0.3-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
// --- Multi-instance deployment spec submission (build 1.0.4) ---
ApplicationPackage multiInstanceSpec = new ApplicationPackageBuilder()
.instances("instance1,instance2")
.region("us-central-1")
.parallel("us-west-1", "us-east-3")
.endpoint("default", "foo", "us-central-1", "us-west-1", "us-east-3")
.build();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(multiInstanceSpec, 123)),
"{\"message\":\"Application package version: 1.0.4-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
// The new submission should make jobs ready for both instances.
assertEquals(2, tester.controller().applications().deploymentTrigger().triggerReadyJobs());
// --- Job listings, run details, and aborting a run ---
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job", GET)
.userIdentity(USER_ID),
new File("jobs.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deployment", GET)
.userIdentity(USER_ID),
new File("deployment-overview.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/system-test", GET)
.userIdentity(USER_ID).properties(Map.of("limit", "100")),
new File("system-test-job.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/system-test/run/1", GET)
.userIdentity(USER_ID),
new File("system-test-details.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/staging-test", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Aborting run 2 of staging-test for tenant1.application1.instance1\"}");
// OPTIONS on the root returns an empty body.
tester.assertResponse(request("/application/v4/", Request.Method.OPTIONS)
.userIdentity(USER_ID),
"");
// --- Notifications, unfiltered and filtered by application ---
addNotifications(TenantName.from("tenant1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/notifications", GET).userIdentity(USER_ID),
new File("notifications-tenant1.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/notifications", GET)
.properties(Map.of("application", "app2")).userIdentity(USER_ID),
new File("notifications-tenant1-app2.json"));
// --- Final teardown: delete application1 and tenant1 ---
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant1.application1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE).userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
}
// Directory holding the JSON fixture files that assertResponse(...) compares against.
private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/";
// Test-only EC public key used when exercising key-related endpoints.
private static final String pemPublicKey = "-----BEGIN PUBLIC KEY-----\n" +
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\n" +
"z/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\n" +
"-----END PUBLIC KEY-----\n";
// Same key with newlines escaped, as it appears embedded inside JSON payloads.
private static final String quotedPemPublicKey = pemPublicKey.replaceAll("\\n", "\\\\n");
// Canonical 403 response body produced by the access-control filter.
private static final String accessDenied = "{\n \"code\" : 403,\n \"message\" : \"Access denied\"\n}";
// Application package for the "default" instance: three prod regions, a global
// service id, and a weekday 0-8 UTC block-change window.
private static final ApplicationPackage applicationPackageDefault = new ApplicationPackageBuilder()
.instances("default")
.globalServiceId("foo")
.region("us-central-1")
.region("us-east-3")
.region("us-west-1")
.blockChange(false, true, "mon-fri", "0-8", "UTC")
.build();
// Identical package but declaring instance "instance1" instead of "default".
private static final ApplicationPackage applicationPackageInstance1 = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region("us-central-1")
.region("us-east-3")
.region("us-west-1")
.blockChange(false, true, "mon-fri", "0-8", "UTC")
.build();
// Athenz domains, user/service identities, and Okta tokens shared by the tests below.
private static final AthenzDomain ATHENZ_TENANT_DOMAIN = new AthenzDomain("domain1");
private static final AthenzDomain ATHENZ_TENANT_DOMAIN_2 = new AthenzDomain("domain2");
private static final ScrewdriverId SCREWDRIVER_ID = new ScrewdriverId("12345");
private static final UserId USER_ID = new UserId("myuser");
private static final UserId OTHER_USER_ID = new UserId("otheruser");
private static final UserId HOSTED_VESPA_OPERATOR = new UserId("johnoperator");
private static final OktaIdentityToken OKTA_IT = new OktaIdentityToken("okta-it");
// Static, syntactically valid JWT used as the Okta access token in requests.
private static final OktaAccessToken OKTA_AT = new OktaAccessToken("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.he0ErCNloe4J7Id0Ry2SEDg09lKkZkfsRiGsdX_vgEg");
// Per-test fixtures, created in before().
private ContainerTester tester;
private DeploymentTester deploymentTester;
@Before
// Builds fresh tester fixtures for every test and computes the initial version
// status so the controller knows the current system version before any request.
public void before() {
tester = new ContainerTester(container, responseFiles);
deploymentTester = new DeploymentTester(new ControllerTester(tester));
deploymentTester.controllerTester().computeVersionStatus();
}
// NOTE(review): removed a stray @Test annotation. This is a parameterized private
// helper, not a test case — JUnit 4 requires test methods to be public, void, and
// take no arguments, so the annotation made the runner report an initialization
// error ("Method addIssues should be public" / "should have no parameters")
// instead of executing the suite.
/**
 * Marks the given application as having an open deployment issue, an open
 * ownership issue, and a known owner, for tests rendering issue information.
 */
private void addIssues(DeploymentTester tester, TenantAndApplicationId id) {
    tester.applications().lockApplicationOrThrow(id, application ->
            tester.controller().applications().store(application.withDeploymentIssueId(IssueId.from("123"))
                                                                .withOwnershipIssueId(IssueId.from("321"))
                                                                .withOwner(User.from("owner-username"))));
}
@Test
// Exercises the global-rotation status and override endpoints: unknown application,
// zone without a deployment, tenant-initiated override set/get/delete, and finally
// an operator-initiated override.
public void testRotationOverride() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
var westZone = ZoneId.from("prod", "us-west-1");
var eastZone = ZoneId.from("prod", "us-east-3");
var applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region(westZone.region())
.region(eastZone.region())
.build();
var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
app.submit(applicationPackage).deploy();
// Unknown application -> 400.
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/environment/prod/region/us-west-1/instance/default/global-rotation", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"tenant2.application2 not found\"}",
400);
// Zone the application is not deployed in -> 404, for both status and override.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/global-rotation", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"application 'tenant1.application1.instance1' has no deployment in prod.us-central-1\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/global-rotation/override", PUT)
.userIdentity(USER_ID)
.data("{\"reason\":\"unit-test\"}"),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"application 'tenant1.application1.instance1' has no deployment in prod.us-central-1\"}",
404);
// With the zone in rotation, status and override round-trip as expected.
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-west-1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.userIdentity(USER_ID),
new File("global-rotation.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", GET)
.userIdentity(USER_ID),
new File("global-rotation-get.json"));
// Tenant sets the override: routing goes OUT, attributed to the tenant.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", PUT)
.userIdentity(USER_ID)
.data("{\"reason\":\"unit-test\"}"),
new File("global-rotation-put.json"));
assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.out, GlobalRouting.Agent.tenant);
// Tenant clears the override: routing comes back IN.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", DELETE)
.userIdentity(USER_ID)
.data("{\"reason\":\"unit-test\"}"),
new File("global-rotation-delete.json"));
assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.in, GlobalRouting.Agent.tenant);
// An operator may also set the override; it is attributed to the operator agent.
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", PUT)
.userIdentity(HOSTED_VESPA_OPERATOR)
.data("{\"reason\":\"unit-test\"}"),
new File("global-rotation-put.json"));
assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.out, GlobalRouting.Agent.operator);
}
@Test
// An application with two global endpoints ("default" and "eu") must be queried
// with an explicit endpointId; without one the global-rotation request is a 400.
public void multiple_endpoints() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.region("us-west-1")
.region("us-east-3")
.region("eu-west-1")
.endpoint("eu", "default", "eu-west-1")
.endpoint("default", "default", "us-west-1", "us-east-3")
.build();
var app = deploymentTester.newDeploymentContext("tenant1", "application1", "instance1");
app.submit(applicationPackage).deploy();
// Put every zone into rotation for its respective endpoint.
setZoneInRotation("rotation-fqdn-2", ZoneId.from("prod", "us-west-1"));
setZoneInRotation("rotation-fqdn-2", ZoneId.from("prod", "us-east-3"));
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "eu-west-1"));
// Ambiguous: two rotations, no endpointId parameter.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"application 'tenant1.application1.instance1' has multiple rotations. Query parameter 'endpointId' must be given\"}",
400);
// With endpointId given, each endpoint/zone combination resolves.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.properties(Map.of("endpointId", "default"))
.userIdentity(USER_ID),
"{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.properties(Map.of("endpointId", "eu"))
.userIdentity(USER_ID),
"{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/eu-west-1/global-rotation", GET)
.properties(Map.of("endpointId", "eu"))
.userIdentity(USER_ID),
"{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
200);
}
@Test
// Direct deployment of a system application by an operator: rejected while the
// system itself is upgrading, allowed once the controller version is current.
public void testDeployDirectly() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
tester.assertResponse(request("/application/v4/tenant/tenant1", POST).userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"));
addUserToHostedOperatorRole(HostedAthenzIdentities.from(SCREWDRIVER_ID));
// Deploy data without an application package entity (system app uses none).
MultiPartStreamer noAppEntity = createApplicationDeployData(Optional.empty(), true);
// System is mid-upgrade -> deployment of system applications is refused.
tester.assertResponse(request("/application/v4/tenant/hosted-vespa/application/routing/environment/prod/region/us-central-1/instance/default/deploy", POST)
.data(noAppEntity)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Deployment of system applications during a system upgrade is not allowed\"}",
400);
// Bring the config servers up to the controller version; deployment now succeeds.
deploymentTester.controllerTester().upgradeSystem(deploymentTester.controller().readVersionStatus().controllerVersion().get().versionNumber());
tester.assertResponse(request("/application/v4/tenant/hosted-vespa/application/routing/environment/prod/region/us-central-1/instance/default/deploy", POST)
.data(noAppEntity)
.userIdentity(HOSTED_VESPA_OPERATOR),
new File("deploy-result.json"));
}
@Test
// Exercises the /metering resource: seeds the mock metering service with a current
// snapshot, month-to-date and last-month totals, plus a three-point snapshot
// history, then checks the rendered response against the stored JSON fixture.
public void testMeteringResponses() {
    MockMeteringClient metering = tester.serviceRegistry().meteringService();
    var appId = ApplicationId.from("doesnotexist", "doesnotexist", "default");
    var current = new ResourceAllocation(1, 2, 3);
    var monthToDate = new ResourceAllocation(12, 24, 1000);
    var previousMonth = new ResourceAllocation(24, 48, 2000);
    // Three snapshots at doubling timestamps, all with the same resource usage.
    Map<ApplicationId, List<ResourceSnapshot>> history =
            Map.of(appId, List.of(new ResourceSnapshot(appId, 1, 2, 3, Instant.ofEpochMilli(123), ZoneId.defaultId()),
                                  new ResourceSnapshot(appId, 1, 2, 3, Instant.ofEpochMilli(246), ZoneId.defaultId()),
                                  new ResourceSnapshot(appId, 1, 2, 3, Instant.ofEpochMilli(492), ZoneId.defaultId())));
    metering.setMeteringData(new MeteringData(monthToDate, previousMonth, current, history));
    tester.assertResponse(request("/application/v4/tenant/doesnotexist/application/doesnotexist/metering", GET)
                                  .userIdentity(USER_ID)
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          new File("instance1-metering.json"));
}
@Test
// Deploys an application to three prod zones plus one dev zone, then verifies
// that DELETE on .../deployment removes every prod deployment while the dev
// deployment is left untouched.
public void testRemovingAllDeployments() {
    createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
    var pkg = new ApplicationPackageBuilder()
            .instances("instance1")
            .region("us-west-1")
            .region("us-east-3")
            .region("eu-west-1")
            .endpoint("eu", "default", "eu-west-1")
            .endpoint("default", "default", "us-west-1", "us-east-3")
            .build();
    deploymentTester.controllerTester().createTenant("tenant1", ATHENZ_TENANT_DOMAIN.getName(), 432L);
    var context = deploymentTester.newDeploymentContext("tenant1", "application1", "instance1");
    context.submit(pkg).deploy();
    // Add a dev deployment on top of the three prod ones.
    tester.controller().jobController().deploy(context.instanceId(), JobType.devUsEast1, Optional.empty(), pkg);
    var deployedZones = Set.of(ZoneId.from("prod.us-west-1"),
                               ZoneId.from("prod.us-east-3"),
                               ZoneId.from("prod.eu-west-1"),
                               ZoneId.from("dev.us-east-1"));
    assertEquals(deployedZones, context.instance().deployments().keySet());
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deployment", DELETE)
                                  .userIdentity(USER_ID)
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          "{\"message\":\"All deployments removed\"}");
    // Only the dev deployment survives.
    assertEquals(Set.of(ZoneId.from("dev.us-east-1")), context.instance().deployments().keySet());
}
@Test
// Walks through the API's error paths: access denied, not-found, duplicate
// creation, invalid names, package lookups, unsupported deploys, and delete
// ordering constraints (instances before applications before tenants).
public void testErrorResponses() throws Exception {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
// PUT (update) on a tenant that does not yet exist -> 403 from access control.
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
accessDenied,
403);
// GETs against a non-existent tenant/application -> 404.
tester.assertResponse(request("/application/v4/tenant/tenant1", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"tenant1.application1 not found\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-east/instance/default", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"tenant1.application1 not found\"}",
404);
// Create the tenant, then hit the duplicate/invalid-name creation errors.
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
// Athenz domain is already bound to tenant1 -> cannot create tenant2 with it.
tester.assertResponse(request("/application/v4/tenant/tenant2", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not create tenant 'tenant2': The Athens domain 'domain1' is already connected to tenant 'tenant1'\"}",
400);
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'tenant1' already exists\"}",
400);
// Underscores are not permitted in new tenant names.
tester.assertResponse(request("/application/v4/tenant/my_tenant_2", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"New tenant or application names must start with a letter, may contain no more than 20 characters, and may only contain lowercase letters, digits or dashes, but no double-dashes.\"}",
400);
// "hosted-vespa" is reserved for the system tenant.
tester.assertResponse(request("/application/v4/tenant/hosted-vespa", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'hosted-vespa' already exists\"}",
400);
// Instance creation succeeds once, then collides.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not create 'tenant1.application1.instance1': Instance already exists\"}",
400);
ConfigServerMock configServer = tester.serviceRegistry().configServerMock();
configServer.throwOnNextPrepare(new ConfigServerException(ConfigServerException.ErrorCode.INVALID_APPLICATION_PACKAGE, "Failed to prepare application", "Invalid application package"));
// Application-package lookups: nothing submitted, unknown build, invalid build.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET).userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"No application package has been submitted for 'tenant1.application1'\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
.properties(Map.of("build", "42"))
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"No application package found for 'tenant1.application1' with build number 42\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
.properties(Map.of("build", "foobar"))
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Invalid build number: For input string: \\\"foobar\\\"\"}",
400);
// Direct dev deployment through this endpoint is not supported.
MultiPartStreamer entity = createApplicationDeployData(applicationPackageInstance1, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1/deploy", POST)
.data(entity)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Deployment of tenant1.application1.instance1 is not supported through this API\"}", 400);
// Deletion ordering: a tenant with active applications cannot be deleted ...
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not delete tenant 'tenant1': This tenant has active applications\"}",
400);
// ... so delete the instance first (second attempt -> 404), then the tenant.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted instance tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Could not delete instance 'tenant1.application1.instance1': Instance not found\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
// Deleting again without Okta tokens -> 403.
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(USER_ID),
accessDenied,
403);
// A legacy tenant stored with an underscore name collides with its dashed form.
tester.controller().curator().writeTenant(new AthenzTenant(TenantName.from("my_tenant"), ATHENZ_TENANT_DOMAIN,
new Property("property1"), Optional.empty(), Optional.empty(), Instant.EPOCH, LastLoginInfo.EMPTY));
tester.assertResponse(request("/application/v4/tenant/my-tenant", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'my-tenant' already exists\"}",
400);
}
@Test
// Verifies authorization on tenant/application CRUD: unauthenticated requests,
// a user who is not admin of the Athenz domain, and the authorized admin path.
public void testAuthorization() {
UserId authorizedUser = USER_ID;
UserId unauthorizedUser = new UserId("othertenant");
// No identity at all -> 401.
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"{\n \"message\" : \"Not authenticated\"\n}",
401);
// Listing tenants is open to any authenticated user.
tester.assertResponse(request("/application/v4/tenant/", GET)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"[]",
200);
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
// Tenant creation requires Athenz-domain admin rights.
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.userIdentity(unauthorizedUser),
"{\"error-code\":\"FORBIDDEN\",\"message\":\"The user 'user.othertenant' is not admin in Athenz domain 'domain1'\"}",
403);
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"),
200);
// Instance creation: denied for outsiders, allowed for the tenant admin.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(unauthorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"),
200);
// Production deploys require a deploy identity, not a plain user.
MultiPartStreamer entity = createApplicationDeployData(applicationPackageDefault, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/default/deploy", POST)
.data(entity)
.userIdentity(USER_ID),
accessDenied,
403);
// Application deletion: denied for outsiders, allowed for the tenant admin.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(unauthorizedUser),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/default", POST)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference-default.json"),
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant1.application1\"}",
200);
// Updating the tenant's Athenz domain: same admin-rights rules apply.
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.userIdentity(unauthorizedUser),
accessDenied,
403);
createAthenzDomainWithAdmin(new AthenzDomain("domain2"), USER_ID);
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property1\"}")
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant1.json"),
200);
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(unauthorizedUser),
accessDenied,
403);
}
@Test
// Submission is rejected when deployment.xml declares an Athenz identity outside
// the tenant's domain, or a service the submitter may not launch; it succeeds
// once the domain matches and launch is allowed.
public void athenz_service_must_be_allowed_to_launch_and_be_under_tenant_domain() {
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("another.domain"), com.yahoo.config.provision.AthenzService.from("service"))
.region("us-west-1")
.build();
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
deploymentTester.controllerTester().createTenant("tenant1", ATHENZ_TENANT_DOMAIN.getName(), 1234L);
var application = deploymentTester.newDeploymentContext("tenant1", "application1", "default");
ScrewdriverId screwdriverId = new ScrewdriverId("123");
addScrewdriverUserToDeployRole(screwdriverId, ATHENZ_TENANT_DOMAIN, application.instanceId().application());
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(new AthenzDomain("another.domain"), "service"));
// Identity domain ("another.domain") differs from the tenant domain -> 400.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit/", POST)
.data(createApplicationSubmissionData(applicationPackage, 123))
.screwdriverIdentity(screwdriverId),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Athenz domain in deployment.xml: [another.domain] must match tenant domain: [domain1]\"}",
400);
// Correct domain, but launch of domain1.service is not yet allowed -> 400.
applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.region("us-west-1")
.build();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit", POST)
.data(createApplicationSubmissionData(applicationPackage, 123))
.screwdriverIdentity(screwdriverId),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Not allowed to launch Athenz service domain1.service\"}",
400);
// With launch permission granted, submission succeeds.
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit/", POST)
.data(createApplicationSubmissionData(applicationPackage, 123))
.screwdriverIdentity(screwdriverId),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
}
@Test
// A dev deployment that declares an Athenz service identity is only allowed for
// users who are admin of the tenant's Athenz domain.
public void personal_deployment_with_athenz_service_requires_user_is_admin() {
UserId tenantAdmin = new UserId("tenant-admin");
UserId userId = new UserId("new-user");
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, tenantAdmin);
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.build();
createTenantAndApplication();
MultiPartStreamer entity = createApplicationDeployData(applicationPackage, true);
// new-user is not a domain admin yet -> 403.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/new-user/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(userId),
accessDenied,
403);
// Promote new-user to domain admin; the same deploy now starts a dev run.
tester.athenzClientFactory().getSetup()
.domains.get(ATHENZ_TENANT_DOMAIN)
.admin(HostedAthenzIdentities.from(userId));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/new-user/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(userId),
"{\"message\":\"Deployment started in run 1 of dev-us-east-1 for tenant1.application1.new-user. This may take about 15 minutes the first time.\",\"run\":1}");
}
@Test
// Developers may deploy to dev with an Athenz identity only if explicitly granted
// launch rights (via policy) or if they are tenant admins; also checks accepted
// content types for the deploy payload.
public void developers_can_deploy_when_privileged() {
// Create a tenant domain as well as a personal "sandbox" domain for the developer.
UserId tenantAdmin = new UserId("tenant-admin");
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, tenantAdmin);
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
UserId developer = new UserId("developer");
AthenzDomain sandboxDomain = new AthenzDomain("sandbox");
createAthenzDomainWithAdmin(sandboxDomain, developer);
AthenzTenantSpec tenantSpec = new AthenzTenantSpec(TenantName.from("sandbox"),
sandboxDomain,
new Property("vespa"),
Optional.empty());
AthenzCredentials credentials = new AthenzCredentials(
new AthenzPrincipal(new AthenzUser(developer.id())), sandboxDomain, OKTA_IT, OKTA_AT);
tester.controller().tenants().create(tenantSpec, credentials);
tester.controller().applications().createApplication(TenantAndApplicationId.from("sandbox", "myapp"), credentials);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.build();
MultiPartStreamer entity = createApplicationDeployData(applicationPackage, true);
// Developer has no launch rights on domain1.service -> 400.
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(developer),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"User user.developer is not allowed to launch service domain1.service. Please reach out to the domain admin.\"}",
400);
// Grant an explicit launch policy; the deploy now starts run 1.
AthenzDbMock.Domain domainMock = tester.athenzClientFactory().getSetup().getOrCreateDomain(ATHENZ_TENANT_DOMAIN);
domainMock.withPolicy("user." + developer.id(), "launch", "service.service");
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(developer),
"{\"message\":\"Deployment started in run 1 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":1}",
200);
// A second developer who is tenant admin in both domains may deploy directly.
UserId developer2 = new UserId("developer2");
tester.athenzClientFactory().getSetup().getOrCreateDomain(sandboxDomain).tenantAdmin(new AthenzUser(developer2.id()));
tester.athenzClientFactory().getSetup().getOrCreateDomain(ATHENZ_TENANT_DOMAIN).tenantAdmin(new AthenzUser(developer2.id()));
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(developer2),
"{\"message\":\"Deployment started in run 2 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":2}",
200);
// Raw application/zip payload is accepted; application/gzip is rejected.
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(applicationPackageInstance1.zippedContent())
.contentType("application/zip")
.userIdentity(developer2),
"{\"message\":\"Deployment started in run 3 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":3}");
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(applicationPackageInstance1.zippedContent())
.contentType("application/gzip")
.userIdentity(developer2),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Expected a multipart or application/zip message, but got Content-Type: application/gzip\"}", 400);
}
@Test
// Renders instance and deployment views for an application deployed in a zone
// with both exclusive and shared routing, including an inactive routing policy,
// the legacy-endpoint variant, and the view with shared endpoints hidden by flag.
public void applicationWithRoutingPolicy() {
    var context = deploymentTester.newDeploymentContext(createTenantAndApplication());
    var westZone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
    deploymentTester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(westZone),
                                                                        List.of(RoutingMethod.exclusive, RoutingMethod.shared));
    var pkg = new ApplicationPackageBuilder()
            .athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain"), AthenzService.from("service"))
            .compileVersion(RoutingController.DIRECT_ROUTING_MIN_VERSION)
            .instances("instance1")
            .region(westZone.region().value())
            .build();
    context.submit(pkg).deploy();
    // Add a routing policy that is not active, to make sure it is rendered as such.
    context.addInactiveRoutingPolicy(westZone);
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
                                  .userIdentity(USER_ID),
                          new File("instance-with-routing-policy.json"));
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
                                  .userIdentity(USER_ID),
                          new File("deployment-with-routing-policy.json"));
    // Legacy endpoints are only included when asked for explicitly.
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
                                  .userIdentity(USER_ID)
                                  .properties(Map.of("includeLegacyEndpoints", "true")),
                          new File("deployment-with-routing-policy-legacy.json"));
    // With the feature flag set, shared routing endpoints disappear from the view.
    ((InMemoryFlagSource) tester.controller().flagSource()).withBooleanFlag(Flags.HIDE_SHARED_ROUTING_ENDPOINT.id(), true);
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
                                  .userIdentity(USER_ID),
                          new File("deployment-without-shared-endpoints.json"));
}
@Test
public void support_access() {
var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
var zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
deploymentTester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone),
List.of(RoutingMethod.exclusive, RoutingMethod.shared));
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain"), AthenzService.from("service"))
.compileVersion(RoutingController.DIRECT_ROUTING_MIN_VERSION)
.instances("instance1")
.region(zone.region().value())
.build();
app.submit(applicationPackage).deploy();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", GET)
.userIdentity(USER_ID),
"{\"state\":{\"supportAccess\":\"NOT_ALLOWED\"},\"history\":[],\"grants\":[]}", 200
);
Instant now = tester.controller().clock().instant().truncatedTo(ChronoUnit.SECONDS);
String allowedResponse = "{\"state\":{\"supportAccess\":\"ALLOWED\",\"until\":\"" + serializeInstant(now.plus(7, ChronoUnit.DAYS))
+ "\",\"by\":\"user.myuser\"},\"history\":[{\"state\":\"allowed\",\"at\":\"" + serializeInstant(now)
+ "\",\"until\":\"" + serializeInstant(now.plus(7, ChronoUnit.DAYS))
+ "\",\"by\":\"user.myuser\"}],\"grants\":[]}";
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", POST)
.userIdentity(USER_ID),
allowedResponse, 200
);
X509Certificate support_cert = grantCertificate(now, now.plusSeconds(3600));
String grantPayload= "{\n" +
" \"applicationId\": \"tenant1:application1:instance1\",\n" +
" \"zone\": \"prod.us-west-1\",\n" +
" \"certificate\":\""+X509CertificateUtils.toPem(support_cert)+ "\"\n" +
"}";
tester.assertResponse(request("/controller/v1/access/grants/"+HOSTED_VESPA_OPERATOR.id(), POST)
.data(grantPayload)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Operator user.johnoperator granted access and job production-us-west-1 triggered\"}");
String grantResponse = allowedResponse.replaceAll("\"grants\":\\[]",
"\"grants\":[{\"requestor\":\"user.johnoperator\",\"notBefore\":\"" + serializeInstant(now) + "\",\"notAfter\":\"" + serializeInstant(now.plusSeconds(3600)) + "\"}]");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", GET)
.userIdentity(USER_ID),
grantResponse, 200
);
List<SupportAccessGrant> activeGrants = tester.controller().supportAccess().activeGrantsFor(new DeploymentId(ApplicationId.fromSerializedForm("tenant1:application1:instance1"), zone));
assertEquals(1, activeGrants.size());
app.assertRunning(JobType.productionUsWest1);
String disallowedResponse = grantResponse
.replaceAll("ALLOWED\".*?}", "NOT_ALLOWED\"} | class ApplicationApiTest extends ControllerContainerTest {
// Directory containing the canned JSON response files compared against by ContainerTester.
private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/";
// Fixed EC public key (PEM) used by key-related tests; quotedPemPublicKey is the same key with
// newlines escaped for embedding in JSON payloads.
private static final String pemPublicKey = "-----BEGIN PUBLIC KEY-----\n" +
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\n" +
"z/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\n" +
"-----END PUBLIC KEY-----\n";
private static final String quotedPemPublicKey = pemPublicKey.replaceAll("\\n", "\\\\n");
// Standard 403 body returned by the API for unauthorized requests.
private static final String accessDenied = "{\n  \"code\" : 403,\n  \"message\" : \"Access denied\"\n}";
// Application package for the "default" instance: three prod regions, a global service id,
// and a weekday 0-8 UTC block-change window.
private static final ApplicationPackage applicationPackageDefault = new ApplicationPackageBuilder()
.instances("default")
.globalServiceId("foo")
.region("us-central-1")
.region("us-east-3")
.region("us-west-1")
.blockChange(false, true, "mon-fri", "0-8", "UTC")
.build();
// Same package but for instance "instance1".
private static final ApplicationPackage applicationPackageInstance1 = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region("us-central-1")
.region("us-east-3")
.region("us-west-1")
.blockChange(false, true, "mon-fri", "0-8", "UTC")
.build();
// Identities and domains used throughout the tests.
private static final AthenzDomain ATHENZ_TENANT_DOMAIN = new AthenzDomain("domain1");
private static final AthenzDomain ATHENZ_TENANT_DOMAIN_2 = new AthenzDomain("domain2");
private static final ScrewdriverId SCREWDRIVER_ID = new ScrewdriverId("12345");
private static final UserId USER_ID = new UserId("myuser");
private static final UserId OTHER_USER_ID = new UserId("otheruser");
private static final UserId HOSTED_VESPA_OPERATOR = new UserId("johnoperator");
// Okta tokens attached to requests that create/modify tenants and applications.
private static final OktaIdentityToken OKTA_IT = new OktaIdentityToken("okta-it");
private static final OktaAccessToken OKTA_AT = new OktaAccessToken("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.he0ErCNloe4J7Id0Ry2SEDg09lKkZkfsRiGsdX_vgEg");
// Per-test fixtures, initialized in before().
private ContainerTester tester;
private DeploymentTester deploymentTester;
/** Sets up a fresh container and deployment tester and settles the version status before each test. */
@Before
public void before() {
    tester = new ContainerTester(container, responseFiles);
    ControllerTester controllerTester = new ControllerTester(tester);
    deploymentTester = new DeploymentTester(controllerTester);
    deploymentTester.controllerTester().computeVersionStatus();
}
/**
 * Attaches a deployment issue, an ownership issue and an owner to the given application.
 *
 * Note: the stray {@code @Test} annotation has been removed — this is a private helper that
 * takes parameters, and JUnit 4 requires test methods to be public, void and parameterless;
 * annotating this method would make the runner reject the whole class.
 */
private void addIssues(DeploymentTester tester, TenantAndApplicationId id) {
    tester.applications().lockApplicationOrThrow(id, application ->
            tester.controller().applications().store(application.withDeploymentIssueId(IssueId.from("123"))
                                                                .withOwnershipIssueId(IssueId.from("321"))
                                                                .withOwner(User.from("owner-username"))));
}
/**
 * Exercises the global-rotation status and override endpoints: error responses for unknown
 * applications/deployments, reading rotation status, setting and clearing a tenant override,
 * and setting an override as a hosted-Vespa operator.
 */
@Test
public void testRotationOverride() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
var westZone = ZoneId.from("prod", "us-west-1");
var eastZone = ZoneId.from("prod", "us-east-3");
var applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region(westZone.region())
.region(eastZone.region())
.build();
// Deploy the application to both prod zones.
var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
app.submit(applicationPackage).deploy();
// Unknown application -> 400.
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/environment/prod/region/us-west-1/instance/default/global-rotation", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"tenant2.application2 not found\"}",
400);
// Zone without a deployment -> 404, for both status read and override write.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/global-rotation", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"application 'tenant1.application1.instance1' has no deployment in prod.us-central-1\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/global-rotation/override", PUT)
.userIdentity(USER_ID)
.data("{\"reason\":\"unit-test\"}"),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"application 'tenant1.application1.instance1' has no deployment in prod.us-central-1\"}",
404);
// With the west zone in rotation, status and override reads succeed.
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-west-1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.userIdentity(USER_ID),
new File("global-rotation.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", GET)
.userIdentity(USER_ID),
new File("global-rotation-get.json"));
// Tenant sets an override -> routing goes out, agent is the tenant.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", PUT)
.userIdentity(USER_ID)
.data("{\"reason\":\"unit-test\"}"),
new File("global-rotation-put.json"));
assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.out, GlobalRouting.Agent.tenant);
// Tenant clears the override -> routing back in.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", DELETE)
.userIdentity(USER_ID)
.data("{\"reason\":\"unit-test\"}"),
new File("global-rotation-delete.json"));
assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.in, GlobalRouting.Agent.tenant);
// Operator sets an override -> agent is recorded as operator.
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", PUT)
.userIdentity(HOSTED_VESPA_OPERATOR)
.data("{\"reason\":\"unit-test\"}"),
new File("global-rotation-put.json"));
assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.out, GlobalRouting.Agent.operator);
}
/**
 * With multiple global endpoints defined, reading global-rotation status without an
 * 'endpointId' query parameter is rejected; with an endpointId it returns the status for
 * that endpoint.
 */
@Test
public void multiple_endpoints() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.region("us-west-1")
.region("us-east-3")
.region("eu-west-1")
.endpoint("eu", "default", "eu-west-1")
.endpoint("default", "default", "us-west-1", "us-east-3")
.build();
var app = deploymentTester.newDeploymentContext("tenant1", "application1", "instance1");
app.submit(applicationPackage).deploy();
// Put all three zones in rotation.
setZoneInRotation("rotation-fqdn-2", ZoneId.from("prod", "us-west-1"));
setZoneInRotation("rotation-fqdn-2", ZoneId.from("prod", "us-east-3"));
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "eu-west-1"));
// Ambiguous rotation -> 400 without endpointId.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"application 'tenant1.application1.instance1' has multiple rotations. Query parameter 'endpointId' must be given\"}",
400);
// Explicit endpointId works for each endpoint/zone combination.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.properties(Map.of("endpointId", "default"))
.userIdentity(USER_ID),
"{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.properties(Map.of("endpointId", "eu"))
.userIdentity(USER_ID),
"{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/eu-west-1/global-rotation", GET)
.properties(Map.of("endpointId", "eu"))
.userIdentity(USER_ID),
"{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
200);
}
/**
 * Direct deployment of a system application by an operator: rejected while the system is
 * upgrading, allowed once the system is at the controller version.
 */
@Test
public void testDeployDirectly() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
// Create tenant and instance.
tester.assertResponse(request("/application/v4/tenant/tenant1", POST).userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"));
addUserToHostedOperatorRole(HostedAthenzIdentities.from(SCREWDRIVER_ID));
// System application deploy is blocked during a system upgrade.
MultiPartStreamer noAppEntity = createApplicationDeployData(Optional.empty(), true);
tester.assertResponse(request("/application/v4/tenant/hosted-vespa/application/routing/environment/prod/region/us-central-1/instance/default/deploy", POST)
.data(noAppEntity)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Deployment of system applications during a system upgrade is not allowed\"}",
400);
// Once the system is upgraded to the controller version, the deploy succeeds.
deploymentTester.controllerTester().upgradeSystem(deploymentTester.controller().readVersionStatus().controllerVersion().get().versionNumber());
tester.assertResponse(request("/application/v4/tenant/hosted-vespa/application/routing/environment/prod/region/us-central-1/instance/default/deploy", POST)
.data(noAppEntity)
.userIdentity(HOSTED_VESPA_OPERATOR),
new File("deploy-result.json"));
}
/**
 * Seeds the mock metering service with fixed aggregates and a three-entry snapshot history,
 * then checks the metering endpoint against the canned response file.
 */
@Test
public void testMeteringResponses() {
    ApplicationId applicationId = ApplicationId.from("doesnotexist", "doesnotexist", "default");
    // Three snapshots for the same application at increasing instants.
    List<ResourceSnapshot> snapshots = List.of(
            new ResourceSnapshot(applicationId, 1, 2, 3, Instant.ofEpochMilli(123), ZoneId.defaultId()),
            new ResourceSnapshot(applicationId, 1, 2, 3, Instant.ofEpochMilli(246), ZoneId.defaultId()),
            new ResourceSnapshot(applicationId, 1, 2, 3, Instant.ofEpochMilli(492), ZoneId.defaultId()));
    MeteringData meteringData = new MeteringData(new ResourceAllocation(12, 24, 1000),  // this month
                                                 new ResourceAllocation(24, 48, 2000),  // last month
                                                 new ResourceAllocation(1, 2, 3),       // current snapshot
                                                 Map.of(applicationId, snapshots));
    MockMeteringClient mockMeteringClient = tester.serviceRegistry().meteringService();
    mockMeteringClient.setMeteringData(meteringData);
    tester.assertResponse(request("/application/v4/tenant/doesnotexist/application/doesnotexist/metering", GET)
                                  .userIdentity(USER_ID)
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          new File("instance1-metering.json"));
}
/**
 * DELETE on /deployment removes all production deployments of an application but leaves
 * dev deployments untouched.
 */
@Test
public void testRemovingAllDeployments() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.region("us-west-1")
.region("us-east-3")
.region("eu-west-1")
.endpoint("eu", "default", "eu-west-1")
.endpoint("default", "default", "us-west-1", "us-east-3")
.build();
deploymentTester.controllerTester().createTenant("tenant1", ATHENZ_TENANT_DOMAIN.getName(), 432L);
var app = deploymentTester.newDeploymentContext("tenant1", "application1", "instance1");
app.submit(applicationPackage).deploy();
// Additionally deploy to a dev zone, so we have 3 prod + 1 dev deployment.
tester.controller().jobController().deploy(app.instanceId(), JobType.devUsEast1, Optional.empty(), applicationPackage);
assertEquals(Set.of(ZoneId.from("prod.us-west-1"), ZoneId.from("prod.us-east-3"), ZoneId.from("prod.eu-west-1"), ZoneId.from("dev.us-east-1")),
app.instance().deployments().keySet());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deployment", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"All deployments removed\"}");
// Only the dev deployment survives.
assertEquals(Set.of(ZoneId.from("dev.us-east-1")), app.instance().deployments().keySet());
}
/**
 * Walks through the API's error responses: access control failures, missing resources,
 * duplicate/invalid creations, package lookups, unsupported deploys, and deletion ordering
 * constraints. Statement order matters — later assertions depend on state created earlier.
 */
@Test
public void testErrorResponses() throws Exception {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
// PUT on a non-existent tenant -> 403 (access control evaluated before existence).
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
accessDenied,
403);
// GETs on non-existent tenant/application resources -> 404.
tester.assertResponse(request("/application/v4/tenant/tenant1", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"tenant1.application1 not found\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-east/instance/default", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"tenant1.application1 not found\"}",
404);
// Create tenant1; then verify the various creation conflicts.
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
// Athenz domain already bound to tenant1 -> 400.
tester.assertResponse(request("/application/v4/tenant/tenant2", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not create tenant 'tenant2': The Athens domain 'domain1' is already connected to tenant 'tenant1'\"}",
400);
// Duplicate tenant -> 400.
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'tenant1' already exists\"}",
400);
// Invalid tenant name (underscores) -> 400.
tester.assertResponse(request("/application/v4/tenant/my_tenant_2", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"New tenant or application names must start with a letter, may contain no more than 20 characters, and may only contain lowercase letters, digits or dashes, but no double-dashes.\"}",
400);
// Reserved system tenant name -> 400.
tester.assertResponse(request("/application/v4/tenant/hosted-vespa", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'hosted-vespa' already exists\"}",
400);
// Create an instance, then verify duplicate instance -> 400.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not create 'tenant1.application1.instance1': Instance already exists\"}",
400);
ConfigServerMock configServer = tester.serviceRegistry().configServerMock();
configServer.throwOnNextPrepare(new ConfigServerException(ConfigServerException.ErrorCode.INVALID_APPLICATION_PACKAGE, "Failed to prepare application", "Invalid application package"));
// Package lookups: nothing submitted, unknown build number, non-numeric build number.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET).userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"No application package has been submitted for 'tenant1.application1'\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
.properties(Map.of("build", "42"))
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"No application package found for 'tenant1.application1' with build number 42\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
.properties(Map.of("build", "foobar"))
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Invalid build number: For input string: \\\"foobar\\\"\"}",
400);
// Deploying through this API is not supported for tenant applications.
MultiPartStreamer entity = createApplicationDeployData(applicationPackageInstance1, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1/deploy", POST)
.data(entity)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Deployment of tenant1.application1.instance1 is not supported through this API\"}", 400);
// Deleting a tenant with active applications -> 400; then delete instance, re-delete -> 404.
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not delete tenant 'tenant1': This tenant has active applications\"}",
400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted instance tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Could not delete instance 'tenant1.application1.instance1': Instance not found\"}",
404);
// Tenant delete succeeds with Okta tokens; without them -> 403.
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(USER_ID),
accessDenied,
403);
// A pre-existing tenant named with underscores blocks creating the dashed variant.
tester.controller().curator().writeTenant(new AthenzTenant(TenantName.from("my_tenant"), ATHENZ_TENANT_DOMAIN,
new Property("property1"), Optional.empty(), Optional.empty(), Instant.EPOCH, LastLoginInfo.EMPTY));
tester.assertResponse(request("/application/v4/tenant/my-tenant", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'my-tenant' already exists\"}",
400);
}
/**
 * Verifies authentication and authorization on tenant/application/instance operations:
 * anonymous requests, non-admin users, and the authorized domain admin.
 */
@Test
public void testAuthorization() {
UserId authorizedUser = USER_ID;
UserId unauthorizedUser = new UserId("othertenant");
// No identity -> 401.
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"{\n  \"message\" : \"Not authenticated\"\n}",
401);
// Listing tenants requires no special role.
tester.assertResponse(request("/application/v4/tenant/", GET)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"[]",
200);
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
// Non-admin of the Athenz domain cannot create the tenant -> 403.
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.userIdentity(unauthorizedUser),
"{\"error-code\":\"FORBIDDEN\",\"message\":\"The user 'user.othertenant' is not admin in Athenz domain 'domain1'\"}",
403);
// Domain admin can.
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"),
200);
// Instance creation: unauthorized -> 403, authorized -> 200.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(unauthorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"),
200);
// Direct prod deploy as a plain user -> 403.
MultiPartStreamer entity = createApplicationDeployData(applicationPackageDefault, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/default/deploy", POST)
.data(entity)
.userIdentity(USER_ID),
accessDenied,
403);
// Application deletion: unauthorized -> 403, authorized -> 200.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(unauthorizedUser),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/default", POST)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference-default.json"),
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant1.application1\"}",
200);
// Tenant update: unauthorized -> 403; admin may move the tenant to another domain.
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.userIdentity(unauthorizedUser),
accessDenied,
403);
createAthenzDomainWithAdmin(new AthenzDomain("domain2"), USER_ID);
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property1\"}")
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant1.json"),
200);
// Tenant delete by a non-admin -> 403.
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(unauthorizedUser),
accessDenied,
403);
}
/**
 * Submission is rejected when deployment.xml names an Athenz domain other than the tenant's,
 * or a service the controller is not allowed to launch; it succeeds once the service under
 * the tenant domain is allowed to launch.
 */
@Test
public void athenz_service_must_be_allowed_to_launch_and_be_under_tenant_domain() {
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("another.domain"), com.yahoo.config.provision.AthenzService.from("service"))
.region("us-west-1")
.build();
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
deploymentTester.controllerTester().createTenant("tenant1", ATHENZ_TENANT_DOMAIN.getName(), 1234L);
var application = deploymentTester.newDeploymentContext("tenant1", "application1", "default");
ScrewdriverId screwdriverId = new ScrewdriverId("123");
addScrewdriverUserToDeployRole(screwdriverId, ATHENZ_TENANT_DOMAIN, application.instanceId().application());
// Allowing launch under the WRONG domain does not help: domain mismatch -> 400.
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(new AthenzDomain("another.domain"), "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit/", POST)
.data(createApplicationSubmissionData(applicationPackage, 123))
.screwdriverIdentity(screwdriverId),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Athenz domain in deployment.xml: [another.domain] must match tenant domain: [domain1]\"}",
400);
// Correct domain but launch not yet allowed -> 400.
applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.region("us-west-1")
.build();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit", POST)
.data(createApplicationSubmissionData(applicationPackage, 123))
.screwdriverIdentity(screwdriverId),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Not allowed to launch Athenz service domain1.service\"}",
400);
// With launch allowed under the tenant domain, the submission succeeds.
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit/", POST)
.data(createApplicationSubmissionData(applicationPackage, 123))
.screwdriverIdentity(screwdriverId),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
}
/**
 * A dev deployment using an Athenz identity requires the deploying user to be an admin of
 * the tenant domain: denied for a plain user, allowed once the user is made domain admin.
 */
@Test
public void personal_deployment_with_athenz_service_requires_user_is_admin() {
UserId tenantAdmin = new UserId("tenant-admin");
UserId userId = new UserId("new-user");
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, tenantAdmin);
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.build();
createTenantAndApplication();
MultiPartStreamer entity = createApplicationDeployData(applicationPackage, true);
// new-user is not yet a domain admin -> 403.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/new-user/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(userId),
accessDenied,
403);
// Promote new-user to domain admin; the same deploy now starts.
tester.athenzClientFactory().getSetup()
.domains.get(ATHENZ_TENANT_DOMAIN)
.admin(HostedAthenzIdentities.from(userId));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/new-user/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(userId),
"{\"message\":\"Deployment started in run 1 of dev-us-east-1 for tenant1.application1.new-user. This may take about 15 minutes the first time.\",\"run\":1}");
}
/**
 * Developers in a sandbox tenant can deploy to dev when granted launch rights on the
 * deployment's Athenz service — either via an explicit launch policy or by being tenant
 * admin in both domains. Also checks the content-type handling of the deploy endpoint.
 */
@Test
public void developers_can_deploy_when_privileged() {
UserId tenantAdmin = new UserId("tenant-admin");
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, tenantAdmin);
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
// Set up a sandbox tenant administered by the developer.
UserId developer = new UserId("developer");
AthenzDomain sandboxDomain = new AthenzDomain("sandbox");
createAthenzDomainWithAdmin(sandboxDomain, developer);
AthenzTenantSpec tenantSpec = new AthenzTenantSpec(TenantName.from("sandbox"),
sandboxDomain,
new Property("vespa"),
Optional.empty());
AthenzCredentials credentials = new AthenzCredentials(
new AthenzPrincipal(new AthenzUser(developer.id())), sandboxDomain, OKTA_IT, OKTA_AT);
tester.controller().tenants().create(tenantSpec, credentials);
tester.controller().applications().createApplication(TenantAndApplicationId.from("sandbox", "myapp"), credentials);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.build();
// Developer lacks launch rights on domain1.service -> 400.
MultiPartStreamer entity = createApplicationDeployData(applicationPackage, true);
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(developer),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"User user.developer is not allowed to launch service domain1.service. Please reach out to the domain admin.\"}",
400);
// Grant an explicit launch policy; the deploy now starts.
AthenzDbMock.Domain domainMock = tester.athenzClientFactory().getSetup().getOrCreateDomain(ATHENZ_TENANT_DOMAIN);
domainMock.withPolicy("user." + developer.id(), "launch", "service.service");
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(developer),
"{\"message\":\"Deployment started in run 1 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":1}",
200);
// A second developer who is tenant admin in both domains may also deploy.
UserId developer2 = new UserId("developer2");
tester.athenzClientFactory().getSetup().getOrCreateDomain(sandboxDomain).tenantAdmin(new AthenzUser(developer2.id()));
tester.athenzClientFactory().getSetup().getOrCreateDomain(ATHENZ_TENANT_DOMAIN).tenantAdmin(new AthenzUser(developer2.id()));
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(developer2),
"{\"message\":\"Deployment started in run 2 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":2}",
200);
// Raw application/zip payloads are accepted; other content types are rejected.
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(applicationPackageInstance1.zippedContent())
.contentType("application/zip")
.userIdentity(developer2),
"{\"message\":\"Deployment started in run 3 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":3}");
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(applicationPackageInstance1.zippedContent())
.contentType("application/gzip")
.userIdentity(developer2),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Expected a multipart or application/zip message, but got Content-Type: application/gzip\"}", 400);
}
/**
 * Instance and deployment responses for an application with routing policies, including the
 * legacy-endpoint view and the view with shared routing endpoints hidden by feature flag.
 */
@Test
public void applicationWithRoutingPolicy() {
var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
var zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
// The zone supports both exclusive and shared routing.
deploymentTester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone),
List.of(RoutingMethod.exclusive, RoutingMethod.shared));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain"), AthenzService.from("service"))
.compileVersion(RoutingController.DIRECT_ROUTING_MIN_VERSION)
.instances("instance1")
.region(zone.region().value())
.build();
app.submit(applicationPackage).deploy();
app.addInactiveRoutingPolicy(zone);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("instance-with-routing-policy.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("deployment-with-routing-policy.json"));
// includeLegacyEndpoints adds the legacy endpoints to the deployment view.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
.userIdentity(USER_ID)
.properties(Map.of("includeLegacyEndpoints", "true")),
new File("deployment-with-routing-policy-legacy.json"));
// With the hide-shared-routing flag set, shared endpoints disappear from the response.
((InMemoryFlagSource) tester.controller().flagSource()).withBooleanFlag(Flags.HIDE_SHARED_ROUTING_ENDPOINT.id(), true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("deployment-without-shared-endpoints.json"));
}
@Test
public void support_access() {
var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
var zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
deploymentTester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone),
List.of(RoutingMethod.exclusive, RoutingMethod.shared));
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain"), AthenzService.from("service"))
.compileVersion(RoutingController.DIRECT_ROUTING_MIN_VERSION)
.instances("instance1")
.region(zone.region().value())
.build();
app.submit(applicationPackage).deploy();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", GET)
.userIdentity(USER_ID),
"{\"state\":{\"supportAccess\":\"NOT_ALLOWED\"},\"history\":[],\"grants\":[]}", 200
);
Instant now = tester.controller().clock().instant().truncatedTo(ChronoUnit.SECONDS);
String allowedResponse = "{\"state\":{\"supportAccess\":\"ALLOWED\",\"until\":\"" + serializeInstant(now.plus(7, ChronoUnit.DAYS))
+ "\",\"by\":\"user.myuser\"},\"history\":[{\"state\":\"allowed\",\"at\":\"" + serializeInstant(now)
+ "\",\"until\":\"" + serializeInstant(now.plus(7, ChronoUnit.DAYS))
+ "\",\"by\":\"user.myuser\"}],\"grants\":[]}";
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", POST)
.userIdentity(USER_ID),
allowedResponse, 200
);
X509Certificate support_cert = grantCertificate(now, now.plusSeconds(3600));
String grantPayload= "{\n" +
" \"applicationId\": \"tenant1:application1:instance1\",\n" +
" \"zone\": \"prod.us-west-1\",\n" +
" \"certificate\":\""+X509CertificateUtils.toPem(support_cert)+ "\"\n" +
"}";
tester.assertResponse(request("/controller/v1/access/grants/"+HOSTED_VESPA_OPERATOR.id(), POST)
.data(grantPayload)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Operator user.johnoperator granted access and job production-us-west-1 triggered\"}");
String grantResponse = allowedResponse.replaceAll("\"grants\":\\[]",
"\"grants\":[{\"requestor\":\"user.johnoperator\",\"notBefore\":\"" + serializeInstant(now) + "\",\"notAfter\":\"" + serializeInstant(now.plusSeconds(3600)) + "\"}]");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", GET)
.userIdentity(USER_ID),
grantResponse, 200
);
List<SupportAccessGrant> activeGrants = tester.controller().supportAccess().activeGrantsFor(new DeploymentId(ApplicationId.fromSerializedForm("tenant1:application1:instance1"), zone));
assertEquals(1, activeGrants.size());
app.assertRunning(JobType.productionUsWest1);
String disallowedResponse = grantResponse
.replaceAll("ALLOWED\".*?}", "NOT_ALLOWED\"} |
Don't we need to do something similar in application handler for `expiryTimeEpochMs`? | private boolean isExpired(Deployment deployment, ApplicationId instance) {
if (deployment.zone().environment().isProduction()) return false;
Optional<Duration> ttl = controller().zoneRegistry().getDeploymentTimeToLive(deployment.zone());
if (ttl.isEmpty()) return false;
Optional<JobId> jobId = JobType.from(controller().system(), deployment.zone())
.map(type -> new JobId(instance, type));
if (jobId.isEmpty()) return false;
return controller().jobController().jobStatus(jobId.get())
.runs().descendingMap().values().stream()
.filter(run -> ! run.isRedeployment())
.findFirst()
.map(run -> run.start().plus(ttl.get()).isBefore(controller().clock().instant()))
.orElse(false);
} | .filter(run -> ! run.isRedeployment()) | private boolean isExpired(Deployment deployment, ApplicationId instance) {
if (deployment.zone().environment().isProduction()) return false;
Optional<Duration> ttl = controller().zoneRegistry().getDeploymentTimeToLive(deployment.zone());
if (ttl.isEmpty()) return false;
Optional<JobId> jobId = JobType.from(controller().system(), deployment.zone())
.map(type -> new JobId(instance, type));
if (jobId.isEmpty()) return false;
return controller().jobController().jobStarts(jobId.get()).stream().findFirst()
.map(start -> start.plus(ttl.get()).isBefore(controller().clock().instant()))
.orElse(false);
} | class DeploymentExpirer extends ControllerMaintainer {
public DeploymentExpirer(Controller controller, Duration interval) {
super(controller, interval);
}
@Override
protected double maintain() {
int attempts = 0;
int failures = 0;
for (Application application : controller().applications().readable()) {
for (Instance instance : application.instances().values())
for (Deployment deployment : instance.deployments().values()) {
if (!isExpired(deployment, instance.id())) continue;
try {
log.log(Level.INFO, "Expiring deployment of " + instance.id() + " in " + deployment.zone());
attempts++;
controller().applications().deactivate(instance.id(), deployment.zone());
} catch (Exception e) {
failures++;
log.log(Level.WARNING, "Could not expire " + deployment + " of " + instance +
": " + Exceptions.toMessageString(e) + ". Retrying in " +
interval());
}
}
}
return asSuccessFactor(attempts, failures);
}
/** Returns whether given deployment has expired according to its TTL */
} | class DeploymentExpirer extends ControllerMaintainer {
public DeploymentExpirer(Controller controller, Duration interval) {
super(controller, interval);
}
@Override
protected double maintain() {
int attempts = 0;
int failures = 0;
for (Application application : controller().applications().readable()) {
for (Instance instance : application.instances().values())
for (Deployment deployment : instance.deployments().values()) {
if (!isExpired(deployment, instance.id())) continue;
try {
log.log(Level.INFO, "Expiring deployment of " + instance.id() + " in " + deployment.zone());
attempts++;
controller().applications().deactivate(instance.id(), deployment.zone());
} catch (Exception e) {
failures++;
log.log(Level.WARNING, "Could not expire " + deployment + " of " + instance +
": " + Exceptions.toMessageString(e) + ". Retrying in " +
interval());
}
}
}
return asSuccessFactor(attempts, failures);
}
/** Returns whether given deployment has expired according to its TTL */
} |
Not that it's very important, but I think its easier and you get better result if you just count deployment frequencies by hour and then minimize product sum of `numDeployments(hour + offset) / (offset + 1)`, i.e.: ``` static int mostLikelyWeeHour(int[] starts) { int[] buckets = new int[24]; for (int start : starts) buckets[start]++; int best = -1; double min = Double.MAX_VALUE; for (int i = 12; i < 36; i++) { double sum = buckets[i % 24]; for (int j = 1; j < 12; j++) sum += (buckets[(i + j) % 24] + buckets[(i - j) % 24]) / (j + 1d); if (sum < min) { min = sum; best = i; } } return best % 24; } ``` The expectation for test cases then becomes 15, 13, 15 and 18. So 0, 1, 2, ..., 6 gives the same hour as 6, 5, 4, ..., 0 and 0, 12 alternating gives hour furthest apart from them: 18. | public void testNight() {
assertEquals(14, mostLikelyWeeHour(new int[]{ 0, 1, 2, 3, 4, 5, 6 }));
assertEquals(12, mostLikelyWeeHour(new int[]{ 22, 23, 0, 1, 2, 3, 4 }));
assertEquals(18, mostLikelyWeeHour(new int[]{ 6, 5, 4, 3, 2, 1, 0 }));
assertEquals(13, mostLikelyWeeHour(new int[]{ 0, 12, 0, 12, 0, 12, 0, 12, 0, 12, 0, 12 }));
} | assertEquals(14, mostLikelyWeeHour(new int[]{ 0, 1, 2, 3, 4, 5, 6 })); | public void testNight() {
assertEquals(16, mostLikelyWeeHour(new int[]{ 0, 1, 2, 3, 4, 5, 6 }));
assertEquals(14, mostLikelyWeeHour(new int[]{ 22, 23, 0, 1, 2, 3, 4 }));
assertEquals(18, mostLikelyWeeHour(new int[]{ 6, 5, 4, 3, 2, 1, 0 }));
assertEquals(20, mostLikelyWeeHour(new int[]{ 0, 12, 0, 12, 0, 12, 0, 12, 0, 12, 0, 11 }));
} | class DeploymentUpgraderTest {
private final DeploymentTester tester = new DeploymentTester();
@Test
public void testDeploymentUpgrading() {
ZoneId devZone = ZoneId.from(Environment.dev, RegionName.from("us-east-1"));
DeploymentUpgrader upgrader = new DeploymentUpgrader(tester.controller(), Duration.ofDays(1));
var devApp = tester.newDeploymentContext("tenant1", "app1", "default");
var prodApp = tester.newDeploymentContext("tenant2", "app2", "default");
ApplicationPackage appPackage = new ApplicationPackageBuilder().region("us-west-1").build();
Version systemVersion = tester.controller().readSystemVersion();
Instant start = tester.clock().instant().truncatedTo(MILLIS);
devApp.runJob(devUsEast1, appPackage);
prodApp.submit(appPackage).deploy();
assertEquals(systemVersion, tester.jobs().last(devApp.instanceId(), devUsEast1).get().versions().targetPlatform());
assertEquals(systemVersion, tester.jobs().last(prodApp.instanceId(), productionUsWest1).get().versions().targetPlatform());
upgrader.maintain();
assertEquals(start, tester.jobs().last(devApp.instanceId(), devUsEast1).get().start());
assertEquals(start, tester.jobs().last(prodApp.instanceId(), productionUsWest1).get().start());
tester.controllerTester().upgradeSystem(new Version(7, 8, 9));
upgrader.maintain();
assertEquals(start, tester.jobs().last(devApp.instanceId(), devUsEast1).get().start());
assertEquals(start, tester.jobs().last(prodApp.instanceId(), productionUsWest1).get().start());
tester.clock().advance(Duration.ofHours(13));
upgrader.maintain();
assertEquals(start, tester.jobs().last(devApp.instanceId(), devUsEast1).get().start());
assertEquals(start, tester.jobs().last(prodApp.instanceId(), productionUsWest1).get().start());
tester.clock().advance(Duration.ofHours(21));
upgrader.maintain();
assertEquals(start, tester.jobs().last(devApp.instanceId(), devUsEast1).get().start());
assertEquals(start, tester.jobs().last(prodApp.instanceId(), productionUsWest1).get().start());
tester.clock().advance(Duration.ofHours(3));
upgrader.maintain();
assertEquals(tester.clock().instant().truncatedTo(MILLIS), tester.jobs().last(devApp.instanceId(), devUsEast1).get().start());
assertTrue(tester.jobs().last(devApp.instanceId(), devUsEast1).get().isRedeployment());
assertEquals(start, tester.jobs().last(prodApp.instanceId(), productionUsWest1).get().start());
devApp.runJob(devUsEast1);
tester.controllerTester().upgradeSystem(new Version(7, 9, 11));
tester.clock().advance(Duration.ofHours(48));
upgrader.maintain();
assertEquals(tester.clock().instant().truncatedTo(MILLIS), tester.jobs().last(devApp.instanceId(), devUsEast1).get().start());
}
@Test
} | class DeploymentUpgraderTest {
private final DeploymentTester tester = new DeploymentTester();
@Test
public void testDeploymentUpgrading() {
ZoneId devZone = ZoneId.from(Environment.dev, RegionName.from("us-east-1"));
DeploymentUpgrader upgrader = new DeploymentUpgrader(tester.controller(), Duration.ofDays(1));
var devApp = tester.newDeploymentContext("tenant1", "app1", "default");
var prodApp = tester.newDeploymentContext("tenant2", "app2", "default");
ApplicationPackage appPackage = new ApplicationPackageBuilder().region("us-west-1").build();
Version systemVersion = tester.controller().readSystemVersion();
Instant start = tester.clock().instant().truncatedTo(MILLIS);
devApp.runJob(devUsEast1, appPackage);
prodApp.submit(appPackage).deploy();
assertEquals(systemVersion, tester.jobs().last(devApp.instanceId(), devUsEast1).get().versions().targetPlatform());
assertEquals(systemVersion, tester.jobs().last(prodApp.instanceId(), productionUsWest1).get().versions().targetPlatform());
upgrader.maintain();
assertEquals(start, tester.jobs().last(devApp.instanceId(), devUsEast1).get().start());
assertEquals(start, tester.jobs().last(prodApp.instanceId(), productionUsWest1).get().start());
tester.controllerTester().upgradeSystem(new Version(7, 8, 9));
upgrader.maintain();
assertEquals(start, tester.jobs().last(devApp.instanceId(), devUsEast1).get().start());
assertEquals(start, tester.jobs().last(prodApp.instanceId(), productionUsWest1).get().start());
tester.clock().advance(Duration.ofHours(13));
upgrader.maintain();
assertEquals(start, tester.jobs().last(devApp.instanceId(), devUsEast1).get().start());
assertEquals(start, tester.jobs().last(prodApp.instanceId(), productionUsWest1).get().start());
tester.clock().advance(Duration.ofHours(21));
upgrader.maintain();
assertEquals(start, tester.jobs().last(devApp.instanceId(), devUsEast1).get().start());
assertEquals(start, tester.jobs().last(prodApp.instanceId(), productionUsWest1).get().start());
tester.clock().advance(Duration.ofHours(3));
upgrader.maintain();
assertEquals(tester.clock().instant().truncatedTo(MILLIS), tester.jobs().last(devApp.instanceId(), devUsEast1).get().start());
assertTrue(tester.jobs().last(devApp.instanceId(), devUsEast1).get().isRedeployment());
assertEquals(start, tester.jobs().last(prodApp.instanceId(), productionUsWest1).get().start());
devApp.runJob(devUsEast1);
tester.controllerTester().upgradeSystem(new Version(7, 9, 11));
tester.clock().advance(Duration.ofHours(48));
upgrader.maintain();
assertEquals(tester.clock().instant().truncatedTo(MILLIS), tester.jobs().last(devApp.instanceId(), devUsEast1).get().start());
}
@Test
} |
Agreed to merge in offline discussion | public void requireThatConnectionIsClosedAfterXRequests() throws Exception {
final int MAX_REQUESTS = 10;
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
ConnectorConfig.Builder connectorConfig = new ConnectorConfig.Builder()
.maxRequestsPerConnection(MAX_REQUESTS)
.ssl(new ConnectorConfig.Ssl.Builder()
.enabled(true)
.clientAuth(ConnectorConfig.Ssl.ClientAuth.Enum.NEED_AUTH)
.privateKeyFile(privateKeyFile.toString())
.certificateFile(certificateFile.toString())
.caCertificateFile(certificateFile.toString()));
ServerConfig.Builder serverConfig = new ServerConfig.Builder()
.connectionLog(new ServerConfig.ConnectionLog.Builder().enabled(true));
JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
new EchoRequestHandler(),
serverConfig,
connectorConfig,
binder -> {});
for (int i = 0; i < MAX_REQUESTS - 1; i++) {
driver.client().get("/status.html")
.expectStatusCode(is(OK))
.expectNoHeader(CONNECTION);
}
driver.client().get("/status.html")
.expectStatusCode(is(OK))
.expectHeader(CONNECTION, is(CLOSE));
try (CloseableHttpAsyncClient client = createHttp2Client(driver)) {
String uri = "https:
for (int i = 0; i < MAX_REQUESTS - 1; i++) {
SimpleHttpResponse response = client.execute(SimpleRequestBuilder.get(uri).build(), null).get();
assertEquals(OK, response.getCode());
}
try {
client.execute(SimpleRequestBuilder.get(uri).build(), null).get();
fail();
} catch (ExecutionException e) {
assertEquals(e.getMessage(), "org.apache.hc.core5.http2.H2StreamResetException: Stream refused");
}
}
assertTrue(driver.close());
} | public void requireThatConnectionIsClosedAfterXRequests() throws Exception {
final int MAX_REQUESTS = 10;
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
ConnectorConfig.Builder connectorConfig = new ConnectorConfig.Builder()
.maxRequestsPerConnection(MAX_REQUESTS)
.ssl(new ConnectorConfig.Ssl.Builder()
.enabled(true)
.clientAuth(ConnectorConfig.Ssl.ClientAuth.Enum.NEED_AUTH)
.privateKeyFile(privateKeyFile.toString())
.certificateFile(certificateFile.toString())
.caCertificateFile(certificateFile.toString()));
ServerConfig.Builder serverConfig = new ServerConfig.Builder()
.connectionLog(new ServerConfig.ConnectionLog.Builder().enabled(true));
JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
new EchoRequestHandler(),
serverConfig,
connectorConfig,
binder -> {});
for (int i = 0; i < MAX_REQUESTS - 1; i++) {
driver.client().get("/status.html")
.expectStatusCode(is(OK))
.expectNoHeader(CONNECTION);
}
driver.client().get("/status.html")
.expectStatusCode(is(OK))
.expectHeader(CONNECTION, is(CLOSE));
try (CloseableHttpAsyncClient client = createHttp2Client(driver)) {
String uri = "https:
for (int i = 0; i < MAX_REQUESTS - 1; i++) {
SimpleHttpResponse response = client.execute(SimpleRequestBuilder.get(uri).build(), null).get();
assertEquals(OK, response.getCode());
}
try {
client.execute(SimpleRequestBuilder.get(uri).build(), null).get();
fail();
} catch (ExecutionException e) {
assertEquals(e.getMessage(), "org.apache.hc.core5.http2.H2StreamResetException: Stream refused");
}
}
assertTrue(driver.close());
} | class HttpServerTest {
@Rule
public TemporaryFolder tmpFolder = new TemporaryFolder();
@Test
public void requireThatServerCanListenToRandomPort() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(mockRequestHandler());
assertNotEquals(0, driver.server().getListenPort());
assertTrue(driver.close());
}
@Test
public void requireThatServerCanNotListenToBoundPort() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(mockRequestHandler());
try {
JettyTestDriver.newConfiguredInstance(
mockRequestHandler(),
new ServerConfig.Builder(),
new ConnectorConfig.Builder()
.listenPort(driver.server().getListenPort())
);
} catch (final Throwable t) {
assertThat(t.getCause(), instanceOf(BindException.class));
}
assertTrue(driver.close());
}
@Test
public void requireThatBindingSetNotFoundReturns404() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
mockRequestHandler(),
new ServerConfig.Builder()
.developerMode(true),
new ConnectorConfig.Builder(),
newBindingSetSelector("unknown"));
driver.client().get("/status.html")
.expectStatusCode(is(NOT_FOUND))
.expectContent(containsPattern(Pattern.compile(
Pattern.quote(BindingSetNotFoundException.class.getName()) +
": No binding set named 'unknown'\\.\n\tat .+",
Pattern.DOTALL | Pattern.MULTILINE)));
assertTrue(driver.close());
}
@Test
public void requireThatTooLongInitLineReturns414() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
mockRequestHandler(),
new ServerConfig.Builder(),
new ConnectorConfig.Builder()
.requestHeaderSize(1));
driver.client().get("/status.html")
.expectStatusCode(is(REQUEST_URI_TOO_LONG));
assertTrue(driver.close());
}
@Test
public void requireThatAccessLogIsCalledForRequestRejectedByJetty() throws Exception {
BlockingQueueRequestLog requestLogMock = new BlockingQueueRequestLog();
final JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
mockRequestHandler(),
new ServerConfig.Builder(),
new ConnectorConfig.Builder().requestHeaderSize(1),
binder -> binder.bind(RequestLog.class).toInstance(requestLogMock));
driver.client().get("/status.html")
.expectStatusCode(is(REQUEST_URI_TOO_LONG));
RequestLogEntry entry = requestLogMock.poll(Duration.ofSeconds(5));
assertEquals(414, entry.statusCode().getAsInt());
assertThat(driver.close(), is(true));
}
@Test
public void requireThatServerCanEcho() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoRequestHandler());
driver.client().get("/status.html")
.expectStatusCode(is(OK));
assertTrue(driver.close());
}
@Test
public void requireThatServerCanEchoCompressed() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoRequestHandler());
SimpleHttpClient client = driver.newClient(true);
client.get("/status.html")
.expectStatusCode(is(OK));
assertTrue(driver.close());
}
@Test
public void requireThatServerCanHandleMultipleRequests() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoRequestHandler());
driver.client().get("/status.html")
.expectStatusCode(is(OK));
driver.client().get("/status.html")
.expectStatusCode(is(OK));
assertTrue(driver.close());
}
@Test
public void requireThatFormPostWorks() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final String requestContent = generateContent('a', 30);
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent(requestContent)
.execute();
response.expectStatusCode(is(OK))
.expectContent(startsWith('{' + requestContent + "=[]}"));
assertTrue(driver.close());
}
@Test
public void requireThatFormPostDoesNotRemoveContentByDefault() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent("foo=bar")
.execute();
response.expectStatusCode(is(OK))
.expectContent(is("{foo=[bar]}foo=bar"));
assertTrue(driver.close());
}
@Test
public void requireThatFormPostKeepsContentWhenConfiguredTo() throws Exception {
final JettyTestDriver driver = newDriverWithFormPostContentRemoved(new ParameterPrinterRequestHandler(), false);
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent("foo=bar")
.execute();
response.expectStatusCode(is(OK))
.expectContent(is("{foo=[bar]}foo=bar"));
assertTrue(driver.close());
}
@Test
public void requireThatFormPostRemovesContentWhenConfiguredTo() throws Exception {
final JettyTestDriver driver = newDriverWithFormPostContentRemoved(new ParameterPrinterRequestHandler(), true);
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent("foo=bar")
.execute();
response.expectStatusCode(is(OK))
.expectContent(is("{foo=[bar]}"));
assertTrue(driver.close());
}
@Test
public void requireThatFormPostWithCharsetSpecifiedWorks() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final String requestContent = generateContent('a', 30);
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(X_DISABLE_CHUNKING, "true")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED + ";charset=UTF-8")
.setContent(requestContent)
.execute();
response.expectStatusCode(is(OK))
.expectContent(startsWith('{' + requestContent + "=[]}"));
assertTrue(driver.close());
}
@Test
public void requireThatEmptyFormPostWorks() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.execute();
response.expectStatusCode(is(OK))
.expectContent(is("{}"));
assertTrue(driver.close());
}
@Test
public void requireThatFormParametersAreParsed() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent("a=b&c=d")
.execute();
response.expectStatusCode(is(OK))
.expectContent(startsWith("{a=[b], c=[d]}"));
assertTrue(driver.close());
}
@Test
public void requireThatUriParametersAreParsed() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html?a=b&c=d")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.execute();
response.expectStatusCode(is(OK))
.expectContent(is("{a=[b], c=[d]}"));
assertTrue(driver.close());
}
@Test
public void requireThatFormAndUriParametersAreMerged() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html?a=b&c=d1")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent("c=d2&e=f")
.execute();
response.expectStatusCode(is(OK))
.expectContent(startsWith("{a=[b], c=[d1, d2], e=[f]}"));
assertTrue(driver.close());
}
@Test
public void requireThatFormCharsetIsHonored() throws Exception {
final JettyTestDriver driver = newDriverWithFormPostContentRemoved(new ParameterPrinterRequestHandler(), true);
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED + ";charset=ISO-8859-1")
.setBinaryContent(new byte[]{66, (byte) 230, 114, 61, 98, 108, (byte) 229})
.execute();
response.expectStatusCode(is(OK))
.expectContent(is("{B\u00e6r=[bl\u00e5]}"));
assertTrue(driver.close());
}
@Test
public void requireThatUnknownFormCharsetIsTreatedAsBadRequest() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED + ";charset=FLARBA-GARBA-7")
.setContent("a=b")
.execute();
response.expectStatusCode(is(UNSUPPORTED_MEDIA_TYPE));
assertTrue(driver.close());
}
@Test
public void requireThatFormPostWithPercentEncodedContentIsDecoded() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent("%20%3D%C3%98=%22%25+")
.execute();
response.expectStatusCode(is(OK))
.expectContent(startsWith("{ =\u00d8=[\"% ]}"));
assertTrue(driver.close());
}
@Test
public void requireThatFormPostWithThrowingHandlerIsExceptionSafe() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ThrowingHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent("a=b")
.execute();
response.expectStatusCode(is(INTERNAL_SERVER_ERROR));
assertTrue(driver.close());
}
@Test
public void requireThatMultiPostWorks() throws Exception {
final String startTxtContent = "this is a test for POST.";
final String updaterConfContent
= "identifier = updater\n"
+ "server_type = gds\n";
final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.setMultipartContent(
newFileBody("start.txt", startTxtContent),
newFileBody("updater.conf", updaterConfContent))
.execute();
response.expectStatusCode(is(OK))
.expectContent(containsString(startTxtContent))
.expectContent(containsString(updaterConfContent));
}
@Test
public void requireThatRequestCookiesAreReceived() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new CookiePrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(COOKIE, "foo=bar")
.execute();
response.expectStatusCode(is(OK))
.expectContent(containsString("[foo=bar]"));
assertTrue(driver.close());
}
@Test
public void requireThatSetCookieHeaderIsCorrect() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new CookieSetterRequestHandler(
new Cookie("foo", "bar")
.setDomain(".localhost")
.setHttpOnly(true)
.setPath("/foopath")
.setSecure(true)));
driver.client().get("/status.html")
.expectStatusCode(is(OK))
.expectHeader("Set-Cookie",
is("foo=bar; Path=/foopath; Domain=.localhost; Secure; HttpOnly"));
assertTrue(driver.close());
}
@Test
public void requireThatTimeoutWorks() throws Exception {
final UnresponsiveHandler requestHandler = new UnresponsiveHandler();
final JettyTestDriver driver = JettyTestDriver.newInstance(requestHandler);
driver.client().get("/status.html")
.expectStatusCode(is(GATEWAY_TIMEOUT));
ResponseDispatch.newInstance(OK).dispatch(requestHandler.responseHandler);
assertTrue(driver.close());
}
@Test
public void requireThatHeaderWithNullValueIsOmitted() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoWithHeaderRequestHandler("X-Foo", null));
driver.client().get("/status.html")
.expectStatusCode(is(OK))
.expectNoHeader("X-Foo");
assertTrue(driver.close());
}
@Test
public void requireThatHeaderWithEmptyValueIsAllowed() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoWithHeaderRequestHandler("X-Foo", ""));
driver.client().get("/status.html")
.expectStatusCode(is(OK))
.expectHeader("X-Foo", is(""));
assertTrue(driver.close());
}
@Test
public void requireThatNoConnectionHeaderMeansKeepAliveInHttp11KeepAliveDisabled() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoWithHeaderRequestHandler(CONNECTION, CLOSE));
driver.client().get("/status.html")
.expectHeader(CONNECTION, is(CLOSE));
assertThat(driver.close(), is(true));
}
@Test
@Test
public void requireThatServerCanRespondToSslRequest() throws Exception {
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
final JettyTestDriver driver = JettyTestDriver.newInstanceWithSsl(new EchoRequestHandler(), certificateFile, privateKeyFile, TlsClientAuth.WANT);
driver.client().get("/status.html")
.expectStatusCode(is(OK));
assertTrue(driver.close());
}
@Test
public void requireThatServerCanRespondToHttp2Request() throws Exception {
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
MetricConsumerMock metricConsumer = new MetricConsumerMock();
InMemoryConnectionLog connectionLog = new InMemoryConnectionLog();
JettyTestDriver driver = createSslTestDriver(certificateFile, privateKeyFile, metricConsumer, connectionLog);
try (CloseableHttpAsyncClient client = createHttp2Client(driver)) {
String uri = "https:
SimpleHttpResponse response = client.execute(SimpleRequestBuilder.get(uri).build(), null).get();
assertNull(response.getBodyText());
assertEquals(OK, response.getCode());
}
assertTrue(driver.close());
ConnectionLogEntry entry = connectionLog.logEntries().get(0);
assertEquals("HTTP/2.0", entry.httpProtocol().get());
}
@Test
public void requireThatTlsClientAuthenticationEnforcerRejectsRequestsForNonWhitelistedPaths() throws IOException {
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
JettyTestDriver driver = createSslWithTlsClientAuthenticationEnforcer(certificateFile, privateKeyFile);
SSLContext trustStoreOnlyCtx = new SslContextBuilder()
.withTrustStore(certificateFile)
.build();
new SimpleHttpClient(trustStoreOnlyCtx, driver.server().getListenPort(), false)
.get("/dummy.html")
.expectStatusCode(is(UNAUTHORIZED));
assertTrue(driver.close());
}
@Test
public void requireThatTlsClientAuthenticationEnforcerAllowsRequestForWhitelistedPaths() throws IOException {
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
JettyTestDriver driver = JettyTestDriver.newInstanceWithSsl(new EchoRequestHandler(), certificateFile, privateKeyFile, TlsClientAuth.WANT);
SSLContext trustStoreOnlyCtx = new SslContextBuilder()
.withTrustStore(certificateFile)
.build();
new SimpleHttpClient(trustStoreOnlyCtx, driver.server().getListenPort(), false)
.get("/status.html")
.expectStatusCode(is(OK));
assertTrue(driver.close());
}
@Test
public void requireThatConnectedAtReturnsNonZero() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ConnectedAtRequestHandler());
driver.client().get("/status.html")
.expectStatusCode(is(OK))
.expectContent(matchesPattern("\\d{13,}"));
assertThat(driver.close(), is(true));
}
@Test
public void requireThatGzipEncodingRequestsAreAutomaticallyDecompressed() throws Exception {
    // The request body is gzip-compressed by the client; the server must decompress it
    // before form-parameter parsing, so the parameter printer sees the plain content.
    JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
    String requestContent = generateContent('a', 30);
    ResponseValidator response = driver.client().newPost("/status.html")
            .addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
            .setGzipContent(requestContent)
            .execute();
    response.expectStatusCode(is(OK))
            .expectContent(startsWith('{' + requestContent + "=[]}"));
    assertTrue(driver.close());
}
@Test
public void requireThatResponseStatsAreCollected() throws Exception {
    // Verifies that HttpResponseStatisticsCollector records scheme, method, status class and
    // request type per response, and that a handler-specified request type overrides the default.
    RequestTypeHandler handler = new RequestTypeHandler();
    JettyTestDriver driver = JettyTestDriver.newInstance(handler);
    HttpResponseStatisticsCollector statisticsCollector = ((AbstractHandlerContainer) driver.server().server().getHandler())
            .getChildHandlerByClass(HttpResponseStatisticsCollector.class);
    {
        // No traffic yet: the collector must be empty.
        List<HttpResponseStatisticsCollector.StatisticsEntry> stats = statisticsCollector.takeStatistics();
        assertEquals(0, stats.size());
    }
    {
        // POST is classified as request type "write" by default.
        driver.client().newPost("/status.html").execute();
        var entry = waitForStatistics(statisticsCollector);
        assertEquals("http", entry.scheme);
        assertEquals("POST", entry.method);
        assertEquals("http.status.2xx", entry.name);
        assertEquals("write", entry.requestType);
        assertEquals(1, entry.value);
    }
    {
        // GET is classified as request type "read" by default.
        driver.client().newGet("/status.html").execute();
        var entry = waitForStatistics(statisticsCollector);
        assertEquals("http", entry.scheme);
        assertEquals("GET", entry.method);
        assertEquals("http.status.2xx", entry.name);
        assertEquals("read", entry.requestType);
        assertEquals(1, entry.value);
    }
    {
        // A request type set on the response by the handler wins over the method-based default.
        handler.setRequestType(Request.RequestType.READ);
        driver.client().newPost("/status.html").execute();
        var entry = waitForStatistics(statisticsCollector);
        assertEquals("Handler overrides request type", "read", entry.requestType);
    }
    assertTrue(driver.close());
}
/**
 * Polls the collector until it publishes statistics, then asserts exactly one
 * entry was produced and returns it. Statistics are published asynchronously
 * after the response completes, hence the poll loop.
 */
private HttpResponseStatisticsCollector.StatisticsEntry waitForStatistics(HttpResponseStatisticsCollector
                                                                                  statisticsCollector) {
    List<HttpResponseStatisticsCollector.StatisticsEntry> entries = Collections.emptyList();
    int tries = 0;
    while (entries.isEmpty() && tries < 10000) {
        entries = statisticsCollector.takeStatistics();
        if (entries.isEmpty()) {
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                // Original swallowed the interrupt; restore the flag and stop polling so the
                // thread's interrupted status is not silently lost.
                Thread.currentThread().interrupt();
                break;
            }
        }
        tries++;
    }
    assertEquals(1, entries.size());
    return entries.get(0);
}
@Test
public void requireThatConnectionThrottleDoesNotBlockConnectionsBelowThreshold() throws Exception {
    // With throttling enabled but none of the limits exceeded (single connection,
    // low accept rate), a plain request must still succeed.
    JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
            new EchoRequestHandler(),
            new ServerConfig.Builder(),
            new ConnectorConfig.Builder()
                    .throttling(new Throttling.Builder()
                                        .enabled(true)
                                        .maxAcceptRate(10)
                                        .maxHeapUtilization(1.0)
                                        .maxConnections(10)));
    driver.client().get("/status.html")
            .expectStatusCode(is(OK));
    assertTrue(driver.close());
}
@Test
public void requireThatConnectionIsTrackedInConnectionLog() throws Exception {
    // Sends 100 POSTs over a single keep-alive TLS connection (client cert required) and
    // verifies that exactly one connection-log entry aggregates all of them.
    Path privateKeyFile = tmpFolder.newFile().toPath();
    Path certificateFile = tmpFolder.newFile().toPath();
    generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
    InMemoryConnectionLog connectionLog = new InMemoryConnectionLog();
    Module overrideModule = binder -> binder.bind(ConnectionLog.class).toInstance(connectionLog);
    JettyTestDriver driver = JettyTestDriver.newInstanceWithSsl(new OkRequestHandler(), certificateFile, privateKeyFile, TlsClientAuth.NEED, overrideModule);
    int listenPort = driver.server().getListenPort();
    // Build a request body large enough that 100 requests exceed the byte-count thresholds below.
    StringBuilder builder = new StringBuilder();
    for (int i = 0; i < 1000; i++) {
        builder.append(i);
    }
    byte[] content = builder.toString().getBytes();
    for (int i = 0; i < 100; i++) {
        driver.client().newPost("/status.html").setBinaryContent(content).execute()
                .expectStatusCode(is(OK));
    }
    // Entries are only written when the connection closes, so close the server first.
    assertTrue(driver.close());
    List<ConnectionLogEntry> logEntries = connectionLog.logEntries();
    Assertions.assertThat(logEntries).hasSize(1);
    ConnectionLogEntry logEntry = logEntries.get(0);
    assertEquals(4, UUID.fromString(logEntry.id()).version()); // id must be a random (v4) UUID
    Assertions.assertThat(logEntry.timestamp()).isAfter(Instant.EPOCH);
    Assertions.assertThat(logEntry.requests()).hasValue(100L);
    Assertions.assertThat(logEntry.responses()).hasValue(100L);
    Assertions.assertThat(logEntry.peerAddress()).hasValue("127.0.0.1");
    Assertions.assertThat(logEntry.localAddress()).hasValue("127.0.0.1");
    Assertions.assertThat(logEntry.localPort()).hasValue(listenPort);
    // Byte counters are lower-bounded rather than exact: headers/TLS framing make totals vary.
    Assertions.assertThat(logEntry.httpBytesReceived()).hasValueSatisfying(value -> Assertions.assertThat(value).isGreaterThan(100000L));
    Assertions.assertThat(logEntry.httpBytesSent()).hasValueSatisfying(value -> Assertions.assertThat(value).isGreaterThan(10000L));
    Assertions.assertThat(logEntry.sslProtocol()).hasValueSatisfying(TlsContext.ALLOWED_PROTOCOLS::contains);
    Assertions.assertThat(logEntry.sslPeerSubject()).hasValue("CN=localhost");
    Assertions.assertThat(logEntry.sslCipherSuite()).hasValueSatisfying(cipher -> Assertions.assertThat(cipher).isNotBlank());
    Assertions.assertThat(logEntry.sslSessionId()).hasValueSatisfying(sessionId -> Assertions.assertThat(sessionId).hasSize(64));
    // Validity window matches the certificate produced by generatePrivateKeyAndCertificate —
    // presumably EPOCH..EPOCH+100000 days; confirm against that helper if it changes.
    Assertions.assertThat(logEntry.sslPeerNotBefore()).hasValue(Instant.EPOCH);
    Assertions.assertThat(logEntry.sslPeerNotAfter()).hasValue(Instant.EPOCH.plus(100_000, ChronoUnit.DAYS));
}
@Test
public void requireThatRequestIsTrackedInAccessLog() throws IOException, InterruptedException {
    // A successful POST must produce one access-log entry carrying the status code
    // and the request body size ("abcdef" = 6 bytes).
    BlockingQueueRequestLog requestLogMock = new BlockingQueueRequestLog();
    JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
            new EchoRequestHandler(),
            new ServerConfig.Builder(),
            new ConnectorConfig.Builder(),
            binder -> binder.bind(RequestLog.class).toInstance(requestLogMock));
    driver.client().newPost("/status.html").setContent("abcdef").execute().expectStatusCode(is(OK));
    RequestLogEntry entry = requestLogMock.poll(Duration.ofSeconds(5)); // logging is asynchronous
    Assertions.assertThat(entry.statusCode()).hasValue(200);
    Assertions.assertThat(entry.requestSize()).hasValue(6);
    assertThat(driver.close(), is(true));
}
@Test
public void requireThatRequestsPerConnectionMetricIsAggregated() throws IOException {
    // One request on one TLS connection: the requests-per-connection metric must be
    // reported as 1 at least once after the connection closes.
    Path privateKeyFile = tmpFolder.newFile().toPath();
    Path certificateFile = tmpFolder.newFile().toPath();
    generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
    var metricConsumer = new MetricConsumerMock();
    InMemoryConnectionLog connectionLog = new InMemoryConnectionLog();
    JettyTestDriver driver = createSslTestDriver(certificateFile, privateKeyFile, metricConsumer, connectionLog);
    driver.client().get("/").expectStatusCode(is(OK));
    assertThat(driver.close(), is(true));
    verify(metricConsumer.mockitoMock(), atLeast(1))
            .set(MetricDefinitions.REQUESTS_PER_CONNECTION, 1L, MetricConsumerMock.STATIC_CONTEXT);
}
@Test
public void uriWithEmptyPathSegmentIsAllowed() throws Exception {
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
MetricConsumerMock metricConsumer = new MetricConsumerMock();
InMemoryConnectionLog connectionLog = new InMemoryConnectionLog();
JettyTestDriver driver = createSslTestDriver(certificateFile, privateKeyFile, metricConsumer, connectionLog);
String uriPath = "/path/with/empty
driver.client().get(uriPath).expectStatusCode(is(OK));
try (CloseableHttpAsyncClient client = createHttp2Client(driver)) {
String uri = "https:
SimpleHttpResponse response = client.execute(SimpleRequestBuilder.get(uri).build(), null).get();
assertEquals(OK, response.getCode());
}
assertTrue(driver.close());
}
/** Creates and starts an HTTP/2 async client that trusts the driver's SSL context. */
private static CloseableHttpAsyncClient createHttp2Client(JettyTestDriver driver) {
    TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create()
            .setSslContext(driver.sslContext())
            .build();
    CloseableHttpAsyncClient http2Client = H2AsyncClientBuilder.create()
            .setTlsStrategy(tlsStrategy)
            .disableAutomaticRetries()
            .build();
    http2Client.start();
    return http2Client;
}
/**
 * Builds a TLS test server (client auth WANTed) whose TlsClientAuthEnforcer rejects
 * certificate-less requests on every path except the whitelisted /status.html.
 */
private static JettyTestDriver createSslWithTlsClientAuthenticationEnforcer(Path certificateFile, Path privateKeyFile) {
    ConnectorConfig.TlsClientAuthEnforcer.Builder enforcer = new ConnectorConfig.TlsClientAuthEnforcer.Builder()
            .enable(true)
            .pathWhitelist("/status.html");
    ConnectorConfig.Ssl.Builder ssl = new ConnectorConfig.Ssl.Builder()
            .enabled(true)
            .clientAuth(ConnectorConfig.Ssl.ClientAuth.Enum.WANT_AUTH)
            .privateKeyFile(privateKeyFile.toString())
            .certificateFile(certificateFile.toString())
            .caCertificateFile(certificateFile.toString());
    return JettyTestDriver.newConfiguredInstance(
            new EchoRequestHandler(),
            new ServerConfig.Builder().connectionLog(new ServerConfig.ConnectionLog.Builder().enabled(true)),
            new ConnectorConfig.Builder().tlsClientAuthEnforcer(enforcer).ssl(ssl),
            binder -> {});
}
/** Mockito-backed handler stub whose refer() yields a no-op resource reference. */
private static RequestHandler mockRequestHandler() {
    RequestHandler handler = mock(RequestHandler.class);
    when(handler.refer()).thenReturn(References.NOOP_REFERENCE);
    return handler;
}
/** Returns a string consisting of {@code len} repetitions of {@code c}. */
private static String generateContent(final char c, final int len) {
    final StringBuilder content = new StringBuilder(len);
    while (content.length() < len) {
        content.append(c);
    }
    return content.toString();
}
/** Driver whose server optionally strips the raw body of www-url-encoded POSTs after parsing. */
private static JettyTestDriver newDriverWithFormPostContentRemoved(RequestHandler requestHandler,
                                                                   boolean removeFormPostBody) throws Exception {
    ServerConfig.Builder serverConfig = new ServerConfig.Builder()
            .removeRawPostBodyForWwwUrlEncodedPost(removeFormPostBody);
    return JettyTestDriver.newConfiguredInstance(requestHandler, serverConfig, new ConnectorConfig.Builder());
}
/** Plain-text multipart body part reporting {@code fileName} as both part name and filename. */
private static FormBodyPart newFileBody(final String fileName, final String fileContent) {
    StringBody body = new StringBody(fileContent, ContentType.TEXT_PLAIN) {
        @Override public String getFilename() { return fileName; }
        @Override public String getMimeType() { return ""; }
        @Override public String getCharset() { return null; }
    };
    return FormBodyPartBuilder.create()
            .setBody(body)
            .setName(fileName)
            .build();
}
/** Replies 200 OK with the connection-establishment timestamp (epoch millis) as the body. */
private static class ConnectedAtRequestHandler extends AbstractRequestHandler {
    @Override
    public ContentChannel handleRequest(final Request request, final ResponseHandler handler) {
        final HttpRequest httpRequest = (HttpRequest) request;
        final String connectedAt = Long.toString(httpRequest.getConnectedAt(TimeUnit.MILLISECONDS));
        final ContentChannel channel = handler.handleResponse(new Response(OK));
        channel.write(StandardCharsets.UTF_8.encode(connectedAt), null);
        channel.close(null);
        return null; // no request content expected
    }
}
/** Replies 200 OK with the given cookie encoded into the Set-Cookie header. */
private static class CookieSetterRequestHandler extends AbstractRequestHandler {

    final Cookie cookie;

    CookieSetterRequestHandler(final Cookie cookie) {
        this.cookie = cookie;
    }

    @Override
    public ContentChannel handleRequest(final Request request, final ResponseHandler handler) {
        final HttpResponse response = HttpResponse.newInstance(OK);
        response.encodeSetCookieHeader(List.of(cookie));
        ResponseDispatch.newInstance(response).dispatch(handler);
        return null; // no request content expected
    }
}
/** Replies 200 OK with the request's cookies, sorted by name, rendered via List.toString(). */
private static class CookiePrinterRequestHandler extends AbstractRequestHandler {
    @Override
    public ContentChannel handleRequest(final Request request, final ResponseHandler handler) {
        final List<Cookie> cookies = new ArrayList<>(((HttpRequest) request).decodeCookieHeader());
        cookies.sort(new CookieComparator()); // deterministic order for content assertions
        final ContentChannel out = ResponseDispatch.newInstance(Response.Status.OK).connect(handler);
        out.write(StandardCharsets.UTF_8.encode(cookies.toString()), null);
        out.close(null);
        return null;
    }
}
/** Replies 200 OK with the request parameters rendered via TreeMap.toString() (sorted by key). */
private static class ParameterPrinterRequestHandler extends AbstractRequestHandler {
    @Override
    public ContentChannel handleRequest(Request request, ResponseHandler handler) {
        Map<String, List<String>> sortedParameters = new TreeMap<>(((HttpRequest) request).parameters());
        ContentChannel out = ResponseDispatch.newInstance(Response.Status.OK).connect(handler);
        out.write(StandardCharsets.UTF_8.encode(sortedParameters.toString()), null);
        // The channel is returned un-closed so request content is appended after the parameters.
        return out;
    }
}
/** Replies 200 OK, optionally tagging the response with a configurable request type. */
private static class RequestTypeHandler extends AbstractRequestHandler {

    private Request.RequestType requestType = null;

    public void setRequestType(Request.RequestType requestType) {
        this.requestType = requestType;
    }

    @Override
    public ContentChannel handleRequest(Request request, ResponseHandler handler) {
        Response okResponse = new Response(OK);
        okResponse.setRequestType(requestType); // null means "no override"
        return handler.handleResponse(okResponse);
    }
}
// Always throws: used to verify that the server turns handler exceptions into 500 responses.
private static class ThrowingHandler extends AbstractRequestHandler {
    @Override
    public ContentChannel handleRequest(final Request request, final ResponseHandler handler) {
        throw new RuntimeException("Deliberately thrown exception");
    }
}
// Never responds: arms a 100 ms request timeout and captures the response handler so a
// test can exercise the server's timeout path and still respond afterwards.
private static class UnresponsiveHandler extends AbstractRequestHandler {
    ResponseHandler responseHandler;
    @Override
    public ContentChannel handleRequest(final Request request, final ResponseHandler handler) {
        request.setTimeout(100, TimeUnit.MILLISECONDS);
        responseHandler = handler; // kept for the test to dispatch a late response
        return null;
    }
}
/** Immediately replies 200 OK with an empty body; request content is accepted and discarded. */
private static class OkRequestHandler extends AbstractRequestHandler {
    @Override
    public ContentChannel handleRequest(Request request, ResponseHandler handler) {
        handler.handleResponse(new Response(OK)).close(null);
        return NullContent.INSTANCE;
    }
}
/**
 * Replies 200 OK with one fixed header attached; the response content channel is
 * returned so incoming request content is streamed back to the client (echo).
 */
private static class EchoWithHeaderRequestHandler extends AbstractRequestHandler {

    final String headerName;
    final String headerValue;

    EchoWithHeaderRequestHandler(final String headerName, final String headerValue) {
        this.headerName = headerName;
        this.headerValue = headerValue;
    }

    @Override
    public ContentChannel handleRequest(final Request request, final ResponseHandler handler) {
        final Response okResponse = new Response(OK);
        okResponse.headers().add(headerName, headerValue);
        return handler.handleResponse(okResponse);
    }
}
/** Guice module binding a BindingSetSelector that routes every URI to the named binding set. */
private static Module newBindingSetSelector(final String setName) {
    return new AbstractModule() {
        @Override
        protected void configure() {
            // BindingSetSelector has a single abstract method (select), so a lambda suffices.
            bind(BindingSetSelector.class).toInstance(uri -> setName);
        }
    };
}
/** Orders cookies alphabetically by name. */
private static class CookieComparator implements Comparator<Cookie> {
    @Override
    public int compare(final Cookie first, final Cookie second) {
        return first.getName().compareTo(second.getName());
    }
}
} | class HttpServerTest {
@Rule
public TemporaryFolder tmpFolder = new TemporaryFolder();
@Test
public void requireThatServerCanListenToRandomPort() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(mockRequestHandler());
assertNotEquals(0, driver.server().getListenPort());
assertTrue(driver.close());
}
@Test
public void requireThatServerCanNotListenToBoundPort() throws Exception {
    final JettyTestDriver driver = JettyTestDriver.newInstance(mockRequestHandler());
    // Starting a second server on the already-bound port must fail with BindException as cause.
    boolean bindFailed = false;
    try {
        JettyTestDriver.newConfiguredInstance(
                mockRequestHandler(),
                new ServerConfig.Builder(),
                new ConnectorConfig.Builder()
                        .listenPort(driver.server().getListenPort())
        );
    } catch (final Throwable t) {
        assertThat(t.getCause(), instanceOf(BindException.class));
        bindFailed = true;
    }
    // The original test passed silently when no exception was thrown; assert it explicitly.
    assertTrue("Expected binding to an already-bound port to fail", bindFailed);
    assertTrue(driver.close());
}
@Test
public void requireThatBindingSetNotFoundReturns404() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
mockRequestHandler(),
new ServerConfig.Builder()
.developerMode(true),
new ConnectorConfig.Builder(),
newBindingSetSelector("unknown"));
driver.client().get("/status.html")
.expectStatusCode(is(NOT_FOUND))
.expectContent(containsPattern(Pattern.compile(
Pattern.quote(BindingSetNotFoundException.class.getName()) +
": No binding set named 'unknown'\\.\n\tat .+",
Pattern.DOTALL | Pattern.MULTILINE)));
assertTrue(driver.close());
}
@Test
public void requireThatTooLongInitLineReturns414() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
mockRequestHandler(),
new ServerConfig.Builder(),
new ConnectorConfig.Builder()
.requestHeaderSize(1));
driver.client().get("/status.html")
.expectStatusCode(is(REQUEST_URI_TOO_LONG));
assertTrue(driver.close());
}
@Test
public void requireThatAccessLogIsCalledForRequestRejectedByJetty() throws Exception {
BlockingQueueRequestLog requestLogMock = new BlockingQueueRequestLog();
final JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
mockRequestHandler(),
new ServerConfig.Builder(),
new ConnectorConfig.Builder().requestHeaderSize(1),
binder -> binder.bind(RequestLog.class).toInstance(requestLogMock));
driver.client().get("/status.html")
.expectStatusCode(is(REQUEST_URI_TOO_LONG));
RequestLogEntry entry = requestLogMock.poll(Duration.ofSeconds(5));
assertEquals(414, entry.statusCode().getAsInt());
assertThat(driver.close(), is(true));
}
@Test
public void requireThatServerCanEcho() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoRequestHandler());
driver.client().get("/status.html")
.expectStatusCode(is(OK));
assertTrue(driver.close());
}
@Test
public void requireThatServerCanEchoCompressed() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoRequestHandler());
SimpleHttpClient client = driver.newClient(true);
client.get("/status.html")
.expectStatusCode(is(OK));
assertTrue(driver.close());
}
@Test
public void requireThatServerCanHandleMultipleRequests() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoRequestHandler());
driver.client().get("/status.html")
.expectStatusCode(is(OK));
driver.client().get("/status.html")
.expectStatusCode(is(OK));
assertTrue(driver.close());
}
@Test
public void requireThatFormPostWorks() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final String requestContent = generateContent('a', 30);
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent(requestContent)
.execute();
response.expectStatusCode(is(OK))
.expectContent(startsWith('{' + requestContent + "=[]}"));
assertTrue(driver.close());
}
@Test
public void requireThatFormPostDoesNotRemoveContentByDefault() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent("foo=bar")
.execute();
response.expectStatusCode(is(OK))
.expectContent(is("{foo=[bar]}foo=bar"));
assertTrue(driver.close());
}
@Test
public void requireThatFormPostKeepsContentWhenConfiguredTo() throws Exception {
final JettyTestDriver driver = newDriverWithFormPostContentRemoved(new ParameterPrinterRequestHandler(), false);
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent("foo=bar")
.execute();
response.expectStatusCode(is(OK))
.expectContent(is("{foo=[bar]}foo=bar"));
assertTrue(driver.close());
}
@Test
public void requireThatFormPostRemovesContentWhenConfiguredTo() throws Exception {
final JettyTestDriver driver = newDriverWithFormPostContentRemoved(new ParameterPrinterRequestHandler(), true);
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent("foo=bar")
.execute();
response.expectStatusCode(is(OK))
.expectContent(is("{foo=[bar]}"));
assertTrue(driver.close());
}
@Test
public void requireThatFormPostWithCharsetSpecifiedWorks() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final String requestContent = generateContent('a', 30);
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(X_DISABLE_CHUNKING, "true")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED + ";charset=UTF-8")
.setContent(requestContent)
.execute();
response.expectStatusCode(is(OK))
.expectContent(startsWith('{' + requestContent + "=[]}"));
assertTrue(driver.close());
}
@Test
public void requireThatEmptyFormPostWorks() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.execute();
response.expectStatusCode(is(OK))
.expectContent(is("{}"));
assertTrue(driver.close());
}
@Test
public void requireThatFormParametersAreParsed() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent("a=b&c=d")
.execute();
response.expectStatusCode(is(OK))
.expectContent(startsWith("{a=[b], c=[d]}"));
assertTrue(driver.close());
}
@Test
public void requireThatUriParametersAreParsed() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html?a=b&c=d")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.execute();
response.expectStatusCode(is(OK))
.expectContent(is("{a=[b], c=[d]}"));
assertTrue(driver.close());
}
@Test
public void requireThatFormAndUriParametersAreMerged() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html?a=b&c=d1")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent("c=d2&e=f")
.execute();
response.expectStatusCode(is(OK))
.expectContent(startsWith("{a=[b], c=[d1, d2], e=[f]}"));
assertTrue(driver.close());
}
@Test
public void requireThatFormCharsetIsHonored() throws Exception {
final JettyTestDriver driver = newDriverWithFormPostContentRemoved(new ParameterPrinterRequestHandler(), true);
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED + ";charset=ISO-8859-1")
.setBinaryContent(new byte[]{66, (byte) 230, 114, 61, 98, 108, (byte) 229})
.execute();
response.expectStatusCode(is(OK))
.expectContent(is("{B\u00e6r=[bl\u00e5]}"));
assertTrue(driver.close());
}
@Test
public void requireThatUnknownFormCharsetIsTreatedAsBadRequest() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED + ";charset=FLARBA-GARBA-7")
.setContent("a=b")
.execute();
response.expectStatusCode(is(UNSUPPORTED_MEDIA_TYPE));
assertTrue(driver.close());
}
@Test
public void requireThatFormPostWithPercentEncodedContentIsDecoded() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent("%20%3D%C3%98=%22%25+")
.execute();
response.expectStatusCode(is(OK))
.expectContent(startsWith("{ =\u00d8=[\"% ]}"));
assertTrue(driver.close());
}
@Test
public void requireThatFormPostWithThrowingHandlerIsExceptionSafe() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ThrowingHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent("a=b")
.execute();
response.expectStatusCode(is(INTERNAL_SERVER_ERROR));
assertTrue(driver.close());
}
@Test
public void requireThatMultiPostWorks() throws Exception {
    // A multipart POST through the echo handler must return every part's content.
    final String startTxtContent = "this is a test for POST.";
    final String updaterConfContent
            = "identifier = updater\n"
            + "server_type = gds\n";
    final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoRequestHandler());
    final ResponseValidator response =
            driver.client().newPost("/status.html")
                    .setMultipartContent(
                            newFileBody("start.txt", startTxtContent),
                            newFileBody("updater.conf", updaterConfContent))
                    .execute();
    response.expectStatusCode(is(OK))
            .expectContent(containsString(startTxtContent))
            .expectContent(containsString(updaterConfContent));
    // Every other test closes its driver; the original leaked it here.
    assertTrue(driver.close());
}
@Test
public void requireThatRequestCookiesAreReceived() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new CookiePrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(COOKIE, "foo=bar")
.execute();
response.expectStatusCode(is(OK))
.expectContent(containsString("[foo=bar]"));
assertTrue(driver.close());
}
@Test
public void requireThatSetCookieHeaderIsCorrect() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new CookieSetterRequestHandler(
new Cookie("foo", "bar")
.setDomain(".localhost")
.setHttpOnly(true)
.setPath("/foopath")
.setSecure(true)));
driver.client().get("/status.html")
.expectStatusCode(is(OK))
.expectHeader("Set-Cookie",
is("foo=bar; Path=/foopath; Domain=.localhost; Secure; HttpOnly"));
assertTrue(driver.close());
}
@Test
public void requireThatTimeoutWorks() throws Exception {
final UnresponsiveHandler requestHandler = new UnresponsiveHandler();
final JettyTestDriver driver = JettyTestDriver.newInstance(requestHandler);
driver.client().get("/status.html")
.expectStatusCode(is(GATEWAY_TIMEOUT));
ResponseDispatch.newInstance(OK).dispatch(requestHandler.responseHandler);
assertTrue(driver.close());
}
@Test
public void requireThatHeaderWithNullValueIsOmitted() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoWithHeaderRequestHandler("X-Foo", null));
driver.client().get("/status.html")
.expectStatusCode(is(OK))
.expectNoHeader("X-Foo");
assertTrue(driver.close());
}
@Test
public void requireThatHeaderWithEmptyValueIsAllowed() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoWithHeaderRequestHandler("X-Foo", ""));
driver.client().get("/status.html")
.expectStatusCode(is(OK))
.expectHeader("X-Foo", is(""));
assertTrue(driver.close());
}
@Test
public void requireThatNoConnectionHeaderMeansKeepAliveInHttp11KeepAliveDisabled() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoWithHeaderRequestHandler(CONNECTION, CLOSE));
driver.client().get("/status.html")
.expectHeader(CONNECTION, is(CLOSE));
assertThat(driver.close(), is(true));
}
// Removed a duplicated @Test annotation: @Test is not @Repeatable, so repeating it is a compile error.
@Test
public void requireThatServerCanRespondToSslRequest() throws Exception {
    // Plain GET over TLS (client auth merely WANTed) must succeed.
    Path privateKeyFile = tmpFolder.newFile().toPath();
    Path certificateFile = tmpFolder.newFile().toPath();
    generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
    final JettyTestDriver driver = JettyTestDriver.newInstanceWithSsl(new EchoRequestHandler(), certificateFile, privateKeyFile, TlsClientAuth.WANT);
    driver.client().get("/status.html")
            .expectStatusCode(is(OK));
    assertTrue(driver.close());
}
@Test
public void requireThatServerCanRespondToHttp2Request() throws Exception {
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
MetricConsumerMock metricConsumer = new MetricConsumerMock();
InMemoryConnectionLog connectionLog = new InMemoryConnectionLog();
JettyTestDriver driver = createSslTestDriver(certificateFile, privateKeyFile, metricConsumer, connectionLog);
try (CloseableHttpAsyncClient client = createHttp2Client(driver)) {
String uri = "https:
SimpleHttpResponse response = client.execute(SimpleRequestBuilder.get(uri).build(), null).get();
assertNull(response.getBodyText());
assertEquals(OK, response.getCode());
}
assertTrue(driver.close());
ConnectionLogEntry entry = connectionLog.logEntries().get(0);
assertEquals("HTTP/2.0", entry.httpProtocol().get());
}
@Test
public void requireThatTlsClientAuthenticationEnforcerRejectsRequestsForNonWhitelistedPaths() throws IOException {
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
JettyTestDriver driver = createSslWithTlsClientAuthenticationEnforcer(certificateFile, privateKeyFile);
SSLContext trustStoreOnlyCtx = new SslContextBuilder()
.withTrustStore(certificateFile)
.build();
new SimpleHttpClient(trustStoreOnlyCtx, driver.server().getListenPort(), false)
.get("/dummy.html")
.expectStatusCode(is(UNAUTHORIZED));
assertTrue(driver.close());
}
@Test
public void requireThatTlsClientAuthenticationEnforcerAllowsRequestForWhitelistedPaths() throws IOException {
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
JettyTestDriver driver = JettyTestDriver.newInstanceWithSsl(new EchoRequestHandler(), certificateFile, privateKeyFile, TlsClientAuth.WANT);
SSLContext trustStoreOnlyCtx = new SslContextBuilder()
.withTrustStore(certificateFile)
.build();
new SimpleHttpClient(trustStoreOnlyCtx, driver.server().getListenPort(), false)
.get("/status.html")
.expectStatusCode(is(OK));
assertTrue(driver.close());
}
@Test
public void requireThatConnectedAtReturnsNonZero() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ConnectedAtRequestHandler());
driver.client().get("/status.html")
.expectStatusCode(is(OK))
.expectContent(matchesPattern("\\d{13,}"));
assertThat(driver.close(), is(true));
}
@Test
public void requireThatGzipEncodingRequestsAreAutomaticallyDecompressed() throws Exception {
JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
String requestContent = generateContent('a', 30);
ResponseValidator response = driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setGzipContent(requestContent)
.execute();
response.expectStatusCode(is(OK))
.expectContent(startsWith('{' + requestContent + "=[]}"));
assertTrue(driver.close());
}
@Test
public void requireThatResponseStatsAreCollected() throws Exception {
RequestTypeHandler handler = new RequestTypeHandler();
JettyTestDriver driver = JettyTestDriver.newInstance(handler);
HttpResponseStatisticsCollector statisticsCollector = ((AbstractHandlerContainer) driver.server().server().getHandler())
.getChildHandlerByClass(HttpResponseStatisticsCollector.class);
{
List<HttpResponseStatisticsCollector.StatisticsEntry> stats = statisticsCollector.takeStatistics();
assertEquals(0, stats.size());
}
{
driver.client().newPost("/status.html").execute();
var entry = waitForStatistics(statisticsCollector);
assertEquals("http", entry.scheme);
assertEquals("POST", entry.method);
assertEquals("http.status.2xx", entry.name);
assertEquals("write", entry.requestType);
assertEquals(1, entry.value);
}
{
driver.client().newGet("/status.html").execute();
var entry = waitForStatistics(statisticsCollector);
assertEquals("http", entry.scheme);
assertEquals("GET", entry.method);
assertEquals("http.status.2xx", entry.name);
assertEquals("read", entry.requestType);
assertEquals(1, entry.value);
}
{
handler.setRequestType(Request.RequestType.READ);
driver.client().newPost("/status.html").execute();
var entry = waitForStatistics(statisticsCollector);
assertEquals("Handler overrides request type", "read", entry.requestType);
}
assertTrue(driver.close());
}
/**
 * Polls the collector until it publishes statistics, then asserts exactly one
 * entry was produced and returns it. Statistics are published asynchronously
 * after the response completes, hence the poll loop.
 */
private HttpResponseStatisticsCollector.StatisticsEntry waitForStatistics(HttpResponseStatisticsCollector
                                                                                  statisticsCollector) {
    List<HttpResponseStatisticsCollector.StatisticsEntry> entries = Collections.emptyList();
    int tries = 0;
    while (entries.isEmpty() && tries < 10000) {
        entries = statisticsCollector.takeStatistics();
        if (entries.isEmpty()) {
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                // Original swallowed the interrupt; restore the flag and stop polling so the
                // thread's interrupted status is not silently lost.
                Thread.currentThread().interrupt();
                break;
            }
        }
        tries++;
    }
    assertEquals(1, entries.size());
    return entries.get(0);
}
@Test
public void requireThatConnectionThrottleDoesNotBlockConnectionsBelowThreshold() throws Exception {
JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
new EchoRequestHandler(),
new ServerConfig.Builder(),
new ConnectorConfig.Builder()
.throttling(new Throttling.Builder()
.enabled(true)
.maxAcceptRate(10)
.maxHeapUtilization(1.0)
.maxConnections(10)));
driver.client().get("/status.html")
.expectStatusCode(is(OK));
assertTrue(driver.close());
}
@Test
public void requireThatConnectionIsTrackedInConnectionLog() throws Exception {
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
InMemoryConnectionLog connectionLog = new InMemoryConnectionLog();
Module overrideModule = binder -> binder.bind(ConnectionLog.class).toInstance(connectionLog);
JettyTestDriver driver = JettyTestDriver.newInstanceWithSsl(new OkRequestHandler(), certificateFile, privateKeyFile, TlsClientAuth.NEED, overrideModule);
int listenPort = driver.server().getListenPort();
StringBuilder builder = new StringBuilder();
for (int i = 0; i < 1000; i++) {
builder.append(i);
}
byte[] content = builder.toString().getBytes();
for (int i = 0; i < 100; i++) {
driver.client().newPost("/status.html").setBinaryContent(content).execute()
.expectStatusCode(is(OK));
}
assertTrue(driver.close());
List<ConnectionLogEntry> logEntries = connectionLog.logEntries();
Assertions.assertThat(logEntries).hasSize(1);
ConnectionLogEntry logEntry = logEntries.get(0);
assertEquals(4, UUID.fromString(logEntry.id()).version());
Assertions.assertThat(logEntry.timestamp()).isAfter(Instant.EPOCH);
Assertions.assertThat(logEntry.requests()).hasValue(100L);
Assertions.assertThat(logEntry.responses()).hasValue(100L);
Assertions.assertThat(logEntry.peerAddress()).hasValue("127.0.0.1");
Assertions.assertThat(logEntry.localAddress()).hasValue("127.0.0.1");
Assertions.assertThat(logEntry.localPort()).hasValue(listenPort);
Assertions.assertThat(logEntry.httpBytesReceived()).hasValueSatisfying(value -> Assertions.assertThat(value).isGreaterThan(100000L));
Assertions.assertThat(logEntry.httpBytesSent()).hasValueSatisfying(value -> Assertions.assertThat(value).isGreaterThan(10000L));
Assertions.assertThat(logEntry.sslProtocol()).hasValueSatisfying(TlsContext.ALLOWED_PROTOCOLS::contains);
Assertions.assertThat(logEntry.sslPeerSubject()).hasValue("CN=localhost");
Assertions.assertThat(logEntry.sslCipherSuite()).hasValueSatisfying(cipher -> Assertions.assertThat(cipher).isNotBlank());
Assertions.assertThat(logEntry.sslSessionId()).hasValueSatisfying(sessionId -> Assertions.assertThat(sessionId).hasSize(64));
Assertions.assertThat(logEntry.sslPeerNotBefore()).hasValue(Instant.EPOCH);
Assertions.assertThat(logEntry.sslPeerNotAfter()).hasValue(Instant.EPOCH.plus(100_000, ChronoUnit.DAYS));
}
@Test
public void requireThatRequestIsTrackedInAccessLog() throws IOException, InterruptedException {
BlockingQueueRequestLog requestLogMock = new BlockingQueueRequestLog();
JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
new EchoRequestHandler(),
new ServerConfig.Builder(),
new ConnectorConfig.Builder(),
binder -> binder.bind(RequestLog.class).toInstance(requestLogMock));
driver.client().newPost("/status.html").setContent("abcdef").execute().expectStatusCode(is(OK));
RequestLogEntry entry = requestLogMock.poll(Duration.ofSeconds(5));
Assertions.assertThat(entry.statusCode()).hasValue(200);
Assertions.assertThat(entry.requestSize()).hasValue(6);
assertThat(driver.close(), is(true));
}
@Test
public void requireThatRequestsPerConnectionMetricIsAggregated() throws IOException {
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
var metricConsumer = new MetricConsumerMock();
InMemoryConnectionLog connectionLog = new InMemoryConnectionLog();
JettyTestDriver driver = createSslTestDriver(certificateFile, privateKeyFile, metricConsumer, connectionLog);
driver.client().get("/").expectStatusCode(is(OK));
assertThat(driver.close(), is(true));
verify(metricConsumer.mockitoMock(), atLeast(1))
.set(MetricDefinitions.REQUESTS_PER_CONNECTION, 1L, MetricConsumerMock.STATIC_CONTEXT);
}
@Test
public void uriWithEmptyPathSegmentIsAllowed() throws Exception {
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
MetricConsumerMock metricConsumer = new MetricConsumerMock();
InMemoryConnectionLog connectionLog = new InMemoryConnectionLog();
JettyTestDriver driver = createSslTestDriver(certificateFile, privateKeyFile, metricConsumer, connectionLog);
String uriPath = "/path/with/empty
driver.client().get(uriPath).expectStatusCode(is(OK));
try (CloseableHttpAsyncClient client = createHttp2Client(driver)) {
String uri = "https:
SimpleHttpResponse response = client.execute(SimpleRequestBuilder.get(uri).build(), null).get();
assertEquals(OK, response.getCode());
}
assertTrue(driver.close());
}
private static CloseableHttpAsyncClient createHttp2Client(JettyTestDriver driver) {
TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create()
.setSslContext(driver.sslContext())
.build();
var client = H2AsyncClientBuilder.create()
.disableAutomaticRetries()
.setTlsStrategy(tlsStrategy)
.build();
client.start();
return client;
}
private static JettyTestDriver createSslWithTlsClientAuthenticationEnforcer(Path certificateFile, Path privateKeyFile) {
ConnectorConfig.Builder connectorConfig = new ConnectorConfig.Builder()
.tlsClientAuthEnforcer(
new ConnectorConfig.TlsClientAuthEnforcer.Builder()
.enable(true)
.pathWhitelist("/status.html"))
.ssl(new ConnectorConfig.Ssl.Builder()
.enabled(true)
.clientAuth(ConnectorConfig.Ssl.ClientAuth.Enum.WANT_AUTH)
.privateKeyFile(privateKeyFile.toString())
.certificateFile(certificateFile.toString())
.caCertificateFile(certificateFile.toString()));
return JettyTestDriver.newConfiguredInstance(
new EchoRequestHandler(),
new ServerConfig.Builder().connectionLog(new ServerConfig.ConnectionLog.Builder().enabled(true)),
connectorConfig,
binder -> {});
}
private static RequestHandler mockRequestHandler() {
final RequestHandler mockRequestHandler = mock(RequestHandler.class);
when(mockRequestHandler.refer()).thenReturn(References.NOOP_REFERENCE);
return mockRequestHandler;
}
private static String generateContent(final char c, final int len) {
final StringBuilder ret = new StringBuilder(len);
for (int i = 0; i < len; ++i) {
ret.append(c);
}
return ret.toString();
}
private static JettyTestDriver newDriverWithFormPostContentRemoved(RequestHandler requestHandler,
boolean removeFormPostBody) throws Exception {
return JettyTestDriver.newConfiguredInstance(
requestHandler,
new ServerConfig.Builder()
.removeRawPostBodyForWwwUrlEncodedPost(removeFormPostBody),
new ConnectorConfig.Builder());
}
private static FormBodyPart newFileBody(final String fileName, final String fileContent) {
return FormBodyPartBuilder.create()
.setBody(
new StringBody(fileContent, ContentType.TEXT_PLAIN) {
@Override public String getFilename() { return fileName; }
@Override public String getMimeType() { return ""; }
@Override public String getCharset() { return null; }
})
.setName(fileName)
.build();
}
private static class ConnectedAtRequestHandler extends AbstractRequestHandler {
@Override
public ContentChannel handleRequest(final Request request, final ResponseHandler handler) {
final HttpRequest httpRequest = (HttpRequest)request;
final String connectedAt = String.valueOf(httpRequest.getConnectedAt(TimeUnit.MILLISECONDS));
final ContentChannel ch = handler.handleResponse(new Response(OK));
ch.write(ByteBuffer.wrap(connectedAt.getBytes(StandardCharsets.UTF_8)), null);
ch.close(null);
return null;
}
}
private static class CookieSetterRequestHandler extends AbstractRequestHandler {
final Cookie cookie;
CookieSetterRequestHandler(final Cookie cookie) {
this.cookie = cookie;
}
@Override
public ContentChannel handleRequest(final Request request, final ResponseHandler handler) {
final HttpResponse response = HttpResponse.newInstance(OK);
response.encodeSetCookieHeader(Collections.singletonList(cookie));
ResponseDispatch.newInstance(response).dispatch(handler);
return null;
}
}
private static class CookiePrinterRequestHandler extends AbstractRequestHandler {
@Override
public ContentChannel handleRequest(final Request request, final ResponseHandler handler) {
final List<Cookie> cookies = new ArrayList<>(((HttpRequest)request).decodeCookieHeader());
Collections.sort(cookies, new CookieComparator());
final ContentChannel out = ResponseDispatch.newInstance(Response.Status.OK).connect(handler);
out.write(StandardCharsets.UTF_8.encode(cookies.toString()), null);
out.close(null);
return null;
}
}
private static class ParameterPrinterRequestHandler extends AbstractRequestHandler {
private static final CompletionHandler NULL_COMPLETION_HANDLER = null;
@Override
public ContentChannel handleRequest(Request request, ResponseHandler handler) {
Map<String, List<String>> parameters = new TreeMap<>(((HttpRequest)request).parameters());
ContentChannel responseContentChannel = ResponseDispatch.newInstance(Response.Status.OK).connect(handler);
responseContentChannel.write(ByteBuffer.wrap(parameters.toString().getBytes(StandardCharsets.UTF_8)),
NULL_COMPLETION_HANDLER);
return responseContentChannel;
}
}
private static class RequestTypeHandler extends AbstractRequestHandler {
private Request.RequestType requestType = null;
public void setRequestType(Request.RequestType requestType) {
this.requestType = requestType;
}
@Override
public ContentChannel handleRequest(Request request, ResponseHandler handler) {
Response response = new Response(OK);
response.setRequestType(requestType);
return handler.handleResponse(response);
}
}
private static class ThrowingHandler extends AbstractRequestHandler {
@Override
public ContentChannel handleRequest(final Request request, final ResponseHandler handler) {
throw new RuntimeException("Deliberately thrown exception");
}
}
private static class UnresponsiveHandler extends AbstractRequestHandler {
ResponseHandler responseHandler;
@Override
public ContentChannel handleRequest(final Request request, final ResponseHandler handler) {
request.setTimeout(100, TimeUnit.MILLISECONDS);
responseHandler = handler;
return null;
}
}
private static class OkRequestHandler extends AbstractRequestHandler {
@Override
public ContentChannel handleRequest(Request request, ResponseHandler handler) {
Response response = new Response(OK);
handler.handleResponse(response).close(null);
return NullContent.INSTANCE;
}
}
private static class EchoWithHeaderRequestHandler extends AbstractRequestHandler {
final String headerName;
final String headerValue;
EchoWithHeaderRequestHandler(final String headerName, final String headerValue) {
this.headerName = headerName;
this.headerValue = headerValue;
}
@Override
public ContentChannel handleRequest(final Request request, final ResponseHandler handler) {
final Response response = new Response(OK);
response.headers().add(headerName, headerValue);
return handler.handleResponse(response);
}
}
private static Module newBindingSetSelector(final String setName) {
return new AbstractModule() {
@Override
protected void configure() {
bind(BindingSetSelector.class).toInstance(new BindingSetSelector() {
@Override
public String select(final URI uri) {
return setName;
}
});
}
};
}
private static class CookieComparator implements Comparator<Cookie> {
@Override
public int compare(final Cookie lhs, final Cookie rhs) {
return lhs.getName().compareTo(rhs.getName());
}
}
} | |
Yes, I missed that one. Thanks! | private boolean isExpired(Deployment deployment, ApplicationId instance) {
if (deployment.zone().environment().isProduction()) return false;
Optional<Duration> ttl = controller().zoneRegistry().getDeploymentTimeToLive(deployment.zone());
if (ttl.isEmpty()) return false;
Optional<JobId> jobId = JobType.from(controller().system(), deployment.zone())
.map(type -> new JobId(instance, type));
if (jobId.isEmpty()) return false;
return controller().jobController().jobStatus(jobId.get())
.runs().descendingMap().values().stream()
.filter(run -> ! run.isRedeployment())
.findFirst()
.map(run -> run.start().plus(ttl.get()).isBefore(controller().clock().instant()))
.orElse(false);
} | .filter(run -> ! run.isRedeployment()) | private boolean isExpired(Deployment deployment, ApplicationId instance) {
if (deployment.zone().environment().isProduction()) return false;
Optional<Duration> ttl = controller().zoneRegistry().getDeploymentTimeToLive(deployment.zone());
if (ttl.isEmpty()) return false;
Optional<JobId> jobId = JobType.from(controller().system(), deployment.zone())
.map(type -> new JobId(instance, type));
if (jobId.isEmpty()) return false;
return controller().jobController().jobStarts(jobId.get()).stream().findFirst()
.map(start -> start.plus(ttl.get()).isBefore(controller().clock().instant()))
.orElse(false);
} | class DeploymentExpirer extends ControllerMaintainer {
public DeploymentExpirer(Controller controller, Duration interval) {
super(controller, interval);
}
@Override
protected double maintain() {
int attempts = 0;
int failures = 0;
for (Application application : controller().applications().readable()) {
for (Instance instance : application.instances().values())
for (Deployment deployment : instance.deployments().values()) {
if (!isExpired(deployment, instance.id())) continue;
try {
log.log(Level.INFO, "Expiring deployment of " + instance.id() + " in " + deployment.zone());
attempts++;
controller().applications().deactivate(instance.id(), deployment.zone());
} catch (Exception e) {
failures++;
log.log(Level.WARNING, "Could not expire " + deployment + " of " + instance +
": " + Exceptions.toMessageString(e) + ". Retrying in " +
interval());
}
}
}
return asSuccessFactor(attempts, failures);
}
/** Returns whether given deployment has expired according to its TTL */
} | class DeploymentExpirer extends ControllerMaintainer {
public DeploymentExpirer(Controller controller, Duration interval) {
super(controller, interval);
}
@Override
protected double maintain() {
int attempts = 0;
int failures = 0;
for (Application application : controller().applications().readable()) {
for (Instance instance : application.instances().values())
for (Deployment deployment : instance.deployments().values()) {
if (!isExpired(deployment, instance.id())) continue;
try {
log.log(Level.INFO, "Expiring deployment of " + instance.id() + " in " + deployment.zone());
attempts++;
controller().applications().deactivate(instance.id(), deployment.zone());
} catch (Exception e) {
failures++;
log.log(Level.WARNING, "Could not expire " + deployment + " of " + instance +
": " + Exceptions.toMessageString(e) + ". Retrying in " +
interval());
}
}
}
return asSuccessFactor(attempts, failures);
}
/** Returns whether given deployment has expired according to its TTL */
} |
I like the objective function, but I'm keeping the weights :) | public void testNight() {
assertEquals(14, mostLikelyWeeHour(new int[]{ 0, 1, 2, 3, 4, 5, 6 }));
assertEquals(12, mostLikelyWeeHour(new int[]{ 22, 23, 0, 1, 2, 3, 4 }));
assertEquals(18, mostLikelyWeeHour(new int[]{ 6, 5, 4, 3, 2, 1, 0 }));
assertEquals(13, mostLikelyWeeHour(new int[]{ 0, 12, 0, 12, 0, 12, 0, 12, 0, 12, 0, 12 }));
} | assertEquals(14, mostLikelyWeeHour(new int[]{ 0, 1, 2, 3, 4, 5, 6 })); | public void testNight() {
assertEquals(16, mostLikelyWeeHour(new int[]{ 0, 1, 2, 3, 4, 5, 6 }));
assertEquals(14, mostLikelyWeeHour(new int[]{ 22, 23, 0, 1, 2, 3, 4 }));
assertEquals(18, mostLikelyWeeHour(new int[]{ 6, 5, 4, 3, 2, 1, 0 }));
assertEquals(20, mostLikelyWeeHour(new int[]{ 0, 12, 0, 12, 0, 12, 0, 12, 0, 12, 0, 11 }));
} | class DeploymentUpgraderTest {
private final DeploymentTester tester = new DeploymentTester();
@Test
public void testDeploymentUpgrading() {
ZoneId devZone = ZoneId.from(Environment.dev, RegionName.from("us-east-1"));
DeploymentUpgrader upgrader = new DeploymentUpgrader(tester.controller(), Duration.ofDays(1));
var devApp = tester.newDeploymentContext("tenant1", "app1", "default");
var prodApp = tester.newDeploymentContext("tenant2", "app2", "default");
ApplicationPackage appPackage = new ApplicationPackageBuilder().region("us-west-1").build();
Version systemVersion = tester.controller().readSystemVersion();
Instant start = tester.clock().instant().truncatedTo(MILLIS);
devApp.runJob(devUsEast1, appPackage);
prodApp.submit(appPackage).deploy();
assertEquals(systemVersion, tester.jobs().last(devApp.instanceId(), devUsEast1).get().versions().targetPlatform());
assertEquals(systemVersion, tester.jobs().last(prodApp.instanceId(), productionUsWest1).get().versions().targetPlatform());
upgrader.maintain();
assertEquals(start, tester.jobs().last(devApp.instanceId(), devUsEast1).get().start());
assertEquals(start, tester.jobs().last(prodApp.instanceId(), productionUsWest1).get().start());
tester.controllerTester().upgradeSystem(new Version(7, 8, 9));
upgrader.maintain();
assertEquals(start, tester.jobs().last(devApp.instanceId(), devUsEast1).get().start());
assertEquals(start, tester.jobs().last(prodApp.instanceId(), productionUsWest1).get().start());
tester.clock().advance(Duration.ofHours(13));
upgrader.maintain();
assertEquals(start, tester.jobs().last(devApp.instanceId(), devUsEast1).get().start());
assertEquals(start, tester.jobs().last(prodApp.instanceId(), productionUsWest1).get().start());
tester.clock().advance(Duration.ofHours(21));
upgrader.maintain();
assertEquals(start, tester.jobs().last(devApp.instanceId(), devUsEast1).get().start());
assertEquals(start, tester.jobs().last(prodApp.instanceId(), productionUsWest1).get().start());
tester.clock().advance(Duration.ofHours(3));
upgrader.maintain();
assertEquals(tester.clock().instant().truncatedTo(MILLIS), tester.jobs().last(devApp.instanceId(), devUsEast1).get().start());
assertTrue(tester.jobs().last(devApp.instanceId(), devUsEast1).get().isRedeployment());
assertEquals(start, tester.jobs().last(prodApp.instanceId(), productionUsWest1).get().start());
devApp.runJob(devUsEast1);
tester.controllerTester().upgradeSystem(new Version(7, 9, 11));
tester.clock().advance(Duration.ofHours(48));
upgrader.maintain();
assertEquals(tester.clock().instant().truncatedTo(MILLIS), tester.jobs().last(devApp.instanceId(), devUsEast1).get().start());
}
@Test
} | class DeploymentUpgraderTest {
private final DeploymentTester tester = new DeploymentTester();
@Test
public void testDeploymentUpgrading() {
ZoneId devZone = ZoneId.from(Environment.dev, RegionName.from("us-east-1"));
DeploymentUpgrader upgrader = new DeploymentUpgrader(tester.controller(), Duration.ofDays(1));
var devApp = tester.newDeploymentContext("tenant1", "app1", "default");
var prodApp = tester.newDeploymentContext("tenant2", "app2", "default");
ApplicationPackage appPackage = new ApplicationPackageBuilder().region("us-west-1").build();
Version systemVersion = tester.controller().readSystemVersion();
Instant start = tester.clock().instant().truncatedTo(MILLIS);
devApp.runJob(devUsEast1, appPackage);
prodApp.submit(appPackage).deploy();
assertEquals(systemVersion, tester.jobs().last(devApp.instanceId(), devUsEast1).get().versions().targetPlatform());
assertEquals(systemVersion, tester.jobs().last(prodApp.instanceId(), productionUsWest1).get().versions().targetPlatform());
upgrader.maintain();
assertEquals(start, tester.jobs().last(devApp.instanceId(), devUsEast1).get().start());
assertEquals(start, tester.jobs().last(prodApp.instanceId(), productionUsWest1).get().start());
tester.controllerTester().upgradeSystem(new Version(7, 8, 9));
upgrader.maintain();
assertEquals(start, tester.jobs().last(devApp.instanceId(), devUsEast1).get().start());
assertEquals(start, tester.jobs().last(prodApp.instanceId(), productionUsWest1).get().start());
tester.clock().advance(Duration.ofHours(13));
upgrader.maintain();
assertEquals(start, tester.jobs().last(devApp.instanceId(), devUsEast1).get().start());
assertEquals(start, tester.jobs().last(prodApp.instanceId(), productionUsWest1).get().start());
tester.clock().advance(Duration.ofHours(21));
upgrader.maintain();
assertEquals(start, tester.jobs().last(devApp.instanceId(), devUsEast1).get().start());
assertEquals(start, tester.jobs().last(prodApp.instanceId(), productionUsWest1).get().start());
tester.clock().advance(Duration.ofHours(3));
upgrader.maintain();
assertEquals(tester.clock().instant().truncatedTo(MILLIS), tester.jobs().last(devApp.instanceId(), devUsEast1).get().start());
assertTrue(tester.jobs().last(devApp.instanceId(), devUsEast1).get().isRedeployment());
assertEquals(start, tester.jobs().last(prodApp.instanceId(), productionUsWest1).get().start());
devApp.runJob(devUsEast1);
tester.controllerTester().upgradeSystem(new Version(7, 9, 11));
tester.clock().advance(Duration.ofHours(48));
upgrader.maintain();
assertEquals(tester.clock().instant().truncatedTo(MILLIS), tester.jobs().last(devApp.instanceId(), devUsEast1).get().start());
}
@Test
} |
Might be nice with a log entry here, as in the VcmrMaintainer | private void approveChangeRequest(ChangeRequest changeRequest) {
if (changeRequest.getApproval() == ChangeRequest.Approval.REQUESTED)
changeRequestClient.approveChangeRequest(changeRequest);
} | changeRequestClient.approveChangeRequest(changeRequest); | private void approveChangeRequest(ChangeRequest changeRequest) {
if (changeRequest.getApproval() == ChangeRequest.Approval.REQUESTED)
changeRequestClient.approveChangeRequest(changeRequest);
} | class ChangeRequestMaintainer extends ControllerMaintainer {
private final Logger logger = Logger.getLogger(ChangeRequestMaintainer.class.getName());
private final ChangeRequestClient changeRequestClient;
private final CuratorDb curator;
private final NodeRepository nodeRepository;
public ChangeRequestMaintainer(Controller controller, Duration interval) {
super(controller, interval, null, SystemName.allOf(Predicate.not(SystemName::isPublic)));
this.changeRequestClient = controller.serviceRegistry().changeRequestClient();
this.curator = controller.curator();
this.nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
}
@Override
protected double maintain() {
var currentChangeRequests = pruneOldChangeRequests();
var changeRequests = changeRequestClient.getChangeRequests(currentChangeRequests);
logger.fine(() -> "Found requests: " + changeRequests);
storeChangeRequests(changeRequests);
return 1.0;
}
private void storeChangeRequests(List<ChangeRequest> changeRequests) {
var existingChangeRequests = curator.readChangeRequests()
.stream()
.collect(Collectors.toMap(ChangeRequest::getId, Function.identity()));
var hostsByZone = hostsByZone();
try (var lock = curator.lockChangeRequests()) {
changeRequests.forEach(changeRequest -> {
var optionalZone = inferZone(changeRequest, hostsByZone);
optionalZone.ifPresentOrElse(zone -> {
var vcmr = existingChangeRequests
.getOrDefault(changeRequest.getId(), new VespaChangeRequest(changeRequest, zone))
.withSource(changeRequest.getChangeRequestSource())
.withApproval(changeRequest.getApproval());
logger.fine(() -> "Storing " + vcmr);
curator.writeChangeRequest(vcmr);
},
() -> approveChangeRequest(changeRequest));
});
}
}
private List<ChangeRequest> pruneOldChangeRequests() {
List<ChangeRequest> currentChangeRequests = new ArrayList<>();
try (var lock = curator.lockChangeRequests()) {
for (var changeRequest : curator.readChangeRequests()) {
if (shouldDeleteChangeRequest(changeRequest.getChangeRequestSource())) {
curator.deleteChangeRequest(changeRequest);
} else {
currentChangeRequests.add(changeRequest);
}
}
}
return currentChangeRequests;
}
private Map<ZoneId, List<String>> hostsByZone() {
return controller().zoneRegistry()
.zones()
.reachable()
.in(Environment.prod)
.ids()
.stream()
.collect(Collectors.toMap(
zone -> zone,
zone -> nodeRepository.list(zone, NodeFilter.all())
.stream()
.map(node -> node.hostname().value())
.collect(Collectors.toList())
));
}
private Optional<ZoneId> inferZone(ChangeRequest changeRequest, Map<ZoneId, List<String>> hostsByZone) {
return hostsByZone.entrySet().stream()
.filter(entry -> !Collections.disjoint(entry.getValue(), changeRequest.getImpactedHosts()))
.map(Map.Entry::getKey)
.findFirst();
}
private boolean shouldDeleteChangeRequest(ChangeRequestSource source) {
return source.isClosed() &&
source.getPlannedStartTime()
.plus(Duration.ofDays(7))
.isBefore(ZonedDateTime.now());
}
} | class ChangeRequestMaintainer extends ControllerMaintainer {
private final Logger logger = Logger.getLogger(ChangeRequestMaintainer.class.getName());
private final ChangeRequestClient changeRequestClient;
private final CuratorDb curator;
private final NodeRepository nodeRepository;
public ChangeRequestMaintainer(Controller controller, Duration interval) {
super(controller, interval, null, SystemName.allOf(Predicate.not(SystemName::isPublic)));
this.changeRequestClient = controller.serviceRegistry().changeRequestClient();
this.curator = controller.curator();
this.nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
}
@Override
protected double maintain() {
var currentChangeRequests = pruneOldChangeRequests();
var changeRequests = changeRequestClient.getChangeRequests(currentChangeRequests);
logger.fine(() -> "Found requests: " + changeRequests);
storeChangeRequests(changeRequests);
return 1.0;
}
private void storeChangeRequests(List<ChangeRequest> changeRequests) {
var existingChangeRequests = curator.readChangeRequests()
.stream()
.collect(Collectors.toMap(ChangeRequest::getId, Function.identity()));
var hostsByZone = hostsByZone();
try (var lock = curator.lockChangeRequests()) {
changeRequests.forEach(changeRequest -> {
var optionalZone = inferZone(changeRequest, hostsByZone);
optionalZone.ifPresentOrElse(zone -> {
var vcmr = existingChangeRequests
.getOrDefault(changeRequest.getId(), new VespaChangeRequest(changeRequest, zone))
.withSource(changeRequest.getChangeRequestSource())
.withApproval(changeRequest.getApproval());
logger.fine(() -> "Storing " + vcmr);
curator.writeChangeRequest(vcmr);
},
() -> approveChangeRequest(changeRequest));
});
}
}
private List<ChangeRequest> pruneOldChangeRequests() {
List<ChangeRequest> currentChangeRequests = new ArrayList<>();
try (var lock = curator.lockChangeRequests()) {
for (var changeRequest : curator.readChangeRequests()) {
if (shouldDeleteChangeRequest(changeRequest.getChangeRequestSource())) {
curator.deleteChangeRequest(changeRequest);
} else {
currentChangeRequests.add(changeRequest);
}
}
}
return currentChangeRequests;
}
private Map<ZoneId, List<String>> hostsByZone() {
return controller().zoneRegistry()
.zones()
.reachable()
.in(Environment.prod)
.ids()
.stream()
.collect(Collectors.toMap(
zone -> zone,
zone -> nodeRepository.list(zone, NodeFilter.all())
.stream()
.map(node -> node.hostname().value())
.collect(Collectors.toList())
));
}
private Optional<ZoneId> inferZone(ChangeRequest changeRequest, Map<ZoneId, List<String>> hostsByZone) {
return hostsByZone.entrySet().stream()
.filter(entry -> !Collections.disjoint(entry.getValue(), changeRequest.getImpactedHosts()))
.map(Map.Entry::getKey)
.findFirst();
}
private boolean shouldDeleteChangeRequest(ChangeRequestSource source) {
return source.isClosed() &&
source.getPlannedStartTime()
.plus(Duration.ofDays(7))
.isBefore(ZonedDateTime.now());
}
} |
Good point | private void approveChangeRequest(ChangeRequest changeRequest) {
if (changeRequest.getApproval() == ChangeRequest.Approval.REQUESTED)
changeRequestClient.approveChangeRequest(changeRequest);
} | changeRequestClient.approveChangeRequest(changeRequest); | private void approveChangeRequest(ChangeRequest changeRequest) {
if (changeRequest.getApproval() == ChangeRequest.Approval.REQUESTED)
changeRequestClient.approveChangeRequest(changeRequest);
} | class ChangeRequestMaintainer extends ControllerMaintainer {
private final Logger logger = Logger.getLogger(ChangeRequestMaintainer.class.getName());
private final ChangeRequestClient changeRequestClient;
private final CuratorDb curator;
private final NodeRepository nodeRepository;
public ChangeRequestMaintainer(Controller controller, Duration interval) {
super(controller, interval, null, SystemName.allOf(Predicate.not(SystemName::isPublic)));
this.changeRequestClient = controller.serviceRegistry().changeRequestClient();
this.curator = controller.curator();
this.nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
}
@Override
protected double maintain() {
var currentChangeRequests = pruneOldChangeRequests();
var changeRequests = changeRequestClient.getChangeRequests(currentChangeRequests);
logger.fine(() -> "Found requests: " + changeRequests);
storeChangeRequests(changeRequests);
return 1.0;
}
private void storeChangeRequests(List<ChangeRequest> changeRequests) {
var existingChangeRequests = curator.readChangeRequests()
.stream()
.collect(Collectors.toMap(ChangeRequest::getId, Function.identity()));
var hostsByZone = hostsByZone();
try (var lock = curator.lockChangeRequests()) {
changeRequests.forEach(changeRequest -> {
var optionalZone = inferZone(changeRequest, hostsByZone);
optionalZone.ifPresentOrElse(zone -> {
var vcmr = existingChangeRequests
.getOrDefault(changeRequest.getId(), new VespaChangeRequest(changeRequest, zone))
.withSource(changeRequest.getChangeRequestSource())
.withApproval(changeRequest.getApproval());
logger.fine(() -> "Storing " + vcmr);
curator.writeChangeRequest(vcmr);
},
() -> approveChangeRequest(changeRequest));
});
}
}
private List<ChangeRequest> pruneOldChangeRequests() {
List<ChangeRequest> currentChangeRequests = new ArrayList<>();
try (var lock = curator.lockChangeRequests()) {
for (var changeRequest : curator.readChangeRequests()) {
if (shouldDeleteChangeRequest(changeRequest.getChangeRequestSource())) {
curator.deleteChangeRequest(changeRequest);
} else {
currentChangeRequests.add(changeRequest);
}
}
}
return currentChangeRequests;
}
private Map<ZoneId, List<String>> hostsByZone() {
return controller().zoneRegistry()
.zones()
.reachable()
.in(Environment.prod)
.ids()
.stream()
.collect(Collectors.toMap(
zone -> zone,
zone -> nodeRepository.list(zone, NodeFilter.all())
.stream()
.map(node -> node.hostname().value())
.collect(Collectors.toList())
));
}
private Optional<ZoneId> inferZone(ChangeRequest changeRequest, Map<ZoneId, List<String>> hostsByZone) {
return hostsByZone.entrySet().stream()
.filter(entry -> !Collections.disjoint(entry.getValue(), changeRequest.getImpactedHosts()))
.map(Map.Entry::getKey)
.findFirst();
}
private boolean shouldDeleteChangeRequest(ChangeRequestSource source) {
return source.isClosed() &&
source.getPlannedStartTime()
.plus(Duration.ofDays(7))
.isBefore(ZonedDateTime.now());
}
} | class ChangeRequestMaintainer extends ControllerMaintainer {
private final Logger logger = Logger.getLogger(ChangeRequestMaintainer.class.getName());
private final ChangeRequestClient changeRequestClient;
private final CuratorDb curator;
private final NodeRepository nodeRepository;
public ChangeRequestMaintainer(Controller controller, Duration interval) {
super(controller, interval, null, SystemName.allOf(Predicate.not(SystemName::isPublic)));
this.changeRequestClient = controller.serviceRegistry().changeRequestClient();
this.curator = controller.curator();
this.nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
}
@Override
protected double maintain() {
var currentChangeRequests = pruneOldChangeRequests();
var changeRequests = changeRequestClient.getChangeRequests(currentChangeRequests);
logger.fine(() -> "Found requests: " + changeRequests);
storeChangeRequests(changeRequests);
return 1.0;
}
/**
 * Persists the given change requests, merging each with any previously stored entry
 * (matched by id). Requests whose zone cannot be inferred from their impacted hosts
 * are handed to approveChangeRequest (sibling method) instead of being stored.
 */
private void storeChangeRequests(List<ChangeRequest> changeRequests) {
    // Index already-persisted requests by id so updates keep existing state.
    var persisted = curator.readChangeRequests()
                           .stream()
                           .collect(Collectors.toMap(ChangeRequest::getId, Function.identity()));
    var zoneHosts = hostsByZone();
    try (var lock = curator.lockChangeRequests()) {
        for (var changeRequest : changeRequests) {
            var zone = inferZone(changeRequest, zoneHosts);
            if (zone.isPresent()) {
                var vcmr = persisted
                        .getOrDefault(changeRequest.getId(), new VespaChangeRequest(changeRequest, zone.get()))
                        .withSource(changeRequest.getChangeRequestSource())
                        .withApproval(changeRequest.getApproval());
                logger.fine(() -> "Storing " + vcmr);
                curator.writeChangeRequest(vcmr);
            } else {
                approveChangeRequest(changeRequest);
            }
        }
    }
}
/** Deletes change requests that are closed and stale; returns the ones still tracked. */
private List<ChangeRequest> pruneOldChangeRequests() {
    List<ChangeRequest> kept = new ArrayList<>();
    try (var lock = curator.lockChangeRequests()) {
        for (var changeRequest : curator.readChangeRequests()) {
            if (!shouldDeleteChangeRequest(changeRequest.getChangeRequestSource())) {
                kept.add(changeRequest);
                continue;
            }
            curator.deleteChangeRequest(changeRequest);
        }
    }
    return kept;
}
/** Builds a zone -> node-hostnames index over all reachable prod zones. */
private Map<ZoneId, List<String>> hostsByZone() {
    Map<ZoneId, List<String>> index = new HashMap<>();
    var prodZones = controller().zoneRegistry().zones().reachable().in(Environment.prod).ids();
    for (ZoneId zoneId : prodZones) {
        List<String> nodeHostnames = nodeRepository.list(zoneId, NodeFilter.all())
                                                   .stream()
                                                   .map(node -> node.hostname().value())
                                                   .collect(Collectors.toList());
        index.put(zoneId, nodeHostnames);
    }
    return index;
}
/** The zone whose host list intersects the request's impacted hosts, if one exists. */
private Optional<ZoneId> inferZone(ChangeRequest changeRequest, Map<ZoneId, List<String>> hostsByZone) {
    List<String> impacted = changeRequest.getImpactedHosts();
    return hostsByZone.entrySet().stream()
                      .filter(zoneAndHosts -> !Collections.disjoint(zoneAndHosts.getValue(), impacted))
                      .map(Map.Entry::getKey)
                      .findFirst();
}
/** True when the source is closed and its planned start lies more than a week in the past. */
private boolean shouldDeleteChangeRequest(ChangeRequestSource source) {
    if (!source.isClosed())
        return false;
    return source.getPlannedStartTime()
                 .plus(Duration.ofDays(7))
                 .isBefore(ZonedDateTime.now());
}
} |
Not the cleanest API, but I take it you're right. | private static void gracefulShutdown(Connection connection, String reason) {
// HTTP/1: mark the response generator non-persistent, so the current exchange is the
// connection's last (Jetty will emit Connection: close).
if (connection instanceof HttpConnection) {
    HttpConnection http1 = (HttpConnection) connection;
    http1.getGenerator().setPersistent(false);
// HTTP/2: close the session with NO_ERROR and the given reason -- presumably sends a
// graceful GOAWAY so in-flight streams finish; TODO confirm against Jetty HTTP2Session docs.
} else if (connection instanceof HTTP2ServerConnection) {
    HTTP2ServerConnection http2 = (HTTP2ServerConnection) connection;
    http2.getSession().close(ErrorCode.NO_ERROR.code, reason, Callback.NOOP);
}
} | http2.getSession().close(ErrorCode.NO_ERROR.code, reason, Callback.NOOP); | private static void gracefulShutdown(Connection connection, String reason) {
// HTTP/1: disable persistence on the generator so the connection closes after this exchange.
if (connection instanceof HttpConnection) {
    HttpConnection http1 = (HttpConnection) connection;
    http1.getGenerator().setPersistent(false);
// HTTP/2: session close with NO_ERROR + reason -- graceful shutdown path (GOAWAY);
// NOTE(review): confirm stream draining semantics in Jetty's HTTP2Session.close.
} else if (connection instanceof HTTP2ServerConnection) {
    HTTP2ServerConnection http2 = (HTTP2ServerConnection) connection;
    http2.getSession().close(ErrorCode.NO_ERROR.code, reason, Callback.NOOP);
}
} | class HttpRequestDispatch {
private static final Logger log = Logger.getLogger(HttpRequestDispatch.class.getName());
// Suffix used to extract an explicit charset from a form-post content type.
private final static String CHARSET_ANNOTATION = ";charset=";
// Per-request collaborators, all wired in the constructor.
private final JDiscContext jDiscContext;
private final AsyncContext async;
private final Request jettyRequest;
private final ServletResponseController servletResponseController;
private final RequestHandler requestHandler;
private final RequestMetricReporter metricReporter;
/**
 * Wires up handling of a single request/response exchange and switches the servlet
 * request into asynchronous mode.
 */
public HttpRequestDispatch(JDiscContext jDiscContext,
                           AccessLogEntry accessLogEntry,
                           Context metricContext,
                           HttpServletRequest servletRequest,
                           HttpServletResponse servletResponse) throws IOException {
    this.jDiscContext = jDiscContext;
    requestHandler = newRequestHandler(jDiscContext, accessLogEntry, servletRequest);
    this.jettyRequest = (Request) servletRequest;
    this.metricReporter = new RequestMetricReporter(jDiscContext.metric, metricContext, jettyRequest.getTimeStamp());
    this.servletResponseController = new ServletResponseController(servletRequest,
                                                                   servletResponse,
                                                                   jDiscContext.janitor,
                                                                   metricReporter,
                                                                   jDiscContext.developerMode());
    // Checked before async processing starts so this response can still signal the shutdown
    // (Connection: close / GOAWAY) when a per-connection limit has been reached.
    shutdownConnectionGracefullyIfThresholdReached(jettyRequest);
    this.async = servletRequest.startAsync();
    async.setTimeout(0); // 0 disables the container's async timeout -- timeouts are presumably enforced elsewhere; TODO confirm
    metricReporter.uriLength(jettyRequest.getOriginalURI().length());
}
/**
 * Starts request processing: dispatches to the request handler and registers the
 * completion/error plumbing between request reader and response controller.
 * Failures during dispatch are converted into an error response.
 */
public void dispatch() throws IOException {
    ServletRequestReader servletRequestReader;
    try {
        servletRequestReader = handleRequest();
    } catch (Throwable throwable) {
        // Handler setup failed: answer with an error response, then complete the
        // exchange once the response side has finished.
        servletResponseController.trySendError(throwable);
        servletResponseController.finishedFuture().whenComplete((result, exception) ->
                completeRequestCallback.accept(null, throwable));
        return;
    }
    try {
        // Propagate failures both ways: a failed read aborts the response, and a
        // failed response aborts the reader.
        onError(servletRequestReader.finishedFuture, servletResponseController::trySendError);
        onError(servletResponseController.finishedFuture(), servletRequestReader::onError);
        // The exchange completes only when both reader and response are done.
        CompletableFuture.allOf(servletRequestReader.finishedFuture, servletResponseController.finishedFuture())
                .whenComplete(completeRequestCallback);
    } catch (Throwable throwable) {
        log.log(Level.WARNING, "Failed registering finished listeners.", throwable);
    }
}
// Invoked exactly once when both request reading and response writing have finished
// (or failed); classifies the outcome, reports metrics, and completes the async cycle.
private final BiConsumer<Void, Throwable> completeRequestCallback;
{
    AtomicBoolean completeRequestCalled = new AtomicBoolean(false);
    HttpRequestDispatch parent = this;
    completeRequestCallback = (result, error) -> {
        // Being invoked twice indicates a wiring bug; fail loudly.
        boolean alreadyCalled = completeRequestCalled.getAndSet(true);
        if (alreadyCalled) {
            AssertionError e = new AssertionError("completeRequest called more than once");
            log.log(Level.WARNING, "Assertion failed.", e);
            throw e;
        }
        boolean reportedError = false;
        if (error != null) {
            // Client disconnects and timeouts are expected operational noise -> FINE;
            // overload/binding/request errors are reported by their own handlers; only
            // genuinely unexpected failures get WARNING.
            if (isErrorOfType(error, EofException.class, IOException.class)) {
                log.log(Level.FINE,
                        error,
                        () -> "Network connection was unexpectedly terminated: " + parent.jettyRequest.getRequestURI());
                parent.metricReporter.prematurelyClosed();
            } else if (isErrorOfType(error, TimeoutException.class)) {
                log.log(Level.FINE,
                        error,
                        () -> "Request/stream was timed out by Jetty: " + parent.jettyRequest.getRequestURI());
            } else if (!isErrorOfType(error, OverloadException.class, BindingNotFoundException.class, RequestException.class)) {
                log.log(Level.WARNING, "Request failed: " + parent.jettyRequest.getRequestURI(), error);
            }
            reportedError = true;
            parent.metricReporter.failedResponse();
        } else {
            parent.metricReporter.successfulResponse();
        }
        try {
            parent.async.complete();
            // NOTE(review): this "completed successfully" line is also logged when the
            // request failed above -- consider moving it into the success branch.
            log.finest(() -> "Request completed successfully: " + parent.jettyRequest.getRequestURI());
        } catch (Throwable throwable) {
            // async.complete() failing after a reported error is expected; demote to FINE.
            Level level = reportedError ? Level.FINE: Level.WARNING;
            log.log(level, "Async.complete failed", throwable);
        }
    };
}
/**
 * Starts a graceful shutdown of the request's connection when either the configured
 * request-count limit or the configured connection-lifetime limit has been reached.
 * A limit of 0 disables the corresponding check.
 */
private static void shutdownConnectionGracefullyIfThresholdReached(Request request) {
    ConnectorConfig config = getConnector(request).connectorConfig();
    Connection connection = RequestUtils.getConnection(request);

    int maxRequests = config.maxRequestsPerConnection();
    if (maxRequests > 0 && connection.getMessagesIn() >= maxRequests) {
        gracefulShutdown(connection, "max-req-per-conn-exceeded");
    }

    double maxLifeSeconds = config.maxConnectionLife();
    if (maxLifeSeconds > 0) {
        Instant expiry = Instant.ofEpochMilli((long) (connection.getCreatedTimeStamp() + maxLifeSeconds * 1000));
        if (Instant.now().isAfter(expiry)) {
            gracefulShutdown(connection, "max-conn-life-exceeded");
        }
    }
}
/**
 * True if the throwable -- or, when it is a CompletionException wrapper, its cause --
 * is an instance of any of the given types.
 */
@SafeVarargs
@SuppressWarnings("varargs")
private static boolean isErrorOfType(Throwable throwable, Class<? extends Throwable>... handledTypes) {
    for (Class<? extends Throwable> type : handledTypes) {
        if (type.isInstance(throwable)) return true;
        if (throwable instanceof CompletionException && type.isInstance(throwable.getCause())) return true;
    }
    return false;
}
/**
 * Creates the jDisc request, hands it to the request handler, and wires up a reader
 * that pumps the servlet input stream into the returned content channel.
 */
@SuppressWarnings("try")
private ServletRequestReader handleRequest() throws IOException {
    HttpRequest jdiscRequest = HttpRequestFactory.newJDiscRequest(jDiscContext.container, jettyRequest);
    ContentChannel requestContentChannel;
    // The reference keeps the jDisc request resource alive while the handler is invoked;
    // it is released when this try-with-resources scope exits.
    try (ResourceReference ref = References.fromResource(jdiscRequest)) {
        HttpRequestFactory.copyHeaders(jettyRequest, jdiscRequest);
        requestContentChannel = requestHandler.handleRequest(jdiscRequest, servletResponseController.responseHandler);
    }
    ServletInputStream servletInputStream = jettyRequest.getInputStream();
    ServletRequestReader servletRequestReader = new ServletRequestReader(servletInputStream,
                                                                         requestContentChannel,
                                                                         jDiscContext.janitor,
                                                                         metricReporter);
    // Registering the listener starts asynchronous, non-blocking consumption of the body.
    servletInputStream.setReadListener(servletRequestReader);
    return servletRequestReader;
}
/** Invokes the handler only if/when the future completes exceptionally. */
private static void onError(CompletableFuture<?> future, Consumer<Throwable> errorHandler) {
    future.whenComplete((ignoredResult, exception) -> {
        if (exception == null) return;
        errorHandler.accept(exception);
    });
}
/**
 * A request filter produced the response directly: discard the request body, dispatch
 * the filter's response, and hook request completion onto the response lifecycle.
 */
ContentChannel handleRequestFilterResponse(Response response) {
    try {
        jettyRequest.getInputStream().close();
        ContentChannel filterResponseChannel = servletResponseController.responseHandler.handleResponse(response);
        servletResponseController.finishedFuture().whenComplete(completeRequestCallback);
        return filterResponseChannel;
    } catch (IOException e) {
        throw throwUnchecked(e);
    }
}
/** Builds the handler chain: filtering -> optional form-post wrapping -> access logging. */
private static RequestHandler newRequestHandler(JDiscContext context,
                                                AccessLogEntry accessLogEntry,
                                                HttpServletRequest servletRequest) {
    RequestHandler filtering = new FilteringRequestHandler(context.filterResolver, (Request) servletRequest);
    boolean removeRawBody = context.serverConfig.removeRawPostBodyForWwwUrlEncodedPost();
    RequestHandler maybeWrapped = wrapHandlerIfFormPost(filtering, servletRequest, removeRawBody);
    return new AccessLoggingRequestHandler(maybeWrapped, accessLogEntry);
}
/**
 * Wraps the handler in form-post processing, but only for POST requests whose content
 * type is application/x-www-form-urlencoded; all other requests pass through unchanged.
 */
private static RequestHandler wrapHandlerIfFormPost(RequestHandler requestHandler,
                                                    HttpServletRequest servletRequest,
                                                    boolean removeBodyForFormPost) {
    if (!servletRequest.getMethod().equals("POST")) {
        return requestHandler;
    }
    String contentType = servletRequest.getHeader(HttpHeaders.Names.CONTENT_TYPE);
    if (contentType == null || !contentType.startsWith(APPLICATION_X_WWW_FORM_URLENCODED)) {
        return requestHandler;
    }
    return new FormPostRequestHandler(requestHandler, getCharsetName(contentType), removeBodyForFormPost);
}
/**
 * Extracts the charset from a form-post content type (already known to start with
 * APPLICATION_X_WWW_FORM_URLENCODED), defaulting to UTF-8 when none is annotated.
 */
private static String getCharsetName(String contentType) {
    int annotationStart = APPLICATION_X_WWW_FORM_URLENCODED.length();
    return contentType.startsWith(CHARSET_ANNOTATION, annotationStart)
            ? contentType.substring(annotationStart + CHARSET_ANNOTATION.length())
            : StandardCharsets.UTF_8.name();
}
} | class HttpRequestDispatch {
private static final Logger log = Logger.getLogger(HttpRequestDispatch.class.getName());
// Suffix used to extract an explicit charset from a form-post content type.
private final static String CHARSET_ANNOTATION = ";charset=";
// Per-request collaborators, all wired in the constructor.
private final JDiscContext jDiscContext;
private final AsyncContext async;
private final Request jettyRequest;
private final ServletResponseController servletResponseController;
private final RequestHandler requestHandler;
private final RequestMetricReporter metricReporter;

// Wires up handling of one request/response exchange and switches the servlet request to async mode.
public HttpRequestDispatch(JDiscContext jDiscContext,
                           AccessLogEntry accessLogEntry,
                           Context metricContext,
                           HttpServletRequest servletRequest,
                           HttpServletResponse servletResponse) throws IOException {
    this.jDiscContext = jDiscContext;
    requestHandler = newRequestHandler(jDiscContext, accessLogEntry, servletRequest);
    this.jettyRequest = (Request) servletRequest;
    this.metricReporter = new RequestMetricReporter(jDiscContext.metric, metricContext, jettyRequest.getTimeStamp());
    this.servletResponseController = new ServletResponseController(servletRequest,
                                                                   servletResponse,
                                                                   jDiscContext.janitor,
                                                                   metricReporter,
                                                                   jDiscContext.developerMode());
    // Checked before async processing starts so this response can still signal shutdown.
    shutdownConnectionGracefullyIfThresholdReached(jettyRequest);
    this.async = servletRequest.startAsync();
    async.setTimeout(0); // 0 disables the container's async timeout -- TODO confirm timeouts enforced elsewhere
    metricReporter.uriLength(jettyRequest.getOriginalURI().length());
}
// Starts request processing and registers completion/error plumbing between the
// request reader and the response controller.
public void dispatch() throws IOException {
    ServletRequestReader servletRequestReader;
    try {
        servletRequestReader = handleRequest();
    } catch (Throwable throwable) {
        // Handler setup failed: answer with an error response, then complete the exchange.
        servletResponseController.trySendError(throwable);
        servletResponseController.finishedFuture().whenComplete((result, exception) ->
                completeRequestCallback.accept(null, throwable));
        return;
    }
    try {
        // Failures propagate both ways between reader and response.
        onError(servletRequestReader.finishedFuture, servletResponseController::trySendError);
        onError(servletResponseController.finishedFuture(), servletRequestReader::onError);
        CompletableFuture.allOf(servletRequestReader.finishedFuture, servletResponseController.finishedFuture())
                .whenComplete(completeRequestCallback);
    } catch (Throwable throwable) {
        log.log(Level.WARNING, "Failed registering finished listeners.", throwable);
    }
}

// Invoked exactly once when both request reading and response writing have finished (or failed).
private final BiConsumer<Void, Throwable> completeRequestCallback;
{
    AtomicBoolean completeRequestCalled = new AtomicBoolean(false);
    HttpRequestDispatch parent = this;
    completeRequestCallback = (result, error) -> {
        // A second invocation indicates a wiring bug.
        boolean alreadyCalled = completeRequestCalled.getAndSet(true);
        if (alreadyCalled) {
            AssertionError e = new AssertionError("completeRequest called more than once");
            log.log(Level.WARNING, "Assertion failed.", e);
            throw e;
        }
        boolean reportedError = false;
        if (error != null) {
            // Expected operational failures (disconnect/timeout) log at FINE; only
            // unexpected ones at WARNING.
            if (isErrorOfType(error, EofException.class, IOException.class)) {
                log.log(Level.FINE,
                        error,
                        () -> "Network connection was unexpectedly terminated: " + parent.jettyRequest.getRequestURI());
                parent.metricReporter.prematurelyClosed();
            } else if (isErrorOfType(error, TimeoutException.class)) {
                log.log(Level.FINE,
                        error,
                        () -> "Request/stream was timed out by Jetty: " + parent.jettyRequest.getRequestURI());
            } else if (!isErrorOfType(error, OverloadException.class, BindingNotFoundException.class, RequestException.class)) {
                log.log(Level.WARNING, "Request failed: " + parent.jettyRequest.getRequestURI(), error);
            }
            reportedError = true;
            parent.metricReporter.failedResponse();
        } else {
            parent.metricReporter.successfulResponse();
        }
        try {
            parent.async.complete();
            // NOTE(review): logged even when the request failed -- message is misleading.
            log.finest(() -> "Request completed successfully: " + parent.jettyRequest.getRequestURI());
        } catch (Throwable throwable) {
            Level level = reportedError ? Level.FINE: Level.WARNING;
            log.log(level, "Async.complete failed", throwable);
        }
    };
}
// Starts graceful shutdown of the connection when the configured request-count or
// connection-lifetime limit has been reached (0 disables each check).
private static void shutdownConnectionGracefullyIfThresholdReached(Request request) {
    ConnectorConfig connectorConfig = getConnector(request).connectorConfig();
    int maxRequestsPerConnection = connectorConfig.maxRequestsPerConnection();
    Connection connection = RequestUtils.getConnection(request);
    if (maxRequestsPerConnection > 0) {
        if (connection.getMessagesIn() >= maxRequestsPerConnection) {
            gracefulShutdown(connection, "max-req-per-conn-exceeded");
        }
    }
    double maxConnectionLifeInSeconds = connectorConfig.maxConnectionLife();
    if (maxConnectionLifeInSeconds > 0) {
        long createdAt = connection.getCreatedTimeStamp();
        Instant expiredAt = Instant.ofEpochMilli((long) (createdAt + maxConnectionLifeInSeconds * 1000));
        boolean isExpired = Instant.now().isAfter(expiredAt);
        if (isExpired) {
            gracefulShutdown(connection, "max-conn-life-exceeded");
        }
    }
}

// True if the throwable -- or the cause inside a CompletionException wrapper -- matches
// any of the given types.
@SafeVarargs
@SuppressWarnings("varargs")
private static boolean isErrorOfType(Throwable throwable, Class<? extends Throwable>... handledTypes) {
    return Arrays.stream(handledTypes)
            .anyMatch(
                    exceptionType -> exceptionType.isInstance(throwable)
                            || throwable instanceof CompletionException && exceptionType.isInstance(throwable.getCause()));
}
// Creates the jDisc request, hands it to the handler, and wires an async reader for the body.
@SuppressWarnings("try")
private ServletRequestReader handleRequest() throws IOException {
    HttpRequest jdiscRequest = HttpRequestFactory.newJDiscRequest(jDiscContext.container, jettyRequest);
    ContentChannel requestContentChannel;
    // The reference keeps the jDisc request alive while the handler is invoked.
    try (ResourceReference ref = References.fromResource(jdiscRequest)) {
        HttpRequestFactory.copyHeaders(jettyRequest, jdiscRequest);
        requestContentChannel = requestHandler.handleRequest(jdiscRequest, servletResponseController.responseHandler);
    }
    ServletInputStream servletInputStream = jettyRequest.getInputStream();
    ServletRequestReader servletRequestReader = new ServletRequestReader(servletInputStream,
                                                                         requestContentChannel,
                                                                         jDiscContext.janitor,
                                                                         metricReporter);
    // Registering the listener starts non-blocking consumption of the request body.
    servletInputStream.setReadListener(servletRequestReader);
    return servletRequestReader;
}

// Invokes the handler only when the future completes exceptionally.
private static void onError(CompletableFuture<?> future, Consumer<Throwable> errorHandler) {
    future.whenComplete((result, exception) -> {
        if (exception != null) {
            errorHandler.accept(exception);
        }
    });
}

// A request filter produced the response: discard the body, dispatch the filter
// response, and hook completion onto the response lifecycle.
ContentChannel handleRequestFilterResponse(Response response) {
    try {
        jettyRequest.getInputStream().close();
        ContentChannel responseContentChannel = servletResponseController.responseHandler.handleResponse(response);
        servletResponseController.finishedFuture().whenComplete(completeRequestCallback);
        return responseContentChannel;
    } catch (IOException e) {
        throw throwUnchecked(e);
    }
}
// Handler chain: filtering -> optional form-post wrapping -> access logging.
private static RequestHandler newRequestHandler(JDiscContext context,
                                                AccessLogEntry accessLogEntry,
                                                HttpServletRequest servletRequest) {
    RequestHandler requestHandler = wrapHandlerIfFormPost(
            new FilteringRequestHandler(context.filterResolver, (Request)servletRequest),
            servletRequest, context.serverConfig.removeRawPostBodyForWwwUrlEncodedPost());
    return new AccessLoggingRequestHandler(requestHandler, accessLogEntry);
}

// Only POSTs with application/x-www-form-urlencoded content get the form-post wrapper.
private static RequestHandler wrapHandlerIfFormPost(RequestHandler requestHandler,
                                                    HttpServletRequest servletRequest,
                                                    boolean removeBodyForFormPost) {
    if (!servletRequest.getMethod().equals("POST")) {
        return requestHandler;
    }
    String contentType = servletRequest.getHeader(HttpHeaders.Names.CONTENT_TYPE);
    if (contentType == null) {
        return requestHandler;
    }
    if (!contentType.startsWith(APPLICATION_X_WWW_FORM_URLENCODED)) {
        return requestHandler;
    }
    return new FormPostRequestHandler(requestHandler, getCharsetName(contentType), removeBodyForFormPost);
}

// Charset from the ";charset=" annotation following the form content type; UTF-8 by default.
private static String getCharsetName(String contentType) {
    if (!contentType.startsWith(CHARSET_ANNOTATION, APPLICATION_X_WWW_FORM_URLENCODED.length())) {
        return StandardCharsets.UTF_8.name();
    }
    return contentType.substring(APPLICATION_X_WWW_FORM_URLENCODED.length() + CHARSET_ANNOTATION.length());
}
} |
... !? So what effect will closing the stream have, then? Or do you mean that it results in an exception, rather than an automatic retry? | public void requireThatConnectionIsClosedAfterXRequests() throws Exception {
// Verifies max-requests-per-connection: HTTP/1 advertises Connection: close on the
// last allowed request; HTTP/2 refuses the stream after the limit.
final int MAX_REQUESTS = 10;
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
ConnectorConfig.Builder connectorConfig = new ConnectorConfig.Builder()
        .maxRequestsPerConnection(MAX_REQUESTS)
        .ssl(new ConnectorConfig.Ssl.Builder()
                .enabled(true)
                .clientAuth(ConnectorConfig.Ssl.ClientAuth.Enum.NEED_AUTH)
                .privateKeyFile(privateKeyFile.toString())
                .certificateFile(certificateFile.toString())
                .caCertificateFile(certificateFile.toString()));
ServerConfig.Builder serverConfig = new ServerConfig.Builder()
        .connectionLog(new ServerConfig.ConnectionLog.Builder().enabled(true));
JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
        new EchoRequestHandler(),
        serverConfig,
        connectorConfig,
        binder -> {});
// HTTP/1: no Connection header until the limit is reached.
for (int i = 0; i < MAX_REQUESTS - 1; i++) {
    driver.client().get("/status.html")
            .expectStatusCode(is(OK))
            .expectNoHeader(CONNECTION);
}
driver.client().get("/status.html")
        .expectStatusCode(is(OK))
        .expectHeader(CONNECTION, is(CLOSE));
try (CloseableHttpAsyncClient client = createHttp2Client(driver)) {
    // NOTE(review): this line appears truncated in this dump (unterminated string literal).
    String uri = "https:
    for (int i = 0; i < MAX_REQUESTS - 1; i++) {
        SimpleHttpResponse response = client.execute(SimpleRequestBuilder.get(uri).build(), null).get();
        assertEquals(OK, response.getCode());
    }
    try {
        client.execute(SimpleRequestBuilder.get(uri).build(), null).get();
        fail();
    } catch (ExecutionException e) {
        // NOTE(review): JUnit's assertEquals takes (expected, actual) -- the arguments
        // here are swapped, which would mislabel the values in a failure message.
        assertEquals(e.getMessage(), "org.apache.hc.core5.http2.H2StreamResetException: Stream refused");
    }
}
assertTrue(driver.close());
} | public void requireThatConnectionIsClosedAfterXRequests() throws Exception {
// Duplicate of the preceding test body (paired dataset column); same behavior is verified.
final int MAX_REQUESTS = 10;
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
ConnectorConfig.Builder connectorConfig = new ConnectorConfig.Builder()
        .maxRequestsPerConnection(MAX_REQUESTS)
        .ssl(new ConnectorConfig.Ssl.Builder()
                .enabled(true)
                .clientAuth(ConnectorConfig.Ssl.ClientAuth.Enum.NEED_AUTH)
                .privateKeyFile(privateKeyFile.toString())
                .certificateFile(certificateFile.toString())
                .caCertificateFile(certificateFile.toString()));
ServerConfig.Builder serverConfig = new ServerConfig.Builder()
        .connectionLog(new ServerConfig.ConnectionLog.Builder().enabled(true));
JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
        new EchoRequestHandler(),
        serverConfig,
        connectorConfig,
        binder -> {});
for (int i = 0; i < MAX_REQUESTS - 1; i++) {
    driver.client().get("/status.html")
            .expectStatusCode(is(OK))
            .expectNoHeader(CONNECTION);
}
driver.client().get("/status.html")
        .expectStatusCode(is(OK))
        .expectHeader(CONNECTION, is(CLOSE));
try (CloseableHttpAsyncClient client = createHttp2Client(driver)) {
    // NOTE(review): truncated line in this dump (unterminated string literal).
    String uri = "https:
    for (int i = 0; i < MAX_REQUESTS - 1; i++) {
        SimpleHttpResponse response = client.execute(SimpleRequestBuilder.get(uri).build(), null).get();
        assertEquals(OK, response.getCode());
    }
    try {
        client.execute(SimpleRequestBuilder.get(uri).build(), null).get();
        fail();
    } catch (ExecutionException e) {
        // NOTE(review): assertEquals(expected, actual) arguments are swapped here.
        assertEquals(e.getMessage(), "org.apache.hc.core5.http2.H2StreamResetException: Stream refused");
    }
}
assertTrue(driver.close());
} | class HttpServerTest {
// Per-test scratch directory for generated key/certificate files; cleaned up by JUnit.
@Rule
public TemporaryFolder tmpFolder = new TemporaryFolder();
// An unconfigured server binds an ephemeral (non-zero) port.
@Test
public void requireThatServerCanListenToRandomPort() throws Exception {
    final JettyTestDriver driver = JettyTestDriver.newInstance(mockRequestHandler());
    assertNotEquals(0, driver.server().getListenPort());
    assertTrue(driver.close());
}
// Binding an already-bound port fails with a BindException cause.
@Test
public void requireThatServerCanNotListenToBoundPort() throws Exception {
    final JettyTestDriver driver = JettyTestDriver.newInstance(mockRequestHandler());
    try {
        JettyTestDriver.newConfiguredInstance(
                mockRequestHandler(),
                new ServerConfig.Builder(),
                new ConnectorConfig.Builder()
                        .listenPort(driver.server().getListenPort())
        );
    } catch (final Throwable t) {
        assertThat(t.getCause(), instanceOf(BindException.class));
    }
    // NOTE(review): if no exception is thrown the test silently passes -- consider fail() after the call.
    assertTrue(driver.close());
}
// In developer mode, an unknown binding set yields a 404 with the exception stack trace in the body.
@Test
public void requireThatBindingSetNotFoundReturns404() throws Exception {
    final JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
            mockRequestHandler(),
            new ServerConfig.Builder()
                    .developerMode(true),
            new ConnectorConfig.Builder(),
            newBindingSetSelector("unknown"));
    driver.client().get("/status.html")
            .expectStatusCode(is(NOT_FOUND))
            .expectContent(containsPattern(Pattern.compile(
                    Pattern.quote(BindingSetNotFoundException.class.getName()) +
                            ": No binding set named 'unknown'\\.\n\tat .+",
                    Pattern.DOTALL | Pattern.MULTILINE)));
    assertTrue(driver.close());
}
// A 1-byte header limit makes any request line too long -> 414.
@Test
public void requireThatTooLongInitLineReturns414() throws Exception {
    final JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
            mockRequestHandler(),
            new ServerConfig.Builder(),
            new ConnectorConfig.Builder()
                    .requestHeaderSize(1));
    driver.client().get("/status.html")
            .expectStatusCode(is(REQUEST_URI_TOO_LONG));
    assertTrue(driver.close());
}
// Requests rejected by Jetty itself (here: 414) must still reach the access log.
@Test
public void requireThatAccessLogIsCalledForRequestRejectedByJetty() throws Exception {
    BlockingQueueRequestLog requestLogMock = new BlockingQueueRequestLog();
    final JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
            mockRequestHandler(),
            new ServerConfig.Builder(),
            new ConnectorConfig.Builder().requestHeaderSize(1),
            binder -> binder.bind(RequestLog.class).toInstance(requestLogMock));
    driver.client().get("/status.html")
            .expectStatusCode(is(REQUEST_URI_TOO_LONG));
    RequestLogEntry entry = requestLogMock.poll(Duration.ofSeconds(5));
    assertEquals(414, entry.statusCode().getAsInt());
    assertThat(driver.close(), is(true));
}
// Basic round trip through the echo handler.
@Test
public void requireThatServerCanEcho() throws Exception {
    final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoRequestHandler());
    driver.client().get("/status.html")
            .expectStatusCode(is(OK));
    assertTrue(driver.close());
}
// Same round trip with a compression-enabled client.
@Test
public void requireThatServerCanEchoCompressed() throws Exception {
    final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoRequestHandler());
    SimpleHttpClient client = driver.newClient(true);
    client.get("/status.html")
            .expectStatusCode(is(OK));
    assertTrue(driver.close());
}
// Two sequential requests on the same driver both succeed.
@Test
public void requireThatServerCanHandleMultipleRequests() throws Exception {
    final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoRequestHandler());
    driver.client().get("/status.html")
            .expectStatusCode(is(OK));
    driver.client().get("/status.html")
            .expectStatusCode(is(OK));
    assertTrue(driver.close());
}
// A urlencoded POST body is parsed into form parameters.
@Test
public void requireThatFormPostWorks() throws Exception {
    final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
    final String requestContent = generateContent('a', 30);
    final ResponseValidator response =
            driver.client().newPost("/status.html")
                    .addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
                    .setContent(requestContent)
                    .execute();
    response.expectStatusCode(is(OK))
            .expectContent(startsWith('{' + requestContent + "=[]}"));
    assertTrue(driver.close());
}
// By default the raw form-post body is kept and echoed after the parsed parameters.
@Test
public void requireThatFormPostDoesNotRemoveContentByDefault() throws Exception {
    final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
    final ResponseValidator response =
            driver.client().newPost("/status.html")
                    .addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
                    .setContent("foo=bar")
                    .execute();
    response.expectStatusCode(is(OK))
            .expectContent(is("{foo=[bar]}foo=bar"));
    assertTrue(driver.close());
}
// Explicitly configured to keep the body: same result as the default.
@Test
public void requireThatFormPostKeepsContentWhenConfiguredTo() throws Exception {
    final JettyTestDriver driver = newDriverWithFormPostContentRemoved(new ParameterPrinterRequestHandler(), false);
    final ResponseValidator response =
            driver.client().newPost("/status.html")
                    .addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
                    .setContent("foo=bar")
                    .execute();
    response.expectStatusCode(is(OK))
            .expectContent(is("{foo=[bar]}foo=bar"));
    assertTrue(driver.close());
}
// Configured removal strips the raw body, leaving only the parsed parameters.
@Test
public void requireThatFormPostRemovesContentWhenConfiguredTo() throws Exception {
    final JettyTestDriver driver = newDriverWithFormPostContentRemoved(new ParameterPrinterRequestHandler(), true);
    final ResponseValidator response =
            driver.client().newPost("/status.html")
                    .addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
                    .setContent("foo=bar")
                    .execute();
    response.expectStatusCode(is(OK))
            .expectContent(is("{foo=[bar]}"));
    assertTrue(driver.close());
}
// An explicit charset annotation on the content type is accepted.
@Test
public void requireThatFormPostWithCharsetSpecifiedWorks() throws Exception {
    final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
    final String requestContent = generateContent('a', 30);
    final ResponseValidator response =
            driver.client().newPost("/status.html")
                    .addHeader(X_DISABLE_CHUNKING, "true")
                    .addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED + ";charset=UTF-8")
                    .setContent(requestContent)
                    .execute();
    response.expectStatusCode(is(OK))
            .expectContent(startsWith('{' + requestContent + "=[]}"));
    assertTrue(driver.close());
}
// An empty form post yields an empty parameter map.
@Test
public void requireThatEmptyFormPostWorks() throws Exception {
    final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
    final ResponseValidator response =
            driver.client().newPost("/status.html")
                    .addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
                    .execute();
    response.expectStatusCode(is(OK))
            .expectContent(is("{}"));
    assertTrue(driver.close());
}
// Multiple form parameters are parsed from the body.
@Test
public void requireThatFormParametersAreParsed() throws Exception {
    final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
    final ResponseValidator response =
            driver.client().newPost("/status.html")
                    .addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
                    .setContent("a=b&c=d")
                    .execute();
    response.expectStatusCode(is(OK))
            .expectContent(startsWith("{a=[b], c=[d]}"));
    assertTrue(driver.close());
}
// Query-string parameters are parsed even for a form POST with no body.
@Test
public void requireThatUriParametersAreParsed() throws Exception {
    final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
    final ResponseValidator response =
            driver.client().newPost("/status.html?a=b&c=d")
                    .addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
                    .execute();
    response.expectStatusCode(is(OK))
            .expectContent(is("{a=[b], c=[d]}"));
    assertTrue(driver.close());
}
// Query and form values for the same key are merged, query values first.
@Test
public void requireThatFormAndUriParametersAreMerged() throws Exception {
    final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
    final ResponseValidator response =
            driver.client().newPost("/status.html?a=b&c=d1")
                    .addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
                    .setContent("c=d2&e=f")
                    .execute();
    response.expectStatusCode(is(OK))
            .expectContent(startsWith("{a=[b], c=[d1, d2], e=[f]}"));
    assertTrue(driver.close());
}
// A non-UTF-8 charset annotation (ISO-8859-1) is honored when decoding the body.
@Test
public void requireThatFormCharsetIsHonored() throws Exception {
    final JettyTestDriver driver = newDriverWithFormPostContentRemoved(new ParameterPrinterRequestHandler(), true);
    final ResponseValidator response =
            driver.client().newPost("/status.html")
                    .addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED + ";charset=ISO-8859-1")
                    .setBinaryContent(new byte[]{66, (byte) 230, 114, 61, 98, 108, (byte) 229})
                    .execute();
    response.expectStatusCode(is(OK))
            .expectContent(is("{B\u00e6r=[bl\u00e5]}"));
    assertTrue(driver.close());
}
// NOTE(review): the name says "BadRequest" but the asserted status is 415 Unsupported
// Media Type -- consider renaming the test.
@Test
public void requireThatUnknownFormCharsetIsTreatedAsBadRequest() throws Exception {
    final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
    final ResponseValidator response =
            driver.client().newPost("/status.html")
                    .addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED + ";charset=FLARBA-GARBA-7")
                    .setContent("a=b")
                    .execute();
    response.expectStatusCode(is(UNSUPPORTED_MEDIA_TYPE));
    assertTrue(driver.close());
}
// Percent-encoded keys and values are decoded before parsing.
@Test
public void requireThatFormPostWithPercentEncodedContentIsDecoded() throws Exception {
    final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
    final ResponseValidator response =
            driver.client().newPost("/status.html")
                    .addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
                    .setContent("%20%3D%C3%98=%22%25+")
                    .execute();
    response.expectStatusCode(is(OK))
            .expectContent(startsWith("{ =\u00d8=[\"% ]}"));
    assertTrue(driver.close());
}
// A handler that throws during form-post handling produces a 500, not a hung request.
@Test
public void requireThatFormPostWithThrowingHandlerIsExceptionSafe() throws Exception {
    final JettyTestDriver driver = JettyTestDriver.newInstance(new ThrowingHandler());
    final ResponseValidator response =
            driver.client().newPost("/status.html")
                    .addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
                    .setContent("a=b")
                    .execute();
    response.expectStatusCode(is(INTERNAL_SERVER_ERROR));
    assertTrue(driver.close());
}
// A multipart POST echoes all part contents back.
@Test
public void requireThatMultiPostWorks() throws Exception {
    final String startTxtContent = "this is a test for POST.";
    final String updaterConfContent
            = "identifier = updater\n"
            + "server_type = gds\n";
    final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoRequestHandler());
    final ResponseValidator response =
            driver.client().newPost("/status.html")
                    .setMultipartContent(
                            newFileBody("start.txt", startTxtContent),
                            newFileBody("updater.conf", updaterConfContent))
                    .execute();
    response.expectStatusCode(is(OK))
            .expectContent(containsString(startTxtContent))
            .expectContent(containsString(updaterConfContent));
    // Every sibling test closes its driver and asserts a clean shutdown; this one was
    // missing that step, leaking the server between tests.
    assertTrue(driver.close());
}
@Test
public void requireThatRequestCookiesAreReceived() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new CookiePrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(COOKIE, "foo=bar")
.execute();
response.expectStatusCode(is(OK))
.expectContent(containsString("[foo=bar]"));
assertTrue(driver.close());
}
@Test
public void requireThatSetCookieHeaderIsCorrect() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new CookieSetterRequestHandler(
new Cookie("foo", "bar")
.setDomain(".localhost")
.setHttpOnly(true)
.setPath("/foopath")
.setSecure(true)));
driver.client().get("/status.html")
.expectStatusCode(is(OK))
.expectHeader("Set-Cookie",
is("foo=bar; Path=/foopath; Domain=.localhost; Secure; HttpOnly"));
assertTrue(driver.close());
}
@Test
public void requireThatTimeoutWorks() throws Exception {
final UnresponsiveHandler requestHandler = new UnresponsiveHandler();
final JettyTestDriver driver = JettyTestDriver.newInstance(requestHandler);
driver.client().get("/status.html")
.expectStatusCode(is(GATEWAY_TIMEOUT));
ResponseDispatch.newInstance(OK).dispatch(requestHandler.responseHandler);
assertTrue(driver.close());
}
@Test
public void requireThatHeaderWithNullValueIsOmitted() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoWithHeaderRequestHandler("X-Foo", null));
driver.client().get("/status.html")
.expectStatusCode(is(OK))
.expectNoHeader("X-Foo");
assertTrue(driver.close());
}
@Test
public void requireThatHeaderWithEmptyValueIsAllowed() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoWithHeaderRequestHandler("X-Foo", ""));
driver.client().get("/status.html")
.expectStatusCode(is(OK))
.expectHeader("X-Foo", is(""));
assertTrue(driver.close());
}
@Test
public void requireThatNoConnectionHeaderMeansKeepAliveInHttp11KeepAliveDisabled() throws Exception {
    // The echo handler copies a 'Connection: close' header into the response and the
    // client verifies it round-trips unchanged.
    // NOTE(review): the method name talks about keep-alive, but the request itself sends
    // no Connection header and the assertion is only about the response header — the
    // name may be stale; confirm the intended scenario.
    final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoWithHeaderRequestHandler(CONNECTION, CLOSE));
    driver.client().get("/status.html")
            .expectHeader(CONNECTION, is(CLOSE));
    assertThat(driver.close(), is(true));
}
// Fix: removed a duplicated @Test annotation — JUnit 4's @Test is not a repeatable
// annotation, so the duplicate made this method (and the file) uncompilable.
@Test
public void requireThatServerCanRespondToSslRequest() throws Exception {
    Path privateKeyFile = tmpFolder.newFile().toPath();
    Path certificateFile = tmpFolder.newFile().toPath();
    generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
    final JettyTestDriver driver = JettyTestDriver.newInstanceWithSsl(new EchoRequestHandler(), certificateFile, privateKeyFile, TlsClientAuth.WANT);
    driver.client().get("/status.html")
            .expectStatusCode(is(OK));
    assertTrue(driver.close());
}
@Test
public void requireThatServerCanRespondToHttp2Request() throws Exception {
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
MetricConsumerMock metricConsumer = new MetricConsumerMock();
InMemoryConnectionLog connectionLog = new InMemoryConnectionLog();
JettyTestDriver driver = createSslTestDriver(certificateFile, privateKeyFile, metricConsumer, connectionLog);
try (CloseableHttpAsyncClient client = createHttp2Client(driver)) {
String uri = "https:
SimpleHttpResponse response = client.execute(SimpleRequestBuilder.get(uri).build(), null).get();
assertNull(response.getBodyText());
assertEquals(OK, response.getCode());
}
assertTrue(driver.close());
ConnectionLogEntry entry = connectionLog.logEntries().get(0);
assertEquals("HTTP/2.0", entry.httpProtocol().get());
}
@Test
public void requireThatTlsClientAuthenticationEnforcerRejectsRequestsForNonWhitelistedPaths() throws IOException {
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
JettyTestDriver driver = createSslWithTlsClientAuthenticationEnforcer(certificateFile, privateKeyFile);
SSLContext trustStoreOnlyCtx = new SslContextBuilder()
.withTrustStore(certificateFile)
.build();
new SimpleHttpClient(trustStoreOnlyCtx, driver.server().getListenPort(), false)
.get("/dummy.html")
.expectStatusCode(is(UNAUTHORIZED));
assertTrue(driver.close());
}
@Test
public void requireThatTlsClientAuthenticationEnforcerAllowsRequestForWhitelistedPaths() throws IOException {
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
JettyTestDriver driver = JettyTestDriver.newInstanceWithSsl(new EchoRequestHandler(), certificateFile, privateKeyFile, TlsClientAuth.WANT);
SSLContext trustStoreOnlyCtx = new SslContextBuilder()
.withTrustStore(certificateFile)
.build();
new SimpleHttpClient(trustStoreOnlyCtx, driver.server().getListenPort(), false)
.get("/status.html")
.expectStatusCode(is(OK));
assertTrue(driver.close());
}
@Test
public void requireThatConnectedAtReturnsNonZero() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ConnectedAtRequestHandler());
driver.client().get("/status.html")
.expectStatusCode(is(OK))
.expectContent(matchesPattern("\\d{13,}"));
assertThat(driver.close(), is(true));
}
@Test
public void requireThatGzipEncodingRequestsAreAutomaticallyDecompressed() throws Exception {
JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
String requestContent = generateContent('a', 30);
ResponseValidator response = driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setGzipContent(requestContent)
.execute();
response.expectStatusCode(is(OK))
.expectContent(startsWith('{' + requestContent + "=[]}"));
assertTrue(driver.close());
}
@Test
public void requireThatResponseStatsAreCollected() throws Exception {
RequestTypeHandler handler = new RequestTypeHandler();
JettyTestDriver driver = JettyTestDriver.newInstance(handler);
HttpResponseStatisticsCollector statisticsCollector = ((AbstractHandlerContainer) driver.server().server().getHandler())
.getChildHandlerByClass(HttpResponseStatisticsCollector.class);
{
List<HttpResponseStatisticsCollector.StatisticsEntry> stats = statisticsCollector.takeStatistics();
assertEquals(0, stats.size());
}
{
driver.client().newPost("/status.html").execute();
var entry = waitForStatistics(statisticsCollector);
assertEquals("http", entry.scheme);
assertEquals("POST", entry.method);
assertEquals("http.status.2xx", entry.name);
assertEquals("write", entry.requestType);
assertEquals(1, entry.value);
}
{
driver.client().newGet("/status.html").execute();
var entry = waitForStatistics(statisticsCollector);
assertEquals("http", entry.scheme);
assertEquals("GET", entry.method);
assertEquals("http.status.2xx", entry.name);
assertEquals("read", entry.requestType);
assertEquals(1, entry.value);
}
{
handler.setRequestType(Request.RequestType.READ);
driver.client().newPost("/status.html").execute();
var entry = waitForStatistics(statisticsCollector);
assertEquals("Handler overrides request type", "read", entry.requestType);
}
assertTrue(driver.close());
}
/**
 * Polls the statistics collector until at least one entry is available, sleeping
 * 100 ms between attempts, then asserts exactly one entry was collected and returns it.
 */
private HttpResponseStatisticsCollector.StatisticsEntry waitForStatistics(HttpResponseStatisticsCollector
                                                                                  statisticsCollector) {
    List<HttpResponseStatisticsCollector.StatisticsEntry> entries = Collections.emptyList();
    int tries = 0;
    while (entries.isEmpty() && tries < 10000) {
        entries = statisticsCollector.takeStatistics();
        if (entries.isEmpty()) {
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                // Fix: the original swallowed the interrupt. Restore the interrupt flag
                // and stop waiting so an interrupted test thread fails fast instead of
                // spinning through the remaining retries.
                Thread.currentThread().interrupt();
                break;
            }
        }
        tries++;
    }
    assertEquals(1, entries.size());
    return entries.get(0);
}
@Test
public void requireThatConnectionThrottleDoesNotBlockConnectionsBelowThreshold() throws Exception {
JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
new EchoRequestHandler(),
new ServerConfig.Builder(),
new ConnectorConfig.Builder()
.throttling(new Throttling.Builder()
.enabled(true)
.maxAcceptRate(10)
.maxHeapUtilization(1.0)
.maxConnections(10)));
driver.client().get("/status.html")
.expectStatusCode(is(OK));
assertTrue(driver.close());
}
@Test
public void requireThatConnectionIsTrackedInConnectionLog() throws Exception {
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
InMemoryConnectionLog connectionLog = new InMemoryConnectionLog();
Module overrideModule = binder -> binder.bind(ConnectionLog.class).toInstance(connectionLog);
JettyTestDriver driver = JettyTestDriver.newInstanceWithSsl(new OkRequestHandler(), certificateFile, privateKeyFile, TlsClientAuth.NEED, overrideModule);
int listenPort = driver.server().getListenPort();
StringBuilder builder = new StringBuilder();
for (int i = 0; i < 1000; i++) {
builder.append(i);
}
byte[] content = builder.toString().getBytes();
for (int i = 0; i < 100; i++) {
driver.client().newPost("/status.html").setBinaryContent(content).execute()
.expectStatusCode(is(OK));
}
assertTrue(driver.close());
List<ConnectionLogEntry> logEntries = connectionLog.logEntries();
Assertions.assertThat(logEntries).hasSize(1);
ConnectionLogEntry logEntry = logEntries.get(0);
assertEquals(4, UUID.fromString(logEntry.id()).version());
Assertions.assertThat(logEntry.timestamp()).isAfter(Instant.EPOCH);
Assertions.assertThat(logEntry.requests()).hasValue(100L);
Assertions.assertThat(logEntry.responses()).hasValue(100L);
Assertions.assertThat(logEntry.peerAddress()).hasValue("127.0.0.1");
Assertions.assertThat(logEntry.localAddress()).hasValue("127.0.0.1");
Assertions.assertThat(logEntry.localPort()).hasValue(listenPort);
Assertions.assertThat(logEntry.httpBytesReceived()).hasValueSatisfying(value -> Assertions.assertThat(value).isGreaterThan(100000L));
Assertions.assertThat(logEntry.httpBytesSent()).hasValueSatisfying(value -> Assertions.assertThat(value).isGreaterThan(10000L));
Assertions.assertThat(logEntry.sslProtocol()).hasValueSatisfying(TlsContext.ALLOWED_PROTOCOLS::contains);
Assertions.assertThat(logEntry.sslPeerSubject()).hasValue("CN=localhost");
Assertions.assertThat(logEntry.sslCipherSuite()).hasValueSatisfying(cipher -> Assertions.assertThat(cipher).isNotBlank());
Assertions.assertThat(logEntry.sslSessionId()).hasValueSatisfying(sessionId -> Assertions.assertThat(sessionId).hasSize(64));
Assertions.assertThat(logEntry.sslPeerNotBefore()).hasValue(Instant.EPOCH);
Assertions.assertThat(logEntry.sslPeerNotAfter()).hasValue(Instant.EPOCH.plus(100_000, ChronoUnit.DAYS));
}
@Test
public void requireThatRequestIsTrackedInAccessLog() throws IOException, InterruptedException {
BlockingQueueRequestLog requestLogMock = new BlockingQueueRequestLog();
JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
new EchoRequestHandler(),
new ServerConfig.Builder(),
new ConnectorConfig.Builder(),
binder -> binder.bind(RequestLog.class).toInstance(requestLogMock));
driver.client().newPost("/status.html").setContent("abcdef").execute().expectStatusCode(is(OK));
RequestLogEntry entry = requestLogMock.poll(Duration.ofSeconds(5));
Assertions.assertThat(entry.statusCode()).hasValue(200);
Assertions.assertThat(entry.requestSize()).hasValue(6);
assertThat(driver.close(), is(true));
}
@Test
public void requireThatRequestsPerConnectionMetricIsAggregated() throws IOException {
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
var metricConsumer = new MetricConsumerMock();
InMemoryConnectionLog connectionLog = new InMemoryConnectionLog();
JettyTestDriver driver = createSslTestDriver(certificateFile, privateKeyFile, metricConsumer, connectionLog);
driver.client().get("/").expectStatusCode(is(OK));
assertThat(driver.close(), is(true));
verify(metricConsumer.mockitoMock(), atLeast(1))
.set(MetricDefinitions.REQUESTS_PER_CONNECTION, 1L, MetricConsumerMock.STATIC_CONTEXT);
}
@Test
public void uriWithEmptyPathSegmentIsAllowed() throws Exception {
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
MetricConsumerMock metricConsumer = new MetricConsumerMock();
InMemoryConnectionLog connectionLog = new InMemoryConnectionLog();
JettyTestDriver driver = createSslTestDriver(certificateFile, privateKeyFile, metricConsumer, connectionLog);
String uriPath = "/path/with/empty
driver.client().get(uriPath).expectStatusCode(is(OK));
try (CloseableHttpAsyncClient client = createHttp2Client(driver)) {
String uri = "https:
SimpleHttpResponse response = client.execute(SimpleRequestBuilder.get(uri).build(), null).get();
assertEquals(OK, response.getCode());
}
assertTrue(driver.close());
}
// Builds an async HTTP/2-only client trusting the test driver's SSL context.
// Automatic retries are disabled so each test request maps to exactly one
// server-side request. The client is started here; callers close it
// (tests use it in try-with-resources).
private static CloseableHttpAsyncClient createHttp2Client(JettyTestDriver driver) {
    TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create()
            .setSslContext(driver.sslContext())
            .build();
    var client = H2AsyncClientBuilder.create()
            .disableAutomaticRetries()
            .setTlsStrategy(tlsStrategy)
            .build();
    client.start();
    return client;
}
// Creates an SSL-enabled test driver (WANT_AUTH) with a TlsClientAuthEnforcer whose
// whitelist contains only "/status.html" — requests to other paths without a client
// certificate are expected to be rejected. The self-signed certificate doubles as
// the CA certificate, and the connection log is enabled.
private static JettyTestDriver createSslWithTlsClientAuthenticationEnforcer(Path certificateFile, Path privateKeyFile) {
    ConnectorConfig.Builder connectorConfig = new ConnectorConfig.Builder()
            .tlsClientAuthEnforcer(
                    new ConnectorConfig.TlsClientAuthEnforcer.Builder()
                            .enable(true)
                            .pathWhitelist("/status.html"))
            .ssl(new ConnectorConfig.Ssl.Builder()
                    .enabled(true)
                    .clientAuth(ConnectorConfig.Ssl.ClientAuth.Enum.WANT_AUTH)
                    .privateKeyFile(privateKeyFile.toString())
                    .certificateFile(certificateFile.toString())
                    .caCertificateFile(certificateFile.toString()));
    return JettyTestDriver.newConfiguredInstance(
            new EchoRequestHandler(),
            new ServerConfig.Builder().connectionLog(new ServerConfig.ConnectionLog.Builder().enabled(true)),
            connectorConfig,
            binder -> {});
}
// Returns a Mockito mock whose refer() yields a no-op reference, so the jdisc
// dispatch machinery can acquire/release the handler without NullPointerExceptions.
private static RequestHandler mockRequestHandler() {
    final RequestHandler mockRequestHandler = mock(RequestHandler.class);
    when(mockRequestHandler.refer()).thenReturn(References.NOOP_REFERENCE);
    return mockRequestHandler;
}
/** Returns a string consisting of {@code len} repetitions of the character {@code c}. */
private static String generateContent(final char c, final int len) {
    final StringBuilder content = new StringBuilder(len);
    while (content.length() < len) {
        content.append(c);
    }
    return content.toString();
}
// Creates a driver whose server either strips or keeps the raw POST body for
// application/x-www-form-urlencoded requests, controlled by removeFormPostBody.
private static JettyTestDriver newDriverWithFormPostContentRemoved(RequestHandler requestHandler,
                                                                   boolean removeFormPostBody) throws Exception {
    return JettyTestDriver.newConfiguredInstance(
            requestHandler,
            new ServerConfig.Builder()
                    .removeRawPostBodyForWwwUrlEncodedPost(removeFormPostBody),
            new ConnectorConfig.Builder());
}
// Wraps the given content as a text/plain multipart form-body part named after the
// file. The anonymous StringBody overrides report the supplied file name, an empty
// MIME type, and a null charset, so only the raw content reaches the server under test.
private static FormBodyPart newFileBody(final String fileName, final String fileContent) {
    return FormBodyPartBuilder.create()
            .setBody(
                    new StringBody(fileContent, ContentType.TEXT_PLAIN) {
                        @Override public String getFilename() { return fileName; }
                        @Override public String getMimeType() { return ""; }
                        @Override public String getCharset() { return null; }
                    })
            .setName(fileName)
            .build();
}
// Responds 200 OK with the request connection's establishment time in milliseconds
// (as a decimal string) as the entire response body. Request content is ignored
// (handleRequest returns null).
private static class ConnectedAtRequestHandler extends AbstractRequestHandler {
    @Override
    public ContentChannel handleRequest(final Request request, final ResponseHandler handler) {
        final HttpRequest httpRequest = (HttpRequest)request;
        final String connectedAt = String.valueOf(httpRequest.getConnectedAt(TimeUnit.MILLISECONDS));
        final ContentChannel ch = handler.handleResponse(new Response(OK));
        ch.write(ByteBuffer.wrap(connectedAt.getBytes(StandardCharsets.UTF_8)), null);
        ch.close(null);
        return null;
    }
}
// Responds 200 OK with a Set-Cookie header encoding the single cookie supplied at
// construction time. No response body is written.
private static class CookieSetterRequestHandler extends AbstractRequestHandler {
    final Cookie cookie;
    CookieSetterRequestHandler(final Cookie cookie) {
        this.cookie = cookie;
    }
    @Override
    public ContentChannel handleRequest(final Request request, final ResponseHandler handler) {
        final HttpResponse response = HttpResponse.newInstance(OK);
        response.encodeSetCookieHeader(Collections.singletonList(cookie));
        ResponseDispatch.newInstance(response).dispatch(handler);
        return null;
    }
}
// Responds 200 OK with the request's cookies, sorted by name, rendered via
// List.toString() (e.g. "[foo=bar]").
private static class CookiePrinterRequestHandler extends AbstractRequestHandler {
    @Override
    public ContentChannel handleRequest(final Request request, final ResponseHandler handler) {
        final List<Cookie> sortedCookies = new ArrayList<>(((HttpRequest) request).decodeCookieHeader());
        sortedCookies.sort(new CookieComparator());
        final ContentChannel channel = ResponseDispatch.newInstance(Response.Status.OK).connect(handler);
        channel.write(StandardCharsets.UTF_8.encode(sortedCookies.toString()), null);
        channel.close(null);
        return null;
    }
}
// Responds 200 OK with the request parameters rendered in TreeMap (key-sorted) order,
// e.g. "{a=[b], c=[d]}". The response channel is returned from handleRequest, so any
// request body that reaches the handler is appended after the parameter map — the
// tests above rely on this to observe whether the raw form body was stripped.
private static class ParameterPrinterRequestHandler extends AbstractRequestHandler {
    private static final CompletionHandler NULL_COMPLETION_HANDLER = null;
    @Override
    public ContentChannel handleRequest(Request request, ResponseHandler handler) {
        Map<String, List<String>> parameters = new TreeMap<>(((HttpRequest)request).parameters());
        ContentChannel responseContentChannel = ResponseDispatch.newInstance(Response.Status.OK).connect(handler);
        responseContentChannel.write(ByteBuffer.wrap(parameters.toString().getBytes(StandardCharsets.UTF_8)),
                                     NULL_COMPLETION_HANDLER);
        return responseContentChannel;
    }
}
// Responds 200 OK, optionally overriding the response's request-type classification
// (e.g. READ/WRITE) when setRequestType has been called; null leaves the default.
private static class RequestTypeHandler extends AbstractRequestHandler {
    private Request.RequestType requestType = null;
    public void setRequestType(Request.RequestType requestType) {
        this.requestType = requestType;
    }
    @Override
    public ContentChannel handleRequest(Request request, ResponseHandler handler) {
        Response response = new Response(OK);
        response.setRequestType(requestType);
        return handler.handleResponse(response);
    }
}
// Always throws from handleRequest; used to verify the container converts handler
// exceptions into a 500 Internal Server Error instead of breaking the connection.
private static class ThrowingHandler extends AbstractRequestHandler {
    @Override
    public ContentChannel handleRequest(final Request request, final ResponseHandler handler) {
        throw new RuntimeException("Deliberately thrown exception");
    }
}
// Never responds: sets a 100 ms request timeout and stashes the response handler so
// a test can dispatch a (late) response after the server-side timeout has fired.
private static class UnresponsiveHandler extends AbstractRequestHandler {
    ResponseHandler responseHandler;
    @Override
    public ContentChannel handleRequest(final Request request, final ResponseHandler handler) {
        request.setTimeout(100, TimeUnit.MILLISECONDS);
        responseHandler = handler;
        return null;
    }
}
// Immediately completes an empty 200 OK response; request content is discarded via
// the NullContent sink.
private static class OkRequestHandler extends AbstractRequestHandler {
    @Override
    public ContentChannel handleRequest(Request request, ResponseHandler handler) {
        Response response = new Response(OK);
        handler.handleResponse(response).close(null);
        return NullContent.INSTANCE;
    }
}
// Responds 200 OK carrying one configured response header (name/value fixed at
// construction). handleRequest returns the response channel, so request content is
// passed through to the response body.
private static class EchoWithHeaderRequestHandler extends AbstractRequestHandler {
    final String headerName;
    final String headerValue;
    EchoWithHeaderRequestHandler(final String headerName, final String headerValue) {
        this.headerName = headerName;
        this.headerValue = headerValue;
    }
    @Override
    public ContentChannel handleRequest(final Request request, final ResponseHandler handler) {
        final Response response = new Response(OK);
        response.headers().add(headerName, headerValue);
        return handler.handleResponse(response);
    }
}
// Guice module that binds a BindingSetSelector which always selects the given
// binding-set name, regardless of the request URI. Used to drive the
// "binding set not found" test case.
private static Module newBindingSetSelector(final String setName) {
    return new AbstractModule() {
        @Override
        protected void configure() {
            bind(BindingSetSelector.class).toInstance(new BindingSetSelector() {
                @Override
                public String select(final URI uri) {
                    return setName;
                }
            });
        }
    };
}
/** Orders cookies lexicographically by their name. */
private static class CookieComparator implements Comparator<Cookie> {
    @Override
    public int compare(final Cookie first, final Cookie second) {
        return first.getName().compareTo(second.getName());
    }
}
} | class HttpServerTest {
@Rule
public TemporaryFolder tmpFolder = new TemporaryFolder();
@Test
public void requireThatServerCanListenToRandomPort() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(mockRequestHandler());
assertNotEquals(0, driver.server().getListenPort());
assertTrue(driver.close());
}
@Test
public void requireThatServerCanNotListenToBoundPort() throws Exception {
    final JettyTestDriver driver = JettyTestDriver.newInstance(mockRequestHandler());
    try {
        // Attempt to bind a second server to the port already held by 'driver';
        // this is expected to throw with a BindException as the cause.
        JettyTestDriver.newConfiguredInstance(
                mockRequestHandler(),
                new ServerConfig.Builder(),
                new ConnectorConfig.Builder()
                        .listenPort(driver.server().getListenPort())
        );
    } catch (final Throwable t) {
        assertThat(t.getCause(), instanceOf(BindException.class));
    }
    // NOTE(review): if the second bind unexpectedly succeeds, no assertion fires and
    // the test passes vacuously — consider adding fail() after newConfiguredInstance.
    assertTrue(driver.close());
}
@Test
public void requireThatBindingSetNotFoundReturns404() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
mockRequestHandler(),
new ServerConfig.Builder()
.developerMode(true),
new ConnectorConfig.Builder(),
newBindingSetSelector("unknown"));
driver.client().get("/status.html")
.expectStatusCode(is(NOT_FOUND))
.expectContent(containsPattern(Pattern.compile(
Pattern.quote(BindingSetNotFoundException.class.getName()) +
": No binding set named 'unknown'\\.\n\tat .+",
Pattern.DOTALL | Pattern.MULTILINE)));
assertTrue(driver.close());
}
@Test
public void requireThatTooLongInitLineReturns414() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
mockRequestHandler(),
new ServerConfig.Builder(),
new ConnectorConfig.Builder()
.requestHeaderSize(1));
driver.client().get("/status.html")
.expectStatusCode(is(REQUEST_URI_TOO_LONG));
assertTrue(driver.close());
}
@Test
public void requireThatAccessLogIsCalledForRequestRejectedByJetty() throws Exception {
BlockingQueueRequestLog requestLogMock = new BlockingQueueRequestLog();
final JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
mockRequestHandler(),
new ServerConfig.Builder(),
new ConnectorConfig.Builder().requestHeaderSize(1),
binder -> binder.bind(RequestLog.class).toInstance(requestLogMock));
driver.client().get("/status.html")
.expectStatusCode(is(REQUEST_URI_TOO_LONG));
RequestLogEntry entry = requestLogMock.poll(Duration.ofSeconds(5));
assertEquals(414, entry.statusCode().getAsInt());
assertThat(driver.close(), is(true));
}
@Test
public void requireThatServerCanEcho() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoRequestHandler());
driver.client().get("/status.html")
.expectStatusCode(is(OK));
assertTrue(driver.close());
}
@Test
public void requireThatServerCanEchoCompressed() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoRequestHandler());
SimpleHttpClient client = driver.newClient(true);
client.get("/status.html")
.expectStatusCode(is(OK));
assertTrue(driver.close());
}
@Test
public void requireThatServerCanHandleMultipleRequests() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoRequestHandler());
driver.client().get("/status.html")
.expectStatusCode(is(OK));
driver.client().get("/status.html")
.expectStatusCode(is(OK));
assertTrue(driver.close());
}
@Test
public void requireThatFormPostWorks() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final String requestContent = generateContent('a', 30);
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent(requestContent)
.execute();
response.expectStatusCode(is(OK))
.expectContent(startsWith('{' + requestContent + "=[]}"));
assertTrue(driver.close());
}
@Test
public void requireThatFormPostDoesNotRemoveContentByDefault() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent("foo=bar")
.execute();
response.expectStatusCode(is(OK))
.expectContent(is("{foo=[bar]}foo=bar"));
assertTrue(driver.close());
}
@Test
public void requireThatFormPostKeepsContentWhenConfiguredTo() throws Exception {
final JettyTestDriver driver = newDriverWithFormPostContentRemoved(new ParameterPrinterRequestHandler(), false);
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent("foo=bar")
.execute();
response.expectStatusCode(is(OK))
.expectContent(is("{foo=[bar]}foo=bar"));
assertTrue(driver.close());
}
@Test
public void requireThatFormPostRemovesContentWhenConfiguredTo() throws Exception {
final JettyTestDriver driver = newDriverWithFormPostContentRemoved(new ParameterPrinterRequestHandler(), true);
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent("foo=bar")
.execute();
response.expectStatusCode(is(OK))
.expectContent(is("{foo=[bar]}"));
assertTrue(driver.close());
}
@Test
public void requireThatFormPostWithCharsetSpecifiedWorks() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final String requestContent = generateContent('a', 30);
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(X_DISABLE_CHUNKING, "true")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED + ";charset=UTF-8")
.setContent(requestContent)
.execute();
response.expectStatusCode(is(OK))
.expectContent(startsWith('{' + requestContent + "=[]}"));
assertTrue(driver.close());
}
@Test
public void requireThatEmptyFormPostWorks() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.execute();
response.expectStatusCode(is(OK))
.expectContent(is("{}"));
assertTrue(driver.close());
}
@Test
public void requireThatFormParametersAreParsed() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent("a=b&c=d")
.execute();
response.expectStatusCode(is(OK))
.expectContent(startsWith("{a=[b], c=[d]}"));
assertTrue(driver.close());
}
@Test
public void requireThatUriParametersAreParsed() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html?a=b&c=d")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.execute();
response.expectStatusCode(is(OK))
.expectContent(is("{a=[b], c=[d]}"));
assertTrue(driver.close());
}
@Test
public void requireThatFormAndUriParametersAreMerged() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html?a=b&c=d1")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent("c=d2&e=f")
.execute();
response.expectStatusCode(is(OK))
.expectContent(startsWith("{a=[b], c=[d1, d2], e=[f]}"));
assertTrue(driver.close());
}
@Test
public void requireThatFormCharsetIsHonored() throws Exception {
final JettyTestDriver driver = newDriverWithFormPostContentRemoved(new ParameterPrinterRequestHandler(), true);
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED + ";charset=ISO-8859-1")
.setBinaryContent(new byte[]{66, (byte) 230, 114, 61, 98, 108, (byte) 229})
.execute();
response.expectStatusCode(is(OK))
.expectContent(is("{B\u00e6r=[bl\u00e5]}"));
assertTrue(driver.close());
}
@Test
public void requireThatUnknownFormCharsetIsTreatedAsBadRequest() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED + ";charset=FLARBA-GARBA-7")
.setContent("a=b")
.execute();
response.expectStatusCode(is(UNSUPPORTED_MEDIA_TYPE));
assertTrue(driver.close());
}
@Test
public void requireThatFormPostWithPercentEncodedContentIsDecoded() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent("%20%3D%C3%98=%22%25+")
.execute();
response.expectStatusCode(is(OK))
.expectContent(startsWith("{ =\u00d8=[\"% ]}"));
assertTrue(driver.close());
}
@Test
public void requireThatFormPostWithThrowingHandlerIsExceptionSafe() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ThrowingHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent("a=b")
.execute();
response.expectStatusCode(is(INTERNAL_SERVER_ERROR));
assertTrue(driver.close());
}
@Test
public void requireThatMultiPostWorks() throws Exception {
    // Verifies that a multipart/form-data POST is echoed back with the content of
    // every part present in the response body.
    final String startTxtContent = "this is a test for POST.";
    final String updaterConfContent
            = "identifier = updater\n"
            + "server_type = gds\n";
    final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoRequestHandler());
    final ResponseValidator response =
            driver.client().newPost("/status.html")
                    .setMultipartContent(
                            newFileBody("start.txt", startTxtContent),
                            newFileBody("updater.conf", updaterConfContent))
                    .execute();
    response.expectStatusCode(is(OK))
            .expectContent(containsString(startTxtContent))
            .expectContent(containsString(updaterConfContent));
    // Fix: shut the driver down like every other test in this class; the original
    // omitted the close and leaked the server into subsequent tests.
    assertTrue(driver.close());
}
@Test
public void requireThatRequestCookiesAreReceived() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new CookiePrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(COOKIE, "foo=bar")
.execute();
response.expectStatusCode(is(OK))
.expectContent(containsString("[foo=bar]"));
assertTrue(driver.close());
}
@Test
public void requireThatSetCookieHeaderIsCorrect() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new CookieSetterRequestHandler(
new Cookie("foo", "bar")
.setDomain(".localhost")
.setHttpOnly(true)
.setPath("/foopath")
.setSecure(true)));
driver.client().get("/status.html")
.expectStatusCode(is(OK))
.expectHeader("Set-Cookie",
is("foo=bar; Path=/foopath; Domain=.localhost; Secure; HttpOnly"));
assertTrue(driver.close());
}
@Test
public void requireThatTimeoutWorks() throws Exception {
final UnresponsiveHandler requestHandler = new UnresponsiveHandler();
final JettyTestDriver driver = JettyTestDriver.newInstance(requestHandler);
driver.client().get("/status.html")
.expectStatusCode(is(GATEWAY_TIMEOUT));
ResponseDispatch.newInstance(OK).dispatch(requestHandler.responseHandler);
assertTrue(driver.close());
}
@Test
public void requireThatHeaderWithNullValueIsOmitted() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoWithHeaderRequestHandler("X-Foo", null));
driver.client().get("/status.html")
.expectStatusCode(is(OK))
.expectNoHeader("X-Foo");
assertTrue(driver.close());
}
@Test
public void requireThatHeaderWithEmptyValueIsAllowed() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoWithHeaderRequestHandler("X-Foo", ""));
driver.client().get("/status.html")
.expectStatusCode(is(OK))
.expectHeader("X-Foo", is(""));
assertTrue(driver.close());
}
@Test
public void requireThatNoConnectionHeaderMeansKeepAliveInHttp11KeepAliveDisabled() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoWithHeaderRequestHandler(CONNECTION, CLOSE));
driver.client().get("/status.html")
.expectHeader(CONNECTION, is(CLOSE));
assertThat(driver.close(), is(true));
}
// Fix: removed a duplicated @Test annotation — JUnit 4's @Test is not a repeatable
// annotation, so the duplicate made this method (and the file) uncompilable.
@Test
public void requireThatServerCanRespondToSslRequest() throws Exception {
    Path privateKeyFile = tmpFolder.newFile().toPath();
    Path certificateFile = tmpFolder.newFile().toPath();
    generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
    final JettyTestDriver driver = JettyTestDriver.newInstanceWithSsl(new EchoRequestHandler(), certificateFile, privateKeyFile, TlsClientAuth.WANT);
    driver.client().get("/status.html")
            .expectStatusCode(is(OK));
    assertTrue(driver.close());
}
@Test
public void requireThatServerCanRespondToHttp2Request() throws Exception {
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
MetricConsumerMock metricConsumer = new MetricConsumerMock();
InMemoryConnectionLog connectionLog = new InMemoryConnectionLog();
JettyTestDriver driver = createSslTestDriver(certificateFile, privateKeyFile, metricConsumer, connectionLog);
try (CloseableHttpAsyncClient client = createHttp2Client(driver)) {
String uri = "https:
SimpleHttpResponse response = client.execute(SimpleRequestBuilder.get(uri).build(), null).get();
assertNull(response.getBodyText());
assertEquals(OK, response.getCode());
}
assertTrue(driver.close());
ConnectionLogEntry entry = connectionLog.logEntries().get(0);
assertEquals("HTTP/2.0", entry.httpProtocol().get());
}
@Test
public void requireThatTlsClientAuthenticationEnforcerRejectsRequestsForNonWhitelistedPaths() throws IOException {
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
JettyTestDriver driver = createSslWithTlsClientAuthenticationEnforcer(certificateFile, privateKeyFile);
SSLContext trustStoreOnlyCtx = new SslContextBuilder()
.withTrustStore(certificateFile)
.build();
new SimpleHttpClient(trustStoreOnlyCtx, driver.server().getListenPort(), false)
.get("/dummy.html")
.expectStatusCode(is(UNAUTHORIZED));
assertTrue(driver.close());
}
@Test
public void requireThatTlsClientAuthenticationEnforcerAllowsRequestForWhitelistedPaths() throws IOException {
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
JettyTestDriver driver = JettyTestDriver.newInstanceWithSsl(new EchoRequestHandler(), certificateFile, privateKeyFile, TlsClientAuth.WANT);
SSLContext trustStoreOnlyCtx = new SslContextBuilder()
.withTrustStore(certificateFile)
.build();
new SimpleHttpClient(trustStoreOnlyCtx, driver.server().getListenPort(), false)
.get("/status.html")
.expectStatusCode(is(OK));
assertTrue(driver.close());
}
@Test
public void requireThatConnectedAtReturnsNonZero() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ConnectedAtRequestHandler());
driver.client().get("/status.html")
.expectStatusCode(is(OK))
.expectContent(matchesPattern("\\d{13,}"));
assertThat(driver.close(), is(true));
}
@Test
public void requireThatGzipEncodingRequestsAreAutomaticallyDecompressed() throws Exception {
JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
String requestContent = generateContent('a', 30);
ResponseValidator response = driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setGzipContent(requestContent)
.execute();
response.expectStatusCode(is(OK))
.expectContent(startsWith('{' + requestContent + "=[]}"));
assertTrue(driver.close());
}
@Test
public void requireThatResponseStatsAreCollected() throws Exception {
RequestTypeHandler handler = new RequestTypeHandler();
JettyTestDriver driver = JettyTestDriver.newInstance(handler);
HttpResponseStatisticsCollector statisticsCollector = ((AbstractHandlerContainer) driver.server().server().getHandler())
.getChildHandlerByClass(HttpResponseStatisticsCollector.class);
{
List<HttpResponseStatisticsCollector.StatisticsEntry> stats = statisticsCollector.takeStatistics();
assertEquals(0, stats.size());
}
{
driver.client().newPost("/status.html").execute();
var entry = waitForStatistics(statisticsCollector);
assertEquals("http", entry.scheme);
assertEquals("POST", entry.method);
assertEquals("http.status.2xx", entry.name);
assertEquals("write", entry.requestType);
assertEquals(1, entry.value);
}
{
driver.client().newGet("/status.html").execute();
var entry = waitForStatistics(statisticsCollector);
assertEquals("http", entry.scheme);
assertEquals("GET", entry.method);
assertEquals("http.status.2xx", entry.name);
assertEquals("read", entry.requestType);
assertEquals(1, entry.value);
}
{
handler.setRequestType(Request.RequestType.READ);
driver.client().newPost("/status.html").execute();
var entry = waitForStatistics(statisticsCollector);
assertEquals("Handler overrides request type", "read", entry.requestType);
}
assertTrue(driver.close());
}
// Polls the collector until at least one statistics entry becomes available
// (entries are published asynchronously after the response completes), then
// asserts exactly one entry was produced and returns it.
private HttpResponseStatisticsCollector.StatisticsEntry waitForStatistics(HttpResponseStatisticsCollector
                                                                                  statisticsCollector) {
    List<HttpResponseStatisticsCollector.StatisticsEntry> entries = Collections.emptyList();
    int tries = 0;
    while (entries.isEmpty() && tries < 10000) {
        entries = statisticsCollector.takeStatistics();
        if (entries.isEmpty()) {
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                // Restore the interrupt status instead of silently swallowing it,
                // and stop waiting so an interrupted test fails fast.
                Thread.currentThread().interrupt();
                break;
            }
        }
        tries++;
    }
    assertEquals(1, entries.size());
    return entries.get(0);
}
@Test
public void requireThatConnectionThrottleDoesNotBlockConnectionsBelowThreshold() throws Exception {
JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
new EchoRequestHandler(),
new ServerConfig.Builder(),
new ConnectorConfig.Builder()
.throttling(new Throttling.Builder()
.enabled(true)
.maxAcceptRate(10)
.maxHeapUtilization(1.0)
.maxConnections(10)));
driver.client().get("/status.html")
.expectStatusCode(is(OK));
assertTrue(driver.close());
}
@Test
public void requireThatConnectionIsTrackedInConnectionLog() throws Exception {
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
InMemoryConnectionLog connectionLog = new InMemoryConnectionLog();
Module overrideModule = binder -> binder.bind(ConnectionLog.class).toInstance(connectionLog);
JettyTestDriver driver = JettyTestDriver.newInstanceWithSsl(new OkRequestHandler(), certificateFile, privateKeyFile, TlsClientAuth.NEED, overrideModule);
int listenPort = driver.server().getListenPort();
StringBuilder builder = new StringBuilder();
for (int i = 0; i < 1000; i++) {
builder.append(i);
}
byte[] content = builder.toString().getBytes();
for (int i = 0; i < 100; i++) {
driver.client().newPost("/status.html").setBinaryContent(content).execute()
.expectStatusCode(is(OK));
}
assertTrue(driver.close());
List<ConnectionLogEntry> logEntries = connectionLog.logEntries();
Assertions.assertThat(logEntries).hasSize(1);
ConnectionLogEntry logEntry = logEntries.get(0);
assertEquals(4, UUID.fromString(logEntry.id()).version());
Assertions.assertThat(logEntry.timestamp()).isAfter(Instant.EPOCH);
Assertions.assertThat(logEntry.requests()).hasValue(100L);
Assertions.assertThat(logEntry.responses()).hasValue(100L);
Assertions.assertThat(logEntry.peerAddress()).hasValue("127.0.0.1");
Assertions.assertThat(logEntry.localAddress()).hasValue("127.0.0.1");
Assertions.assertThat(logEntry.localPort()).hasValue(listenPort);
Assertions.assertThat(logEntry.httpBytesReceived()).hasValueSatisfying(value -> Assertions.assertThat(value).isGreaterThan(100000L));
Assertions.assertThat(logEntry.httpBytesSent()).hasValueSatisfying(value -> Assertions.assertThat(value).isGreaterThan(10000L));
Assertions.assertThat(logEntry.sslProtocol()).hasValueSatisfying(TlsContext.ALLOWED_PROTOCOLS::contains);
Assertions.assertThat(logEntry.sslPeerSubject()).hasValue("CN=localhost");
Assertions.assertThat(logEntry.sslCipherSuite()).hasValueSatisfying(cipher -> Assertions.assertThat(cipher).isNotBlank());
Assertions.assertThat(logEntry.sslSessionId()).hasValueSatisfying(sessionId -> Assertions.assertThat(sessionId).hasSize(64));
Assertions.assertThat(logEntry.sslPeerNotBefore()).hasValue(Instant.EPOCH);
Assertions.assertThat(logEntry.sslPeerNotAfter()).hasValue(Instant.EPOCH.plus(100_000, ChronoUnit.DAYS));
}
@Test
public void requireThatRequestIsTrackedInAccessLog() throws IOException, InterruptedException {
BlockingQueueRequestLog requestLogMock = new BlockingQueueRequestLog();
JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
new EchoRequestHandler(),
new ServerConfig.Builder(),
new ConnectorConfig.Builder(),
binder -> binder.bind(RequestLog.class).toInstance(requestLogMock));
driver.client().newPost("/status.html").setContent("abcdef").execute().expectStatusCode(is(OK));
RequestLogEntry entry = requestLogMock.poll(Duration.ofSeconds(5));
Assertions.assertThat(entry.statusCode()).hasValue(200);
Assertions.assertThat(entry.requestSize()).hasValue(6);
assertThat(driver.close(), is(true));
}
@Test
public void requireThatRequestsPerConnectionMetricIsAggregated() throws IOException {
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
var metricConsumer = new MetricConsumerMock();
InMemoryConnectionLog connectionLog = new InMemoryConnectionLog();
JettyTestDriver driver = createSslTestDriver(certificateFile, privateKeyFile, metricConsumer, connectionLog);
driver.client().get("/").expectStatusCode(is(OK));
assertThat(driver.close(), is(true));
verify(metricConsumer.mockitoMock(), atLeast(1))
.set(MetricDefinitions.REQUESTS_PER_CONNECTION, 1L, MetricConsumerMock.STATIC_CONTEXT);
}
@Test
public void uriWithEmptyPathSegmentIsAllowed() throws Exception {
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
MetricConsumerMock metricConsumer = new MetricConsumerMock();
InMemoryConnectionLog connectionLog = new InMemoryConnectionLog();
JettyTestDriver driver = createSslTestDriver(certificateFile, privateKeyFile, metricConsumer, connectionLog);
String uriPath = "/path/with/empty
driver.client().get(uriPath).expectStatusCode(is(OK));
try (CloseableHttpAsyncClient client = createHttp2Client(driver)) {
String uri = "https:
SimpleHttpResponse response = client.execute(SimpleRequestBuilder.get(uri).build(), null).get();
assertEquals(OK, response.getCode());
}
assertTrue(driver.close());
}
private static CloseableHttpAsyncClient createHttp2Client(JettyTestDriver driver) {
TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create()
.setSslContext(driver.sslContext())
.build();
var client = H2AsyncClientBuilder.create()
.disableAutomaticRetries()
.setTlsStrategy(tlsStrategy)
.build();
client.start();
return client;
}
private static JettyTestDriver createSslWithTlsClientAuthenticationEnforcer(Path certificateFile, Path privateKeyFile) {
ConnectorConfig.Builder connectorConfig = new ConnectorConfig.Builder()
.tlsClientAuthEnforcer(
new ConnectorConfig.TlsClientAuthEnforcer.Builder()
.enable(true)
.pathWhitelist("/status.html"))
.ssl(new ConnectorConfig.Ssl.Builder()
.enabled(true)
.clientAuth(ConnectorConfig.Ssl.ClientAuth.Enum.WANT_AUTH)
.privateKeyFile(privateKeyFile.toString())
.certificateFile(certificateFile.toString())
.caCertificateFile(certificateFile.toString()));
return JettyTestDriver.newConfiguredInstance(
new EchoRequestHandler(),
new ServerConfig.Builder().connectionLog(new ServerConfig.ConnectionLog.Builder().enabled(true)),
connectorConfig,
binder -> {});
}
// Builds a Mockito-backed RequestHandler whose refer() hands out a no-op
// reference, so the server can manage handler lifecycle without a real handler.
private static RequestHandler mockRequestHandler() {
    RequestHandler handler = mock(RequestHandler.class);
    when(handler.refer()).thenReturn(References.NOOP_REFERENCE);
    return handler;
}
// Produces a string consisting of the character c repeated len times.
private static String generateContent(final char c, final int len) {
    final StringBuilder buf = new StringBuilder(len);
    int remaining = len;
    while (remaining-- > 0) {
        buf.append(c);
    }
    return buf.toString();
}
private static JettyTestDriver newDriverWithFormPostContentRemoved(RequestHandler requestHandler,
boolean removeFormPostBody) throws Exception {
return JettyTestDriver.newConfiguredInstance(
requestHandler,
new ServerConfig.Builder()
.removeRawPostBodyForWwwUrlEncodedPost(removeFormPostBody),
new ConnectorConfig.Builder());
}
private static FormBodyPart newFileBody(final String fileName, final String fileContent) {
return FormBodyPartBuilder.create()
.setBody(
new StringBody(fileContent, ContentType.TEXT_PLAIN) {
@Override public String getFilename() { return fileName; }
@Override public String getMimeType() { return ""; }
@Override public String getCharset() { return null; }
})
.setName(fileName)
.build();
}
// Test handler that replies 200 OK with the connection's establishment time
// (epoch milliseconds, as reported by HttpRequest.getConnectedAt) as the body.
private static class ConnectedAtRequestHandler extends AbstractRequestHandler {
@Override
public ContentChannel handleRequest(final Request request, final ResponseHandler handler) {
final HttpRequest httpRequest = (HttpRequest)request;
// Epoch millis rendered as a decimal string; callers match it with a \d{13,} pattern.
final String connectedAt = String.valueOf(httpRequest.getConnectedAt(TimeUnit.MILLISECONDS));
final ContentChannel ch = handler.handleResponse(new Response(OK));
ch.write(ByteBuffer.wrap(connectedAt.getBytes(StandardCharsets.UTF_8)), null);
ch.close(null);
// Returning null: this handler does not consume any request content.
return null;
}
}
private static class CookieSetterRequestHandler extends AbstractRequestHandler {
final Cookie cookie;
CookieSetterRequestHandler(final Cookie cookie) {
this.cookie = cookie;
}
@Override
public ContentChannel handleRequest(final Request request, final ResponseHandler handler) {
final HttpResponse response = HttpResponse.newInstance(OK);
response.encodeSetCookieHeader(Collections.singletonList(cookie));
ResponseDispatch.newInstance(response).dispatch(handler);
return null;
}
}
private static class CookiePrinterRequestHandler extends AbstractRequestHandler {
@Override
public ContentChannel handleRequest(final Request request, final ResponseHandler handler) {
final List<Cookie> cookies = new ArrayList<>(((HttpRequest)request).decodeCookieHeader());
Collections.sort(cookies, new CookieComparator());
final ContentChannel out = ResponseDispatch.newInstance(Response.Status.OK).connect(handler);
out.write(StandardCharsets.UTF_8.encode(cookies.toString()), null);
out.close(null);
return null;
}
}
private static class ParameterPrinterRequestHandler extends AbstractRequestHandler {
private static final CompletionHandler NULL_COMPLETION_HANDLER = null;
@Override
public ContentChannel handleRequest(Request request, ResponseHandler handler) {
Map<String, List<String>> parameters = new TreeMap<>(((HttpRequest)request).parameters());
ContentChannel responseContentChannel = ResponseDispatch.newInstance(Response.Status.OK).connect(handler);
responseContentChannel.write(ByteBuffer.wrap(parameters.toString().getBytes(StandardCharsets.UTF_8)),
NULL_COMPLETION_HANDLER);
return responseContentChannel;
}
}
private static class RequestTypeHandler extends AbstractRequestHandler {
private Request.RequestType requestType = null;
public void setRequestType(Request.RequestType requestType) {
this.requestType = requestType;
}
@Override
public ContentChannel handleRequest(Request request, ResponseHandler handler) {
Response response = new Response(OK);
response.setRequestType(requestType);
return handler.handleResponse(response);
}
}
private static class ThrowingHandler extends AbstractRequestHandler {
@Override
public ContentChannel handleRequest(final Request request, final ResponseHandler handler) {
throw new RuntimeException("Deliberately thrown exception");
}
}
private static class UnresponsiveHandler extends AbstractRequestHandler {
ResponseHandler responseHandler;
@Override
public ContentChannel handleRequest(final Request request, final ResponseHandler handler) {
request.setTimeout(100, TimeUnit.MILLISECONDS);
responseHandler = handler;
return null;
}
}
private static class OkRequestHandler extends AbstractRequestHandler {
@Override
public ContentChannel handleRequest(Request request, ResponseHandler handler) {
Response response = new Response(OK);
handler.handleResponse(response).close(null);
return NullContent.INSTANCE;
}
}
// Test handler that answers 200 OK with a single fixed response header
// (name/value supplied at construction) and echoes the request content channel.
private static class EchoWithHeaderRequestHandler extends AbstractRequestHandler {
final String headerName;
final String headerValue;
EchoWithHeaderRequestHandler(final String headerName, final String headerValue) {
this.headerName = headerName;
this.headerValue = headerValue;
}
@Override
public ContentChannel handleRequest(final Request request, final ResponseHandler handler) {
final Response response = new Response(OK);
// A null headerValue is passed through; tests rely on such headers being omitted.
response.headers().add(headerName, headerValue);
return handler.handleResponse(response);
}
}
private static Module newBindingSetSelector(final String setName) {
return new AbstractModule() {
@Override
protected void configure() {
bind(BindingSetSelector.class).toInstance(new BindingSetSelector() {
@Override
public String select(final URI uri) {
return setName;
}
});
}
};
}
// Orders cookies alphabetically by name so handler output is deterministic.
private static class CookieComparator implements Comparator<Cookie> {
    @Override
    public int compare(final Cookie first, final Cookie second) {
        return first.getName().compareTo(second.getName());
    }
}
} | |
Yes, this is the sad thing with Apache http client 5 and the reason why this is "do not merge". | public void requireThatConnectionIsClosedAfterXRequests() throws Exception {
    // With maxRequestsPerConnection = N: over HTTP/1.1 the first N-1 responses carry
    // no Connection header; the N-th carries "Connection: close". Over HTTP/2 the
    // N-th stream is refused (H2StreamResetException).
    final int MAX_REQUESTS = 10;
    Path privateKeyFile = tmpFolder.newFile().toPath();
    Path certificateFile = tmpFolder.newFile().toPath();
    generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
    ConnectorConfig.Builder connectorConfig = new ConnectorConfig.Builder()
            .maxRequestsPerConnection(MAX_REQUESTS)
            .ssl(new ConnectorConfig.Ssl.Builder()
                    .enabled(true)
                    .clientAuth(ConnectorConfig.Ssl.ClientAuth.Enum.NEED_AUTH)
                    .privateKeyFile(privateKeyFile.toString())
                    .certificateFile(certificateFile.toString())
                    .caCertificateFile(certificateFile.toString()));
    ServerConfig.Builder serverConfig = new ServerConfig.Builder()
            .connectionLog(new ServerConfig.ConnectionLog.Builder().enabled(true));
    JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
            new EchoRequestHandler(),
            serverConfig,
            connectorConfig,
            binder -> {});
    for (int i = 0; i < MAX_REQUESTS - 1; i++) {
        driver.client().get("/status.html")
                .expectStatusCode(is(OK))
                .expectNoHeader(CONNECTION);
    }
    driver.client().get("/status.html")
            .expectStatusCode(is(OK))
            .expectHeader(CONNECTION, is(CLOSE));
    try (CloseableHttpAsyncClient client = createHttp2Client(driver)) {
        // NOTE(review): the URI literal was truncated ("https:") — reconstructed
        // from the server's listen port; confirm the original path.
        String uri = "https://localhost:" + driver.server().getListenPort() + "/status.html";
        for (int i = 0; i < MAX_REQUESTS - 1; i++) {
            SimpleHttpResponse response = client.execute(SimpleRequestBuilder.get(uri).build(), null).get();
            assertEquals(OK, response.getCode());
        }
        try {
            client.execute(SimpleRequestBuilder.get(uri).build(), null).get();
            fail();
        } catch (ExecutionException e) {
            // assertEquals(expected, actual): expected message goes first — the
            // original call had the arguments swapped.
            assertEquals("org.apache.hc.core5.http2.H2StreamResetException: Stream refused", e.getMessage());
        }
    }
    assertTrue(driver.close());
} | public void requireThatConnectionIsClosedAfterXRequests() throws Exception {
final int MAX_REQUESTS = 10;
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
ConnectorConfig.Builder connectorConfig = new ConnectorConfig.Builder()
        .maxRequestsPerConnection(MAX_REQUESTS)
        .ssl(new ConnectorConfig.Ssl.Builder()
                .enabled(true)
                .clientAuth(ConnectorConfig.Ssl.ClientAuth.Enum.NEED_AUTH)
                .privateKeyFile(privateKeyFile.toString())
                .certificateFile(certificateFile.toString())
                .caCertificateFile(certificateFile.toString()));
ServerConfig.Builder serverConfig = new ServerConfig.Builder()
        .connectionLog(new ServerConfig.ConnectionLog.Builder().enabled(true));
JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
        new EchoRequestHandler(),
        serverConfig,
        connectorConfig,
        binder -> {});
// HTTP/1.1: first MAX_REQUESTS-1 responses have no Connection header.
for (int i = 0; i < MAX_REQUESTS - 1; i++) {
    driver.client().get("/status.html")
            .expectStatusCode(is(OK))
            .expectNoHeader(CONNECTION);
}
// The final permitted request is answered with "Connection: close".
driver.client().get("/status.html")
        .expectStatusCode(is(OK))
        .expectHeader(CONNECTION, is(CLOSE));
try (CloseableHttpAsyncClient client = createHttp2Client(driver)) {
    // NOTE(review): the URI literal was truncated ("https:") — reconstructed
    // from the server's listen port; confirm the original path.
    String uri = "https://localhost:" + driver.server().getListenPort() + "/status.html";
    for (int i = 0; i < MAX_REQUESTS - 1; i++) {
        SimpleHttpResponse response = client.execute(SimpleRequestBuilder.get(uri).build(), null).get();
        assertEquals(OK, response.getCode());
    }
    try {
        client.execute(SimpleRequestBuilder.get(uri).build(), null).get();
        fail();
    } catch (ExecutionException e) {
        // assertEquals(expected, actual): expected message goes first — the
        // original call had the arguments swapped.
        assertEquals("org.apache.hc.core5.http2.H2StreamResetException: Stream refused", e.getMessage());
    }
}
assertTrue(driver.close());
} | class HttpServerTest {
@Rule
public TemporaryFolder tmpFolder = new TemporaryFolder();
@Test
public void requireThatServerCanListenToRandomPort() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(mockRequestHandler());
assertNotEquals(0, driver.server().getListenPort());
assertTrue(driver.close());
}
@Test
public void requireThatServerCanNotListenToBoundPort() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(mockRequestHandler());
try {
JettyTestDriver.newConfiguredInstance(
mockRequestHandler(),
new ServerConfig.Builder(),
new ConnectorConfig.Builder()
.listenPort(driver.server().getListenPort())
);
} catch (final Throwable t) {
assertThat(t.getCause(), instanceOf(BindException.class));
}
assertTrue(driver.close());
}
@Test
public void requireThatBindingSetNotFoundReturns404() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
mockRequestHandler(),
new ServerConfig.Builder()
.developerMode(true),
new ConnectorConfig.Builder(),
newBindingSetSelector("unknown"));
driver.client().get("/status.html")
.expectStatusCode(is(NOT_FOUND))
.expectContent(containsPattern(Pattern.compile(
Pattern.quote(BindingSetNotFoundException.class.getName()) +
": No binding set named 'unknown'\\.\n\tat .+",
Pattern.DOTALL | Pattern.MULTILINE)));
assertTrue(driver.close());
}
@Test
public void requireThatTooLongInitLineReturns414() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
mockRequestHandler(),
new ServerConfig.Builder(),
new ConnectorConfig.Builder()
.requestHeaderSize(1));
driver.client().get("/status.html")
.expectStatusCode(is(REQUEST_URI_TOO_LONG));
assertTrue(driver.close());
}
@Test
public void requireThatAccessLogIsCalledForRequestRejectedByJetty() throws Exception {
BlockingQueueRequestLog requestLogMock = new BlockingQueueRequestLog();
final JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
mockRequestHandler(),
new ServerConfig.Builder(),
new ConnectorConfig.Builder().requestHeaderSize(1),
binder -> binder.bind(RequestLog.class).toInstance(requestLogMock));
driver.client().get("/status.html")
.expectStatusCode(is(REQUEST_URI_TOO_LONG));
RequestLogEntry entry = requestLogMock.poll(Duration.ofSeconds(5));
assertEquals(414, entry.statusCode().getAsInt());
assertThat(driver.close(), is(true));
}
@Test
public void requireThatServerCanEcho() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoRequestHandler());
driver.client().get("/status.html")
.expectStatusCode(is(OK));
assertTrue(driver.close());
}
@Test
public void requireThatServerCanEchoCompressed() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoRequestHandler());
SimpleHttpClient client = driver.newClient(true);
client.get("/status.html")
.expectStatusCode(is(OK));
assertTrue(driver.close());
}
@Test
public void requireThatServerCanHandleMultipleRequests() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoRequestHandler());
driver.client().get("/status.html")
.expectStatusCode(is(OK));
driver.client().get("/status.html")
.expectStatusCode(is(OK));
assertTrue(driver.close());
}
@Test
public void requireThatFormPostWorks() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final String requestContent = generateContent('a', 30);
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent(requestContent)
.execute();
response.expectStatusCode(is(OK))
.expectContent(startsWith('{' + requestContent + "=[]}"));
assertTrue(driver.close());
}
@Test
public void requireThatFormPostDoesNotRemoveContentByDefault() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent("foo=bar")
.execute();
response.expectStatusCode(is(OK))
.expectContent(is("{foo=[bar]}foo=bar"));
assertTrue(driver.close());
}
@Test
public void requireThatFormPostKeepsContentWhenConfiguredTo() throws Exception {
final JettyTestDriver driver = newDriverWithFormPostContentRemoved(new ParameterPrinterRequestHandler(), false);
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent("foo=bar")
.execute();
response.expectStatusCode(is(OK))
.expectContent(is("{foo=[bar]}foo=bar"));
assertTrue(driver.close());
}
@Test
public void requireThatFormPostRemovesContentWhenConfiguredTo() throws Exception {
final JettyTestDriver driver = newDriverWithFormPostContentRemoved(new ParameterPrinterRequestHandler(), true);
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent("foo=bar")
.execute();
response.expectStatusCode(is(OK))
.expectContent(is("{foo=[bar]}"));
assertTrue(driver.close());
}
@Test
public void requireThatFormPostWithCharsetSpecifiedWorks() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final String requestContent = generateContent('a', 30);
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(X_DISABLE_CHUNKING, "true")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED + ";charset=UTF-8")
.setContent(requestContent)
.execute();
response.expectStatusCode(is(OK))
.expectContent(startsWith('{' + requestContent + "=[]}"));
assertTrue(driver.close());
}
@Test
public void requireThatEmptyFormPostWorks() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.execute();
response.expectStatusCode(is(OK))
.expectContent(is("{}"));
assertTrue(driver.close());
}
@Test
public void requireThatFormParametersAreParsed() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent("a=b&c=d")
.execute();
response.expectStatusCode(is(OK))
.expectContent(startsWith("{a=[b], c=[d]}"));
assertTrue(driver.close());
}
@Test
public void requireThatUriParametersAreParsed() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html?a=b&c=d")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.execute();
response.expectStatusCode(is(OK))
.expectContent(is("{a=[b], c=[d]}"));
assertTrue(driver.close());
}
@Test
public void requireThatFormAndUriParametersAreMerged() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html?a=b&c=d1")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent("c=d2&e=f")
.execute();
response.expectStatusCode(is(OK))
.expectContent(startsWith("{a=[b], c=[d1, d2], e=[f]}"));
assertTrue(driver.close());
}
@Test
public void requireThatFormCharsetIsHonored() throws Exception {
final JettyTestDriver driver = newDriverWithFormPostContentRemoved(new ParameterPrinterRequestHandler(), true);
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED + ";charset=ISO-8859-1")
.setBinaryContent(new byte[]{66, (byte) 230, 114, 61, 98, 108, (byte) 229})
.execute();
response.expectStatusCode(is(OK))
.expectContent(is("{B\u00e6r=[bl\u00e5]}"));
assertTrue(driver.close());
}
@Test
public void requireThatUnknownFormCharsetIsTreatedAsBadRequest() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED + ";charset=FLARBA-GARBA-7")
.setContent("a=b")
.execute();
response.expectStatusCode(is(UNSUPPORTED_MEDIA_TYPE));
assertTrue(driver.close());
}
@Test
public void requireThatFormPostWithPercentEncodedContentIsDecoded() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent("%20%3D%C3%98=%22%25+")
.execute();
response.expectStatusCode(is(OK))
.expectContent(startsWith("{ =\u00d8=[\"% ]}"));
assertTrue(driver.close());
}
@Test
public void requireThatFormPostWithThrowingHandlerIsExceptionSafe() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ThrowingHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent("a=b")
.execute();
response.expectStatusCode(is(INTERNAL_SERVER_ERROR));
assertTrue(driver.close());
}
@Test
public void requireThatMultiPostWorks() throws Exception {
final String startTxtContent = "this is a test for POST.";
final String updaterConfContent
= "identifier = updater\n"
+ "server_type = gds\n";
final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.setMultipartContent(
newFileBody("start.txt", startTxtContent),
newFileBody("updater.conf", updaterConfContent))
.execute();
response.expectStatusCode(is(OK))
.expectContent(containsString(startTxtContent))
.expectContent(containsString(updaterConfContent));
}
@Test
public void requireThatRequestCookiesAreReceived() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new CookiePrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(COOKIE, "foo=bar")
.execute();
response.expectStatusCode(is(OK))
.expectContent(containsString("[foo=bar]"));
assertTrue(driver.close());
}
@Test
public void requireThatSetCookieHeaderIsCorrect() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new CookieSetterRequestHandler(
new Cookie("foo", "bar")
.setDomain(".localhost")
.setHttpOnly(true)
.setPath("/foopath")
.setSecure(true)));
driver.client().get("/status.html")
.expectStatusCode(is(OK))
.expectHeader("Set-Cookie",
is("foo=bar; Path=/foopath; Domain=.localhost; Secure; HttpOnly"));
assertTrue(driver.close());
}
@Test
public void requireThatTimeoutWorks() throws Exception {
final UnresponsiveHandler requestHandler = new UnresponsiveHandler();
final JettyTestDriver driver = JettyTestDriver.newInstance(requestHandler);
driver.client().get("/status.html")
.expectStatusCode(is(GATEWAY_TIMEOUT));
ResponseDispatch.newInstance(OK).dispatch(requestHandler.responseHandler);
assertTrue(driver.close());
}
@Test
public void requireThatHeaderWithNullValueIsOmitted() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoWithHeaderRequestHandler("X-Foo", null));
driver.client().get("/status.html")
.expectStatusCode(is(OK))
.expectNoHeader("X-Foo");
assertTrue(driver.close());
}
@Test
public void requireThatHeaderWithEmptyValueIsAllowed() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoWithHeaderRequestHandler("X-Foo", ""));
driver.client().get("/status.html")
.expectStatusCode(is(OK))
.expectHeader("X-Foo", is(""));
assertTrue(driver.close());
}
@Test
public void requireThatNoConnectionHeaderMeansKeepAliveInHttp11KeepAliveDisabled() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoWithHeaderRequestHandler(CONNECTION, CLOSE));
driver.client().get("/status.html")
.expectHeader(CONNECTION, is(CLOSE));
assertThat(driver.close(), is(true));
}
// NOTE(review): removed the duplicated @Test annotation — @Test is not a repeatable
// annotation, so writing it twice on the same method does not compile.
@Test
public void requireThatServerCanRespondToSslRequest() throws Exception {
    // Generate a throwaway key/certificate pair, start an SSL server with client
    // auth WANT, and verify a plain GET over TLS is answered with 200 OK.
    Path privateKeyFile = tmpFolder.newFile().toPath();
    Path certificateFile = tmpFolder.newFile().toPath();
    generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
    final JettyTestDriver driver = JettyTestDriver.newInstanceWithSsl(new EchoRequestHandler(), certificateFile, privateKeyFile, TlsClientAuth.WANT);
    driver.client().get("/status.html")
            .expectStatusCode(is(OK));
    assertTrue(driver.close());
}
@Test
public void requireThatServerCanRespondToHttp2Request() throws Exception {
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
MetricConsumerMock metricConsumer = new MetricConsumerMock();
InMemoryConnectionLog connectionLog = new InMemoryConnectionLog();
JettyTestDriver driver = createSslTestDriver(certificateFile, privateKeyFile, metricConsumer, connectionLog);
try (CloseableHttpAsyncClient client = createHttp2Client(driver)) {
String uri = "https:
SimpleHttpResponse response = client.execute(SimpleRequestBuilder.get(uri).build(), null).get();
assertNull(response.getBodyText());
assertEquals(OK, response.getCode());
}
assertTrue(driver.close());
ConnectionLogEntry entry = connectionLog.logEntries().get(0);
assertEquals("HTTP/2.0", entry.httpProtocol().get());
}
// Verifies that a client without a certificate gets 401 on a path outside the enforcer whitelist.
@Test
public void requireThatTlsClientAuthenticationEnforcerRejectsRequestsForNonWhitelistedPaths() throws IOException {
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
JettyTestDriver driver = createSslWithTlsClientAuthenticationEnforcer(certificateFile, privateKeyFile);
// Client context has only a trust store — no client certificate is presented.
SSLContext trustStoreOnlyCtx = new SslContextBuilder()
.withTrustStore(certificateFile)
.build();
new SimpleHttpClient(trustStoreOnlyCtx, driver.server().getListenPort(), false)
.get("/dummy.html")
.expectStatusCode(is(UNAUTHORIZED));
assertTrue(driver.close());
}
// Verifies that a certificate-less client is still allowed on a whitelisted path ("/status.html").
@Test
public void requireThatTlsClientAuthenticationEnforcerAllowsRequestForWhitelistedPaths() throws IOException {
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
JettyTestDriver driver = JettyTestDriver.newInstanceWithSsl(new EchoRequestHandler(), certificateFile, privateKeyFile, TlsClientAuth.WANT);
SSLContext trustStoreOnlyCtx = new SslContextBuilder()
.withTrustStore(certificateFile)
.build();
new SimpleHttpClient(trustStoreOnlyCtx, driver.server().getListenPort(), false)
.get("/status.html")
.expectStatusCode(is(OK));
assertTrue(driver.close());
}
// Verifies that the request exposes a plausible epoch-millis connection timestamp (13+ digits).
@Test
public void requireThatConnectedAtReturnsNonZero() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ConnectedAtRequestHandler());
driver.client().get("/status.html")
.expectStatusCode(is(OK))
.expectContent(matchesPattern("\\d{13,}"));
assertThat(driver.close(), is(true));
}
// Verifies that a gzip-compressed request body is transparently decompressed before parsing.
@Test
public void requireThatGzipEncodingRequestsAreAutomaticallyDecompressed() throws Exception {
JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
String requestContent = generateContent('a', 30);
ResponseValidator response = driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setGzipContent(requestContent)
.execute();
response.expectStatusCode(is(OK))
.expectContent(startsWith('{' + requestContent + "=[]}"));
assertTrue(driver.close());
}
// Verifies that the HttpResponseStatisticsCollector aggregates per-request entries with the
// expected scheme, method, status group and request type, and that a handler-set request type
// overrides the method-based default.
@Test
public void requireThatResponseStatsAreCollected() throws Exception {
RequestTypeHandler handler = new RequestTypeHandler();
JettyTestDriver driver = JettyTestDriver.newInstance(handler);
HttpResponseStatisticsCollector statisticsCollector = ((AbstractHandlerContainer) driver.server().server().getHandler())
.getChildHandlerByClass(HttpResponseStatisticsCollector.class);
{
// No traffic yet — no statistics.
List<HttpResponseStatisticsCollector.StatisticsEntry> stats = statisticsCollector.takeStatistics();
assertEquals(0, stats.size());
}
{
// POST defaults to request type "write".
driver.client().newPost("/status.html").execute();
var entry = waitForStatistics(statisticsCollector);
assertEquals("http", entry.scheme);
assertEquals("POST", entry.method);
assertEquals("http.status.2xx", entry.name);
assertEquals("write", entry.requestType);
assertEquals(1, entry.value);
}
{
// GET defaults to request type "read".
driver.client().newGet("/status.html").execute();
var entry = waitForStatistics(statisticsCollector);
assertEquals("http", entry.scheme);
assertEquals("GET", entry.method);
assertEquals("http.status.2xx", entry.name);
assertEquals("read", entry.requestType);
assertEquals(1, entry.value);
}
{
// A handler may explicitly tag a POST as a read.
handler.setRequestType(Request.RequestType.READ);
driver.client().newPost("/status.html").execute();
var entry = waitForStatistics(statisticsCollector);
assertEquals("Handler overrides request type", "read", entry.requestType);
}
assertTrue(driver.close());
}
/**
 * Polls the collector until exactly one statistics entry is available (statistics are published
 * asynchronously) and returns it. Fails the test if no single entry shows up.
 * Fix: the InterruptedException was silently swallowed; now the interrupt status is restored
 * and polling stops so the assertion below reports the failure promptly.
 */
private HttpResponseStatisticsCollector.StatisticsEntry waitForStatistics(HttpResponseStatisticsCollector
                                                                                  statisticsCollector) {
    List<HttpResponseStatisticsCollector.StatisticsEntry> entries = Collections.emptyList();
    int tries = 0;
    while (entries.isEmpty() && tries < 10000) {
        entries = statisticsCollector.takeStatistics();
        if (entries.isEmpty()) {
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // preserve interrupt status instead of swallowing it
                break;                              // stop polling; assertEquals below fails the test
            }
        }
        tries++;
    }
    assertEquals(1, entries.size());
    return entries.get(0);
}
// Verifies that connection throttling leaves requests below the configured limits untouched.
@Test
public void requireThatConnectionThrottleDoesNotBlockConnectionsBelowThreshold() throws Exception {
JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
new EchoRequestHandler(),
new ServerConfig.Builder(),
new ConnectorConfig.Builder()
.throttling(new Throttling.Builder()
.enabled(true)
.maxAcceptRate(10)
.maxHeapUtilization(1.0)
.maxConnections(10)));
driver.client().get("/status.html")
.expectStatusCode(is(OK));
assertTrue(driver.close());
}
// Verifies that a single keep-alive TLS connection carrying 100 requests produces exactly one
// connection-log entry with sensible counters, addresses and SSL metadata.
@Test
public void requireThatConnectionIsTrackedInConnectionLog() throws Exception {
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
InMemoryConnectionLog connectionLog = new InMemoryConnectionLog();
Module overrideModule = binder -> binder.bind(ConnectionLog.class).toInstance(connectionLog);
JettyTestDriver driver = JettyTestDriver.newInstanceWithSsl(new OkRequestHandler(), certificateFile, privateKeyFile, TlsClientAuth.NEED, overrideModule);
int listenPort = driver.server().getListenPort();
StringBuilder builder = new StringBuilder();
for (int i = 0; i < 1000; i++) {
builder.append(i);
}
byte[] content = builder.toString().getBytes();
for (int i = 0; i < 100; i++) {
driver.client().newPost("/status.html").setBinaryContent(content).execute()
.expectStatusCode(is(OK));
}
assertTrue(driver.close());
List<ConnectionLogEntry> logEntries = connectionLog.logEntries();
Assertions.assertThat(logEntries).hasSize(1);
ConnectionLogEntry logEntry = logEntries.get(0);
// Connection ids are random (version 4) UUIDs.
assertEquals(4, UUID.fromString(logEntry.id()).version());
Assertions.assertThat(logEntry.timestamp()).isAfter(Instant.EPOCH);
Assertions.assertThat(logEntry.requests()).hasValue(100L);
Assertions.assertThat(logEntry.responses()).hasValue(100L);
Assertions.assertThat(logEntry.peerAddress()).hasValue("127.0.0.1");
Assertions.assertThat(logEntry.localAddress()).hasValue("127.0.0.1");
Assertions.assertThat(logEntry.localPort()).hasValue(listenPort);
Assertions.assertThat(logEntry.httpBytesReceived()).hasValueSatisfying(value -> Assertions.assertThat(value).isGreaterThan(100000L));
Assertions.assertThat(logEntry.httpBytesSent()).hasValueSatisfying(value -> Assertions.assertThat(value).isGreaterThan(10000L));
Assertions.assertThat(logEntry.sslProtocol()).hasValueSatisfying(TlsContext.ALLOWED_PROTOCOLS::contains);
Assertions.assertThat(logEntry.sslPeerSubject()).hasValue("CN=localhost");
Assertions.assertThat(logEntry.sslCipherSuite()).hasValueSatisfying(cipher -> Assertions.assertThat(cipher).isNotBlank());
Assertions.assertThat(logEntry.sslSessionId()).hasValueSatisfying(sessionId -> Assertions.assertThat(sessionId).hasSize(64));
// The test certificate's validity window; see generatePrivateKeyAndCertificate.
Assertions.assertThat(logEntry.sslPeerNotBefore()).hasValue(Instant.EPOCH);
Assertions.assertThat(logEntry.sslPeerNotAfter()).hasValue(Instant.EPOCH.plus(100_000, ChronoUnit.DAYS));
}
// Verifies that a handled request is logged with its status code and request body size.
@Test
public void requireThatRequestIsTrackedInAccessLog() throws IOException, InterruptedException {
BlockingQueueRequestLog requestLogMock = new BlockingQueueRequestLog();
JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
new EchoRequestHandler(),
new ServerConfig.Builder(),
new ConnectorConfig.Builder(),
binder -> binder.bind(RequestLog.class).toInstance(requestLogMock));
driver.client().newPost("/status.html").setContent("abcdef").execute().expectStatusCode(is(OK));
RequestLogEntry entry = requestLogMock.poll(Duration.ofSeconds(5));
Assertions.assertThat(entry.statusCode()).hasValue(200);
Assertions.assertThat(entry.requestSize()).hasValue(6);
assertThat(driver.close(), is(true));
}
// Verifies that the requests-per-connection metric is reported when the connection closes.
@Test
public void requireThatRequestsPerConnectionMetricIsAggregated() throws IOException {
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
var metricConsumer = new MetricConsumerMock();
InMemoryConnectionLog connectionLog = new InMemoryConnectionLog();
JettyTestDriver driver = createSslTestDriver(certificateFile, privateKeyFile, metricConsumer, connectionLog);
driver.client().get("/").expectStatusCode(is(OK));
assertThat(driver.close(), is(true));
verify(metricConsumer.mockitoMock(), atLeast(1))
.set(MetricDefinitions.REQUESTS_PER_CONNECTION, 1L, MetricConsumerMock.STATIC_CONTEXT);
}
@Test
public void uriWithEmptyPathSegmentIsAllowed() throws Exception {
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
MetricConsumerMock metricConsumer = new MetricConsumerMock();
InMemoryConnectionLog connectionLog = new InMemoryConnectionLog();
JettyTestDriver driver = createSslTestDriver(certificateFile, privateKeyFile, metricConsumer, connectionLog);
String uriPath = "/path/with/empty
driver.client().get(uriPath).expectStatusCode(is(OK));
try (CloseableHttpAsyncClient client = createHttp2Client(driver)) {
String uri = "https:
SimpleHttpResponse response = client.execute(SimpleRequestBuilder.get(uri).build(), null).get();
assertEquals(OK, response.getCode());
}
assertTrue(driver.close());
}
// Builds and starts an async HTTP/2-only client that trusts the driver's TLS context.
// Caller is responsible for closing the returned client (use try-with-resources).
private static CloseableHttpAsyncClient createHttp2Client(JettyTestDriver driver) {
TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create()
.setSslContext(driver.sslContext())
.build();
var client = H2AsyncClientBuilder.create()
.disableAutomaticRetries()
.setTlsStrategy(tlsStrategy)
.build();
client.start();
return client;
}
// Creates a TLS test driver with WANT_AUTH client auth and an enforcer that only
// whitelists "/status.html"; used to test rejection of certificate-less clients.
private static JettyTestDriver createSslWithTlsClientAuthenticationEnforcer(Path certificateFile, Path privateKeyFile) {
ConnectorConfig.Builder connectorConfig = new ConnectorConfig.Builder()
.tlsClientAuthEnforcer(
new ConnectorConfig.TlsClientAuthEnforcer.Builder()
.enable(true)
.pathWhitelist("/status.html"))
.ssl(new ConnectorConfig.Ssl.Builder()
.enabled(true)
.clientAuth(ConnectorConfig.Ssl.ClientAuth.Enum.WANT_AUTH)
.privateKeyFile(privateKeyFile.toString())
.certificateFile(certificateFile.toString())
.caCertificateFile(certificateFile.toString()));
return JettyTestDriver.newConfiguredInstance(
new EchoRequestHandler(),
new ServerConfig.Builder().connectionLog(new ServerConfig.ConnectionLog.Builder().enabled(true)),
connectorConfig,
binder -> {});
}
// Returns a Mockito mock whose refer() yields a no-op reference, so the server can
// acquire and release it without NPEs.
private static RequestHandler mockRequestHandler() {
    RequestHandler handler = mock(RequestHandler.class);
    when(handler.refer()).thenReturn(References.NOOP_REFERENCE);
    return handler;
}
// Returns a string consisting of the character {@code c} repeated {@code len} times.
private static String generateContent(final char c, final int len) {
    StringBuilder content = new StringBuilder(len);
    while (content.length() < len) {
        content.append(c);
    }
    return content.toString();
}
// Creates a driver whose server config controls whether the raw body of an
// application/x-www-form-urlencoded POST is removed after parameter parsing.
private static JettyTestDriver newDriverWithFormPostContentRemoved(RequestHandler requestHandler,
boolean removeFormPostBody) throws Exception {
return JettyTestDriver.newConfiguredInstance(
requestHandler,
new ServerConfig.Builder()
.removeRawPostBodyForWwwUrlEncodedPost(removeFormPostBody),
new ConnectorConfig.Builder());
}
// Builds a multipart form body part with the given file name and plain-text content.
// Mime type and charset are deliberately blanked/nulled to exercise lenient parsing.
private static FormBodyPart newFileBody(final String fileName, final String fileContent) {
return FormBodyPartBuilder.create()
.setBody(
new StringBody(fileContent, ContentType.TEXT_PLAIN) {
@Override public String getFilename() { return fileName; }
@Override public String getMimeType() { return ""; }
@Override public String getCharset() { return null; }
})
.setName(fileName)
.build();
}
// Responds 200 OK with the connection-establishment timestamp (epoch millis) as the body.
private static class ConnectedAtRequestHandler extends AbstractRequestHandler {
@Override
public ContentChannel handleRequest(final Request request, final ResponseHandler handler) {
final HttpRequest httpRequest = (HttpRequest)request;
final String connectedAt = String.valueOf(httpRequest.getConnectedAt(TimeUnit.MILLISECONDS));
final ContentChannel ch = handler.handleResponse(new Response(OK));
ch.write(ByteBuffer.wrap(connectedAt.getBytes(StandardCharsets.UTF_8)), null);
ch.close(null);
// Returning null: this handler consumes no request content.
return null;
}
}
// Responds 200 OK with a single Set-Cookie header encoding the configured cookie.
private static class CookieSetterRequestHandler extends AbstractRequestHandler {
final Cookie cookie;
CookieSetterRequestHandler(final Cookie cookie) {
this.cookie = cookie;
}
@Override
public ContentChannel handleRequest(final Request request, final ResponseHandler handler) {
final HttpResponse response = HttpResponse.newInstance(OK);
response.encodeSetCookieHeader(Collections.singletonList(cookie));
ResponseDispatch.newInstance(response).dispatch(handler);
return null;
}
}
// Responds 200 OK with the request's cookies, sorted by name, rendered via List.toString().
private static class CookiePrinterRequestHandler extends AbstractRequestHandler {
@Override
public ContentChannel handleRequest(final Request request, final ResponseHandler handler) {
final List<Cookie> cookies = new ArrayList<>(((HttpRequest)request).decodeCookieHeader());
Collections.sort(cookies, new CookieComparator());
final ContentChannel out = ResponseDispatch.newInstance(Response.Status.OK).connect(handler);
out.write(StandardCharsets.UTF_8.encode(cookies.toString()), null);
out.close(null);
return null;
}
}
// Responds 200 OK with the request's merged URI+form parameters, sorted by key,
// rendered via Map.toString(); returns the response channel so request content is echoed after it.
private static class ParameterPrinterRequestHandler extends AbstractRequestHandler {
private static final CompletionHandler NULL_COMPLETION_HANDLER = null;
@Override
public ContentChannel handleRequest(Request request, ResponseHandler handler) {
Map<String, List<String>> parameters = new TreeMap<>(((HttpRequest)request).parameters());
ContentChannel responseContentChannel = ResponseDispatch.newInstance(Response.Status.OK).connect(handler);
responseContentChannel.write(ByteBuffer.wrap(parameters.toString().getBytes(StandardCharsets.UTF_8)),
NULL_COMPLETION_HANDLER);
return responseContentChannel;
}
}
// Responds 200 OK, optionally tagging the response with an explicit request type
// (used to test that handlers can override the method-derived read/write classification).
private static class RequestTypeHandler extends AbstractRequestHandler {
private Request.RequestType requestType = null;
public void setRequestType(Request.RequestType requestType) {
this.requestType = requestType;
}
@Override
public ContentChannel handleRequest(Request request, ResponseHandler handler) {
Response response = new Response(OK);
response.setRequestType(requestType);
return handler.handleResponse(response);
}
}
// Throws from handleRequest to test the server's exception-to-500 translation.
private static class ThrowingHandler extends AbstractRequestHandler {
@Override
public ContentChannel handleRequest(final Request request, final ResponseHandler handler) {
throw new RuntimeException("Deliberately thrown exception");
}
}
// Never responds; sets a 100 ms request timeout and stashes the handler so the test
// can dispatch a late response after asserting the 504.
private static class UnresponsiveHandler extends AbstractRequestHandler {
ResponseHandler responseHandler;
@Override
public ContentChannel handleRequest(final Request request, final ResponseHandler handler) {
request.setTimeout(100, TimeUnit.MILLISECONDS);
responseHandler = handler;
return null;
}
}
// Immediately responds 200 OK with an empty body, discarding request content.
private static class OkRequestHandler extends AbstractRequestHandler {
@Override
public ContentChannel handleRequest(Request request, ResponseHandler handler) {
Response response = new Response(OK);
handler.handleResponse(response).close(null);
return NullContent.INSTANCE;
}
}
// Responds 200 OK with one extra header; a null header value exercises header omission.
private static class EchoWithHeaderRequestHandler extends AbstractRequestHandler {
final String headerName;
final String headerValue;
EchoWithHeaderRequestHandler(final String headerName, final String headerValue) {
this.headerName = headerName;
this.headerValue = headerValue;
}
@Override
public ContentChannel handleRequest(final Request request, final ResponseHandler handler) {
final Response response = new Response(OK);
response.headers().add(headerName, headerValue);
return handler.handleResponse(response);
}
}
// Guice module binding a BindingSetSelector that always selects the given set name,
// regardless of URI; used to test the 404 for unknown binding sets.
private static Module newBindingSetSelector(final String setName) {
return new AbstractModule() {
@Override
protected void configure() {
bind(BindingSetSelector.class).toInstance(new BindingSetSelector() {
@Override
public String select(final URI uri) {
return setName;
}
});
}
};
}
// Orders cookies by name so printed cookie lists are deterministic.
private static class CookieComparator implements Comparator<Cookie> {
@Override
public int compare(final Cookie lhs, final Cookie rhs) {
return lhs.getName().compareTo(rhs.getName());
}
}
} | class HttpServerTest {
// Per-test scratch directory for generated private keys and certificates; cleaned up by JUnit.
@Rule
public TemporaryFolder tmpFolder = new TemporaryFolder();
// Verifies that configuring port 0 makes the server bind an ephemeral (non-zero) port.
@Test
public void requireThatServerCanListenToRandomPort() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(mockRequestHandler());
assertNotEquals(0, driver.server().getListenPort());
assertTrue(driver.close());
}
// Verifies that starting a second server on an already-bound port fails with a BindException cause.
@Test
public void requireThatServerCanNotListenToBoundPort() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(mockRequestHandler());
try {
JettyTestDriver.newConfiguredInstance(
mockRequestHandler(),
new ServerConfig.Builder(),
new ConnectorConfig.Builder()
.listenPort(driver.server().getListenPort())
);
} catch (final Throwable t) {
assertThat(t.getCause(), instanceOf(BindException.class));
}
assertTrue(driver.close());
}
// Verifies that selecting an unknown binding set yields 404 with the exception's stack trace
// in the body (developer mode).
@Test
public void requireThatBindingSetNotFoundReturns404() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
mockRequestHandler(),
new ServerConfig.Builder()
.developerMode(true),
new ConnectorConfig.Builder(),
newBindingSetSelector("unknown"));
driver.client().get("/status.html")
.expectStatusCode(is(NOT_FOUND))
.expectContent(containsPattern(Pattern.compile(
Pattern.quote(BindingSetNotFoundException.class.getName()) +
": No binding set named 'unknown'\\.\n\tat .+",
Pattern.DOTALL | Pattern.MULTILINE)));
assertTrue(driver.close());
}
// Verifies that a request line exceeding the (1-byte) header budget returns 414.
@Test
public void requireThatTooLongInitLineReturns414() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
mockRequestHandler(),
new ServerConfig.Builder(),
new ConnectorConfig.Builder()
.requestHeaderSize(1));
driver.client().get("/status.html")
.expectStatusCode(is(REQUEST_URI_TOO_LONG));
assertTrue(driver.close());
}
// Verifies that requests Jetty itself rejects (here: 414) are still access-logged.
@Test
public void requireThatAccessLogIsCalledForRequestRejectedByJetty() throws Exception {
BlockingQueueRequestLog requestLogMock = new BlockingQueueRequestLog();
final JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
mockRequestHandler(),
new ServerConfig.Builder(),
new ConnectorConfig.Builder().requestHeaderSize(1),
binder -> binder.bind(RequestLog.class).toInstance(requestLogMock));
driver.client().get("/status.html")
.expectStatusCode(is(REQUEST_URI_TOO_LONG));
RequestLogEntry entry = requestLogMock.poll(Duration.ofSeconds(5));
assertEquals(414, entry.statusCode().getAsInt());
assertThat(driver.close(), is(true));
}
// Verifies the basic request/response round trip against the echo handler.
@Test
public void requireThatServerCanEcho() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoRequestHandler());
driver.client().get("/status.html")
.expectStatusCode(is(OK));
assertTrue(driver.close());
}
// Same round trip with a client that advertises/accepts compressed responses.
@Test
public void requireThatServerCanEchoCompressed() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoRequestHandler());
SimpleHttpClient client = driver.newClient(true);
client.get("/status.html")
.expectStatusCode(is(OK));
assertTrue(driver.close());
}
// Verifies that a connection survives and serves more than one request.
@Test
public void requireThatServerCanHandleMultipleRequests() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoRequestHandler());
driver.client().get("/status.html")
.expectStatusCode(is(OK));
driver.client().get("/status.html")
.expectStatusCode(is(OK));
assertTrue(driver.close());
}
// Verifies that a form POST body is parsed into request parameters.
@Test
public void requireThatFormPostWorks() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final String requestContent = generateContent('a', 30);
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent(requestContent)
.execute();
response.expectStatusCode(is(OK))
.expectContent(startsWith('{' + requestContent + "=[]}"));
assertTrue(driver.close());
}
// By default the raw form body is kept, so the echo shows both parsed params and raw content.
@Test
public void requireThatFormPostDoesNotRemoveContentByDefault() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent("foo=bar")
.execute();
response.expectStatusCode(is(OK))
.expectContent(is("{foo=[bar]}foo=bar"));
assertTrue(driver.close());
}
// Explicitly configuring removeRawPostBody=false also keeps the raw body.
@Test
public void requireThatFormPostKeepsContentWhenConfiguredTo() throws Exception {
final JettyTestDriver driver = newDriverWithFormPostContentRemoved(new ParameterPrinterRequestHandler(), false);
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent("foo=bar")
.execute();
response.expectStatusCode(is(OK))
.expectContent(is("{foo=[bar]}foo=bar"));
assertTrue(driver.close());
}
// With removeRawPostBody=true only the parsed parameters remain.
@Test
public void requireThatFormPostRemovesContentWhenConfiguredTo() throws Exception {
final JettyTestDriver driver = newDriverWithFormPostContentRemoved(new ParameterPrinterRequestHandler(), true);
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent("foo=bar")
.execute();
response.expectStatusCode(is(OK))
.expectContent(is("{foo=[bar]}"));
assertTrue(driver.close());
}
// Verifies form parsing when the Content-Type carries an explicit charset parameter.
@Test
public void requireThatFormPostWithCharsetSpecifiedWorks() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final String requestContent = generateContent('a', 30);
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(X_DISABLE_CHUNKING, "true")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED + ";charset=UTF-8")
.setContent(requestContent)
.execute();
response.expectStatusCode(is(OK))
.expectContent(startsWith('{' + requestContent + "=[]}"));
assertTrue(driver.close());
}
// An empty form body parses to an empty parameter map.
@Test
public void requireThatEmptyFormPostWorks() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.execute();
response.expectStatusCode(is(OK))
.expectContent(is("{}"));
assertTrue(driver.close());
}
// Multiple key=value pairs in the body are all parsed.
@Test
public void requireThatFormParametersAreParsed() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent("a=b&c=d")
.execute();
response.expectStatusCode(is(OK))
.expectContent(startsWith("{a=[b], c=[d]}"));
assertTrue(driver.close());
}
// Query-string parameters are parsed even on a POST.
@Test
public void requireThatUriParametersAreParsed() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html?a=b&c=d")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.execute();
response.expectStatusCode(is(OK))
.expectContent(is("{a=[b], c=[d]}"));
assertTrue(driver.close());
}
// Query and body values for the same key are merged, query values first.
@Test
public void requireThatFormAndUriParametersAreMerged() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html?a=b&c=d1")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent("c=d2&e=f")
.execute();
response.expectStatusCode(is(OK))
.expectContent(startsWith("{a=[b], c=[d1, d2], e=[f]}"));
assertTrue(driver.close());
}
// ISO-8859-1 bytes ("Bær=blå") must be decoded per the declared form charset.
@Test
public void requireThatFormCharsetIsHonored() throws Exception {
final JettyTestDriver driver = newDriverWithFormPostContentRemoved(new ParameterPrinterRequestHandler(), true);
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED + ";charset=ISO-8859-1")
.setBinaryContent(new byte[]{66, (byte) 230, 114, 61, 98, 108, (byte) 229})
.execute();
response.expectStatusCode(is(OK))
.expectContent(is("{B\u00e6r=[bl\u00e5]}"));
assertTrue(driver.close());
}
// An unrecognized charset in the Content-Type yields 415.
@Test
public void requireThatUnknownFormCharsetIsTreatedAsBadRequest() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED + ";charset=FLARBA-GARBA-7")
.setContent("a=b")
.execute();
response.expectStatusCode(is(UNSUPPORTED_MEDIA_TYPE));
assertTrue(driver.close());
}
// Percent-encoded keys and values are decoded before the handler sees them.
@Test
public void requireThatFormPostWithPercentEncodedContentIsDecoded() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent("%20%3D%C3%98=%22%25+")
.execute();
response.expectStatusCode(is(OK))
.expectContent(startsWith("{ =\u00d8=[\"% ]}"));
assertTrue(driver.close());
}
// A handler that throws during form handling must still produce a clean 500.
@Test
public void requireThatFormPostWithThrowingHandlerIsExceptionSafe() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ThrowingHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setContent("a=b")
.execute();
response.expectStatusCode(is(INTERNAL_SERVER_ERROR));
assertTrue(driver.close());
}
// Verifies that a multipart POST is echoed back containing the content of every part.
@Test
public void requireThatMultiPostWorks() throws Exception {
    final String startTxtContent = "this is a test for POST.";
    final String updaterConfContent
            = "identifier = updater\n"
            + "server_type = gds\n";
    final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoRequestHandler());
    final ResponseValidator response =
            driver.client().newPost("/status.html")
                    .setMultipartContent(
                            newFileBody("start.txt", startTxtContent),
                            newFileBody("updater.conf", updaterConfContent))
                    .execute();
    response.expectStatusCode(is(OK))
            .expectContent(containsString(startTxtContent))
            .expectContent(containsString(updaterConfContent));
    // Fix: shut the driver down like every other test in this class; it was previously leaked.
    assertTrue(driver.close());
}
// Verifies that a Cookie request header is decoded and visible to the handler.
@Test
public void requireThatRequestCookiesAreReceived() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new CookiePrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(COOKIE, "foo=bar")
.execute();
response.expectStatusCode(is(OK))
.expectContent(containsString("[foo=bar]"));
assertTrue(driver.close());
}
// Verifies the exact Set-Cookie serialization of path/domain/secure/httponly attributes.
@Test
public void requireThatSetCookieHeaderIsCorrect() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new CookieSetterRequestHandler(
new Cookie("foo", "bar")
.setDomain(".localhost")
.setHttpOnly(true)
.setPath("/foopath")
.setSecure(true)));
driver.client().get("/status.html")
.expectStatusCode(is(OK))
.expectHeader("Set-Cookie",
is("foo=bar; Path=/foopath; Domain=.localhost; Secure; HttpOnly"));
assertTrue(driver.close());
}
// Verifies that a handler that never responds trips the request timeout and the client sees 504.
@Test
public void requireThatTimeoutWorks() throws Exception {
final UnresponsiveHandler requestHandler = new UnresponsiveHandler();
final JettyTestDriver driver = JettyTestDriver.newInstance(requestHandler);
driver.client().get("/status.html")
.expectStatusCode(is(GATEWAY_TIMEOUT));
// Dispatch the late response so the handler reference is released and the driver can close cleanly.
ResponseDispatch.newInstance(OK).dispatch(requestHandler.responseHandler);
assertTrue(driver.close());
}
// Verifies that a response header added with a null value is dropped rather than serialized.
@Test
public void requireThatHeaderWithNullValueIsOmitted() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoWithHeaderRequestHandler("X-Foo", null));
driver.client().get("/status.html")
.expectStatusCode(is(OK))
.expectNoHeader("X-Foo");
assertTrue(driver.close());
}
// Verifies that a response header with an empty (but non-null) value is sent on the wire.
@Test
public void requireThatHeaderWithEmptyValueIsAllowed() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoWithHeaderRequestHandler("X-Foo", ""));
driver.client().get("/status.html")
.expectStatusCode(is(OK))
.expectHeader("X-Foo", is(""));
assertTrue(driver.close());
}
// Verifies that a handler-set "Connection: close" header is passed through to the response.
@Test
public void requireThatNoConnectionHeaderMeansKeepAliveInHttp11KeepAliveDisabled() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoWithHeaderRequestHandler(CONNECTION, CLOSE));
driver.client().get("/status.html")
.expectHeader(CONNECTION, is(CLOSE));
assertThat(driver.close(), is(true));
}
// Verifies that the server answers a plain HTTPS request when configured with a key/cert pair.
// Fix: the @Test annotation was duplicated, which is a compile error (JUnit's @Test is not @Repeatable).
@Test
public void requireThatServerCanRespondToSslRequest() throws Exception {
    Path privateKeyFile = tmpFolder.newFile().toPath();
    Path certificateFile = tmpFolder.newFile().toPath();
    generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
    final JettyTestDriver driver = JettyTestDriver.newInstanceWithSsl(new EchoRequestHandler(), certificateFile, privateKeyFile, TlsClientAuth.WANT);
    driver.client().get("/status.html")
            .expectStatusCode(is(OK));
    assertTrue(driver.close());
}
@Test
public void requireThatServerCanRespondToHttp2Request() throws Exception {
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
MetricConsumerMock metricConsumer = new MetricConsumerMock();
InMemoryConnectionLog connectionLog = new InMemoryConnectionLog();
JettyTestDriver driver = createSslTestDriver(certificateFile, privateKeyFile, metricConsumer, connectionLog);
try (CloseableHttpAsyncClient client = createHttp2Client(driver)) {
String uri = "https:
SimpleHttpResponse response = client.execute(SimpleRequestBuilder.get(uri).build(), null).get();
assertNull(response.getBodyText());
assertEquals(OK, response.getCode());
}
assertTrue(driver.close());
ConnectionLogEntry entry = connectionLog.logEntries().get(0);
assertEquals("HTTP/2.0", entry.httpProtocol().get());
}
@Test
public void requireThatTlsClientAuthenticationEnforcerRejectsRequestsForNonWhitelistedPaths() throws IOException {
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
JettyTestDriver driver = createSslWithTlsClientAuthenticationEnforcer(certificateFile, privateKeyFile);
SSLContext trustStoreOnlyCtx = new SslContextBuilder()
.withTrustStore(certificateFile)
.build();
new SimpleHttpClient(trustStoreOnlyCtx, driver.server().getListenPort(), false)
.get("/dummy.html")
.expectStatusCode(is(UNAUTHORIZED));
assertTrue(driver.close());
}
@Test
public void requireThatTlsClientAuthenticationEnforcerAllowsRequestForWhitelistedPaths() throws IOException {
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
JettyTestDriver driver = JettyTestDriver.newInstanceWithSsl(new EchoRequestHandler(), certificateFile, privateKeyFile, TlsClientAuth.WANT);
SSLContext trustStoreOnlyCtx = new SslContextBuilder()
.withTrustStore(certificateFile)
.build();
new SimpleHttpClient(trustStoreOnlyCtx, driver.server().getListenPort(), false)
.get("/status.html")
.expectStatusCode(is(OK));
assertTrue(driver.close());
}
@Test
public void requireThatConnectedAtReturnsNonZero() throws Exception {
final JettyTestDriver driver = JettyTestDriver.newInstance(new ConnectedAtRequestHandler());
driver.client().get("/status.html")
.expectStatusCode(is(OK))
.expectContent(matchesPattern("\\d{13,}"));
assertThat(driver.close(), is(true));
}
// A gzip-compressed x-www-form-urlencoded POST body must be transparently
// decompressed by the server before form parsing. The body "aaa...a" (30 'a's)
// parses as a single parameter name with no values, so the parameter-printing
// handler responds with "{aaa...a=[]}".
@Test
public void requireThatGzipEncodingRequestsAreAutomaticallyDecompressed() throws Exception {
JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
String requestContent = generateContent('a', 30);
ResponseValidator response = driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
.setGzipContent(requestContent)
.execute();
response.expectStatusCode(is(OK))
.expectContent(startsWith('{' + requestContent + "=[]}"));
assertTrue(driver.close());
}
// Verifies the HttpResponseStatisticsCollector wired into the Jetty handler
// chain: scheme/method/status-class/request-type are recorded per response,
// and a handler-supplied request type overrides the method-derived default
// (POST would otherwise be "write").
@Test
public void requireThatResponseStatsAreCollected() throws Exception {
RequestTypeHandler handler = new RequestTypeHandler();
JettyTestDriver driver = JettyTestDriver.newInstance(handler);
HttpResponseStatisticsCollector statisticsCollector = ((AbstractHandlerContainer) driver.server().server().getHandler())
.getChildHandlerByClass(HttpResponseStatisticsCollector.class);
{
// No traffic yet -> no statistics.
List<HttpResponseStatisticsCollector.StatisticsEntry> stats = statisticsCollector.takeStatistics();
assertEquals(0, stats.size());
}
{
// POST defaults to request type "write".
driver.client().newPost("/status.html").execute();
var entry = waitForStatistics(statisticsCollector);
assertEquals("http", entry.scheme);
assertEquals("POST", entry.method);
assertEquals("http.status.2xx", entry.name);
assertEquals("write", entry.requestType);
assertEquals(1, entry.value);
}
{
// GET defaults to request type "read".
driver.client().newGet("/status.html").execute();
var entry = waitForStatistics(statisticsCollector);
assertEquals("http", entry.scheme);
assertEquals("GET", entry.method);
assertEquals("http.status.2xx", entry.name);
assertEquals("read", entry.requestType);
assertEquals(1, entry.value);
}
{
// A handler-set request type wins over the method-derived one.
handler.setRequestType(Request.RequestType.READ);
driver.client().newPost("/status.html").execute();
var entry = waitForStatistics(statisticsCollector);
assertEquals("Handler overrides request type", "read", entry.requestType);
}
assertTrue(driver.close());
}
/**
 * Polls the statistics collector until it produces at least one entry,
 * sleeping 100 ms between empty polls (up to 10000 attempts), then asserts
 * exactly one entry was produced and returns it.
 *
 * Fix: the original swallowed InterruptedException with an empty catch block,
 * losing the thread's interrupt status. We now restore the flag and stop
 * polling so an interrupted test thread fails fast instead of spinning on.
 */
private HttpResponseStatisticsCollector.StatisticsEntry waitForStatistics(HttpResponseStatisticsCollector
statisticsCollector) {
List<HttpResponseStatisticsCollector.StatisticsEntry> entries = Collections.emptyList();
int tries = 0;
while (entries.isEmpty() && tries < 10000) {
entries = statisticsCollector.takeStatistics();
if (entries.isEmpty()) {
try {
Thread.sleep(100);
} catch (InterruptedException e) {
Thread.currentThread().interrupt(); // preserve interrupt status for callers
break; // stop waiting; the assertion below will report the failure
}
}
tries++;
}
assertEquals(1, entries.size());
return entries.get(0);
}
// With throttling enabled but generous limits (10 conn, accept rate 10, full
// heap allowed), a single request must pass through unimpeded.
@Test
public void requireThatConnectionThrottleDoesNotBlockConnectionsBelowThreshold() throws Exception {
JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
new EchoRequestHandler(),
new ServerConfig.Builder(),
new ConnectorConfig.Builder()
.throttling(new Throttling.Builder()
.enabled(true)
.maxAcceptRate(10)
.maxHeapUtilization(1.0)
.maxConnections(10)));
driver.client().get("/status.html")
.expectStatusCode(is(OK));
assertTrue(driver.close());
}
// Issues 100 POSTs over one keep-alive mTLS connection and verifies that a
// single ConnectionLogEntry is produced with aggregated counters plus the TLS
// handshake details (protocol, cipher, session id, peer subject/validity).
@Test
public void requireThatConnectionIsTrackedInConnectionLog() throws Exception {
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
InMemoryConnectionLog connectionLog = new InMemoryConnectionLog();
Module overrideModule = binder -> binder.bind(ConnectionLog.class).toInstance(connectionLog);
JettyTestDriver driver = JettyTestDriver.newInstanceWithSsl(new OkRequestHandler(), certificateFile, privateKeyFile, TlsClientAuth.NEED, overrideModule);
int listenPort = driver.server().getListenPort();
// ~2890-byte payload ("0123...999") reused for each of the 100 requests.
StringBuilder builder = new StringBuilder();
for (int i = 0; i < 1000; i++) {
builder.append(i);
}
byte[] content = builder.toString().getBytes();
for (int i = 0; i < 100; i++) {
driver.client().newPost("/status.html").setBinaryContent(content).execute()
.expectStatusCode(is(OK));
}
assertTrue(driver.close());
List<ConnectionLogEntry> logEntries = connectionLog.logEntries();
// All requests shared one connection, so exactly one aggregated entry.
Assertions.assertThat(logEntries).hasSize(1);
ConnectionLogEntry logEntry = logEntries.get(0);
// Entry ids are random (version 4) UUIDs.
assertEquals(4, UUID.fromString(logEntry.id()).version());
Assertions.assertThat(logEntry.timestamp()).isAfter(Instant.EPOCH);
Assertions.assertThat(logEntry.requests()).hasValue(100L);
Assertions.assertThat(logEntry.responses()).hasValue(100L);
Assertions.assertThat(logEntry.peerAddress()).hasValue("127.0.0.1");
Assertions.assertThat(logEntry.localAddress()).hasValue("127.0.0.1");
Assertions.assertThat(logEntry.localPort()).hasValue(listenPort);
// Lower bounds only: exact byte counts depend on header sizes.
Assertions.assertThat(logEntry.httpBytesReceived()).hasValueSatisfying(value -> Assertions.assertThat(value).isGreaterThan(100000L));
Assertions.assertThat(logEntry.httpBytesSent()).hasValueSatisfying(value -> Assertions.assertThat(value).isGreaterThan(10000L));
Assertions.assertThat(logEntry.sslProtocol()).hasValueSatisfying(TlsContext.ALLOWED_PROTOCOLS::contains);
Assertions.assertThat(logEntry.sslPeerSubject()).hasValue("CN=localhost");
Assertions.assertThat(logEntry.sslCipherSuite()).hasValueSatisfying(cipher -> Assertions.assertThat(cipher).isNotBlank());
Assertions.assertThat(logEntry.sslSessionId()).hasValueSatisfying(sessionId -> Assertions.assertThat(sessionId).hasSize(64));
// Matches the validity window used by generatePrivateKeyAndCertificate.
Assertions.assertThat(logEntry.sslPeerNotBefore()).hasValue(Instant.EPOCH);
Assertions.assertThat(logEntry.sslPeerNotAfter()).hasValue(Instant.EPOCH.plus(100_000, ChronoUnit.DAYS));
}
// Binds a blocking-queue-backed RequestLog and verifies that a POST produces
// an access-log entry carrying the response status and the request body size
// ("abcdef" -> 6 bytes). poll() waits up to 5 s for the asynchronous entry.
@Test
public void requireThatRequestIsTrackedInAccessLog() throws IOException, InterruptedException {
BlockingQueueRequestLog requestLogMock = new BlockingQueueRequestLog();
JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
new EchoRequestHandler(),
new ServerConfig.Builder(),
new ConnectorConfig.Builder(),
binder -> binder.bind(RequestLog.class).toInstance(requestLogMock));
driver.client().newPost("/status.html").setContent("abcdef").execute().expectStatusCode(is(OK));
RequestLogEntry entry = requestLogMock.poll(Duration.ofSeconds(5));
Assertions.assertThat(entry.statusCode()).hasValue(200);
Assertions.assertThat(entry.requestSize()).hasValue(6);
assertThat(driver.close(), is(true));
}
// One request over one TLS connection must report REQUESTS_PER_CONNECTION = 1
// to the metric consumer; the metric is emitted when the connection closes,
// hence the verification after driver.close().
@Test
public void requireThatRequestsPerConnectionMetricIsAggregated() throws IOException {
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
var metricConsumer = new MetricConsumerMock();
InMemoryConnectionLog connectionLog = new InMemoryConnectionLog();
JettyTestDriver driver = createSslTestDriver(certificateFile, privateKeyFile, metricConsumer, connectionLog);
driver.client().get("/").expectStatusCode(is(OK));
assertThat(driver.close(), is(true));
verify(metricConsumer.mockitoMock(), atLeast(1))
.set(MetricDefinitions.REQUESTS_PER_CONNECTION, 1L, MetricConsumerMock.STATIC_CONTEXT);
}
// A URI containing an empty path segment (e.g. "//") must be accepted by both
// the HTTP/1 client and an HTTP/2 client.
// NOTE(review): the two string literals below are unterminated — the source
// appears truncated at "//" by an extraction step (everything after "//" was
// dropped as if it were a comment). Restore the original literals from VCS;
// presumably a path like "/path/with/empty//segment" and a full
// "https://localhost:<port><path>" URL — TODO confirm.
@Test
public void uriWithEmptyPathSegmentIsAllowed() throws Exception {
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
MetricConsumerMock metricConsumer = new MetricConsumerMock();
InMemoryConnectionLog connectionLog = new InMemoryConnectionLog();
JettyTestDriver driver = createSslTestDriver(certificateFile, privateKeyFile, metricConsumer, connectionLog);
String uriPath = "/path/with/empty
driver.client().get(uriPath).expectStatusCode(is(OK));
try (CloseableHttpAsyncClient client = createHttp2Client(driver)) {
String uri = "https:
SimpleHttpResponse response = client.execute(SimpleRequestBuilder.get(uri).build(), null).get();
assertEquals(OK, response.getCode());
}
assertTrue(driver.close());
}
// Builds and starts an async HTTP/2-only client (Apache HttpClient 5,
// H2AsyncClientBuilder) that trusts the driver's SSL context; retries are
// disabled so tests observe raw server behavior. Caller must close it.
private static CloseableHttpAsyncClient createHttp2Client(JettyTestDriver driver) {
TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create()
.setSslContext(driver.sslContext())
.build();
var client = H2AsyncClientBuilder.create()
.disableAutomaticRetries()
.setTlsStrategy(tlsStrategy)
.build();
client.start();
return client;
}
// Test driver with SSL in WANT_AUTH mode (client certificate requested but
// optional at the TLS layer) plus a TlsClientAuthEnforcer that, at the HTTP
// layer, only exempts "/status.html" from the client-certificate requirement.
// The self-signed certificate doubles as the CA certificate.
private static JettyTestDriver createSslWithTlsClientAuthenticationEnforcer(Path certificateFile, Path privateKeyFile) {
ConnectorConfig.Builder connectorConfig = new ConnectorConfig.Builder()
.tlsClientAuthEnforcer(
new ConnectorConfig.TlsClientAuthEnforcer.Builder()
.enable(true)
.pathWhitelist("/status.html"))
.ssl(new ConnectorConfig.Ssl.Builder()
.enabled(true)
.clientAuth(ConnectorConfig.Ssl.ClientAuth.Enum.WANT_AUTH)
.privateKeyFile(privateKeyFile.toString())
.certificateFile(certificateFile.toString())
.caCertificateFile(certificateFile.toString()));
return JettyTestDriver.newConfiguredInstance(
new EchoRequestHandler(),
new ServerConfig.Builder().connectionLog(new ServerConfig.ConnectionLog.Builder().enabled(true)),
connectorConfig,
binder -> {});
}
// Mockito-backed RequestHandler whose refer() hands out a no-op reference,
// so the container's reference counting does not blow up on the mock.
private static RequestHandler mockRequestHandler() {
RequestHandler handler = mock(RequestHandler.class);
when(handler.refer()).thenReturn(References.NOOP_REFERENCE);
return handler;
}
// Returns the character c repeated len times (empty string for len <= 0).
private static String generateContent(final char c, final int len) {
final StringBuilder content = new StringBuilder(Math.max(len, 0));
for (int remaining = len; remaining > 0; --remaining) {
content.append(c);
}
return content.toString();
}
// Driver whose server config toggles whether the raw request body of an
// x-www-form-urlencoded POST is stripped after form parsing.
private static JettyTestDriver newDriverWithFormPostContentRemoved(RequestHandler requestHandler,
boolean removeFormPostBody) throws Exception {
return JettyTestDriver.newConfiguredInstance(
requestHandler,
new ServerConfig.Builder()
.removeRawPostBodyForWwwUrlEncodedPost(removeFormPostBody),
new ConnectorConfig.Builder());
}
// Builds a multipart form part that behaves like a file upload: the anonymous
// StringBody subclass forces a filename while suppressing the mime-type and
// charset (empty/null overrides), so only the filename attribute is emitted.
private static FormBodyPart newFileBody(final String fileName, final String fileContent) {
return FormBodyPartBuilder.create()
.setBody(
new StringBody(fileContent, ContentType.TEXT_PLAIN) {
@Override public String getFilename() { return fileName; }
@Override public String getMimeType() { return ""; }
@Override public String getCharset() { return null; }
})
.setName(fileName)
.build();
}
// Test handler replying 200 OK with the request's connection-establishment
// time, in epoch milliseconds, as the UTF-8 response body.
private static class ConnectedAtRequestHandler extends AbstractRequestHandler {
@Override
public ContentChannel handleRequest(final Request request, final ResponseHandler handler) {
final HttpRequest httpRequest = (HttpRequest) request;
final String body = String.valueOf(httpRequest.getConnectedAt(TimeUnit.MILLISECONDS));
final ContentChannel channel = handler.handleResponse(new Response(OK));
channel.write(ByteBuffer.wrap(body.getBytes(StandardCharsets.UTF_8)), null);
channel.close(null);
return null; // no request content expected
}
}
// Test handler that replies 200 OK with a single Set-Cookie header encoding
// the cookie given at construction time.
private static class CookieSetterRequestHandler extends AbstractRequestHandler {
final Cookie cookie;
CookieSetterRequestHandler(final Cookie cookie) {
this.cookie = cookie;
}
@Override
public ContentChannel handleRequest(final Request request, final ResponseHandler handler) {
final HttpResponse response = HttpResponse.newInstance(OK);
response.encodeSetCookieHeader(Collections.singletonList(cookie));
ResponseDispatch.newInstance(response).dispatch(handler);
// No request content expected.
return null;
}
}
// Test handler that decodes the request's Cookie header, sorts the cookies by
// name (CookieComparator) for deterministic output, and echoes the list's
// toString() as the UTF-8 response body of a 200 OK.
private static class CookiePrinterRequestHandler extends AbstractRequestHandler {
@Override
public ContentChannel handleRequest(final Request request, final ResponseHandler handler) {
final List<Cookie> cookies = new ArrayList<>(((HttpRequest) request).decodeCookieHeader());
cookies.sort(new CookieComparator());
final ContentChannel out = ResponseDispatch.newInstance(Response.Status.OK).connect(handler);
out.write(StandardCharsets.UTF_8.encode(cookies.toString()), null);
out.close(null);
return null;
}
}
// Test handler that echoes the request parameters (sorted by name via TreeMap
// for deterministic output) as the map's toString(), e.g. "{a=[1], b=[]}".
private static class ParameterPrinterRequestHandler extends AbstractRequestHandler {
private static final CompletionHandler NULL_COMPLETION_HANDLER = null;
@Override
public ContentChannel handleRequest(Request request, ResponseHandler handler) {
Map<String, List<String>> parameters = new TreeMap<>(((HttpRequest)request).parameters());
ContentChannel responseContentChannel = ResponseDispatch.newInstance(Response.Status.OK).connect(handler);
responseContentChannel.write(ByteBuffer.wrap(parameters.toString().getBytes(StandardCharsets.UTF_8)),
NULL_COMPLETION_HANDLER);
// Deliberately left open: returned so the caller/content flow closes it.
return responseContentChannel;
}
}
// Test handler replying 200 OK with a configurable request type on the
// response (null by default, letting the server derive it from the method).
private static class RequestTypeHandler extends AbstractRequestHandler {
private Request.RequestType requestType = null;
public void setRequestType(Request.RequestType requestType) {
this.requestType = requestType;
}
@Override
public ContentChannel handleRequest(Request request, ResponseHandler handler) {
Response response = new Response(OK);
response.setRequestType(requestType);
return handler.handleResponse(response);
}
}
// Test handler that always throws, for exercising the container's
// error-handling path.
private static class ThrowingHandler extends AbstractRequestHandler {
@Override
public ContentChannel handleRequest(final Request request, final ResponseHandler handler) {
throw new RuntimeException("Deliberately thrown exception");
}
}
// Test handler that never responds: it sets a 100 ms request timeout and
// stashes the ResponseHandler so tests can observe/trigger timeout behavior.
private static class UnresponsiveHandler extends AbstractRequestHandler {
ResponseHandler responseHandler;
@Override
public ContentChannel handleRequest(final Request request, final ResponseHandler handler) {
request.setTimeout(100, TimeUnit.MILLISECONDS);
responseHandler = handler;
// No response is dispatched here — that's the point of the handler.
return null;
}
}
// Test handler replying an empty 200 OK and discarding any request content
// (NullContent sink).
private static class OkRequestHandler extends AbstractRequestHandler {
@Override
public ContentChannel handleRequest(Request request, ResponseHandler handler) {
Response response = new Response(OK);
handler.handleResponse(response).close(null);
return NullContent.INSTANCE;
}
}
// Test handler replying 200 OK with one fixed response header, configured at
// construction time.
private static class EchoWithHeaderRequestHandler extends AbstractRequestHandler {
final String headerName;
final String headerValue;
EchoWithHeaderRequestHandler(final String headerName, final String headerValue) {
this.headerName = headerName;
this.headerValue = headerValue;
}
@Override
public ContentChannel handleRequest(final Request request, final ResponseHandler handler) {
final Response response = new Response(OK);
response.headers().add(headerName, headerValue);
return handler.handleResponse(response);
}
}
// Guice module binding a BindingSetSelector that always selects the given
// binding-set name, regardless of the request URI.
private static Module newBindingSetSelector(final String setName) {
return new AbstractModule() {
@Override
protected void configure() {
bind(BindingSetSelector.class).toInstance(new BindingSetSelector() {
@Override
public String select(final URI uri) {
return setName;
}
});
}
};
}
// Orders cookies alphabetically by name; used for deterministic cookie output
// in CookiePrinterRequestHandler.
private static class CookieComparator implements Comparator<Cookie> {
@Override
public int compare(final Cookie lhs, final Cookie rhs) {
return lhs.getName().compareTo(rhs.getName());
}
}
} | |
Use failCount() rather than recomputing failed().size(). | private PreparedApplications waitForPrepare(Map<ApplicationId, Future<Optional<Deployment>>> deployments) {
int applicationCount = deployments.size();
PreparedApplications applications = new PreparedApplications();
Instant lastLogged = Instant.EPOCH;
do {
deployments.forEach((applicationId, future) -> {
if (applications.prepareFinished(applicationId)) return;
DeploymentInfo status = getDeploymentStatus(applicationId, future);
switch (status.status()) {
case success:
case failed:
applications.add(status);
break;
case inProgress:
break;
default:
throw new IllegalArgumentException("Unknown deployment status " + status);
}
});
if ( ! Duration.between(lastLogged, Instant.now()).minus(Duration.ofSeconds(10)).isNegative()) {
logProgress(applicationCount, applications);
lastLogged = Instant.now();
}
} while (applications.failed().size() + applications.successCount() < applicationCount);
logProgress(applicationCount, applications);
return applications;
} | } while (applications.failed().size() + applications.successCount() < applicationCount); | private PreparedApplications waitForPrepare(Map<ApplicationId, Future<Optional<Deployment>>> deployments) {
int applicationCount = deployments.size();
PreparedApplications applications = new PreparedApplications();
Instant lastLogged = Instant.EPOCH;
do {
deployments.forEach((applicationId, future) -> {
if (applications.prepareFinished(applicationId)) return;
DeploymentInfo status = getDeploymentStatus(applicationId, future);
switch (status.status()) {
case success:
case failed:
applications.add(status);
break;
case inProgress:
break;
default:
throw new IllegalArgumentException("Unknown deployment status " + status);
}
});
if ( ! Duration.between(lastLogged, Instant.now()).minus(Duration.ofSeconds(10)).isNegative()) {
logProgress(applicationCount, applications);
lastLogged = Instant.now();
}
} while (applications.failCount() + applications.successCount() < applicationCount);
logProgress(applicationCount, applications);
return applications;
} | class ConfigServerBootstrap extends AbstractComponent implements Runnable {
private static final Logger log = Logger.getLogger(ConfigServerBootstrap.class.getName());
enum Mode { BOOTSTRAP_IN_CONSTRUCTOR, FOR_TESTING_NO_BOOTSTRAP_OF_APPS}
enum RedeployingApplicationsFails { EXIT_JVM, CONTINUE }
enum VipStatusMode { VIP_STATUS_FILE, VIP_STATUS_PROGRAMMATICALLY }
private final ApplicationRepository applicationRepository;
private final RpcServer server;
private final VersionState versionState;
private final StateMonitor stateMonitor;
private final VipStatus vipStatus;
private final ConfigserverConfig configserverConfig;
private final Duration maxDurationOfRedeployment;
private final Duration sleepTimeWhenRedeployingFails;
private final RedeployingApplicationsFails exitIfRedeployingApplicationsFails;
private final ExecutorService rpcServerExecutor;
@Inject
public ConfigServerBootstrap(ApplicationRepository applicationRepository, RpcServer server,
VersionState versionState, StateMonitor stateMonitor, VipStatus vipStatus) {
this(applicationRepository, server, versionState, stateMonitor, vipStatus, BOOTSTRAP_IN_CONSTRUCTOR, EXIT_JVM,
applicationRepository.configserverConfig().hostedVespa()
? VipStatusMode.VIP_STATUS_FILE
: VipStatusMode.VIP_STATUS_PROGRAMMATICALLY);
}
ConfigServerBootstrap(ApplicationRepository applicationRepository, RpcServer server, VersionState versionState,
StateMonitor stateMonitor, VipStatus vipStatus, VipStatusMode vipStatusMode) {
this(applicationRepository, server, versionState, stateMonitor, vipStatus,
FOR_TESTING_NO_BOOTSTRAP_OF_APPS, CONTINUE, vipStatusMode);
}
private ConfigServerBootstrap(ApplicationRepository applicationRepository, RpcServer server,
VersionState versionState, StateMonitor stateMonitor, VipStatus vipStatus,
Mode mode, RedeployingApplicationsFails exitIfRedeployingApplicationsFails,
VipStatusMode vipStatusMode) {
this.applicationRepository = applicationRepository;
this.server = server;
this.versionState = versionState;
this.stateMonitor = stateMonitor;
this.vipStatus = vipStatus;
this.configserverConfig = applicationRepository.configserverConfig();
this.maxDurationOfRedeployment = Duration.ofSeconds(configserverConfig.maxDurationOfBootstrap());
this.sleepTimeWhenRedeployingFails = Duration.ofSeconds(configserverConfig.sleepTimeWhenRedeployingFails());
this.exitIfRedeployingApplicationsFails = exitIfRedeployingApplicationsFails;
rpcServerExecutor = Executors.newSingleThreadExecutor(new DaemonThreadFactory("config server RPC server"));
log.log(Level.FINE, () -> "Bootstrap mode: " + mode + ", VIP status mode: " + vipStatusMode);
initializing(vipStatusMode);
switch (mode) {
case BOOTSTRAP_IN_CONSTRUCTOR:
start();
break;
case FOR_TESTING_NO_BOOTSTRAP_OF_APPS:
break;
default:
throw new IllegalArgumentException("Unknown bootstrap mode " + mode + ", legal values: " + Arrays.toString(Mode.values()));
}
}
@Override
public void deconstruct() {
log.log(Level.INFO, "Stopping config server");
down();
server.stop();
log.log(Level.FINE, "RPC server stopped");
rpcServerExecutor.shutdown();
}
@Override
public void run() {
start();
do {
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
log.log(Level.SEVERE, "Got interrupted", e);
break;
}
} while (server.isRunning());
down();
}
// Bootstraps the config server: on a version upgrade, redeploys all
// applications before recording the new version; any redeploy failure invokes
// redeployingApplicationsFailed() (which may System.exit). On success the RPC
// server is started and the server is put in rotation.
public void start() {
if (versionState.isUpgraded()) {
log.log(Level.INFO, "Config server upgrading from " + versionState.storedVersion() + " to "
+ versionState.currentVersion() + ". Redeploying all applications");
try {
if ( ! redeployAllApplications()) {
redeployingApplicationsFailed();
return;
}
// Only persist the new version once every application redeployed.
versionState.saveNewVersion();
log.log(Level.INFO, "All applications redeployed successfully");
} catch (Exception e) {
log.log(Level.SEVERE, "Redeployment of applications failed", e);
redeployingApplicationsFailed();
return;
}
}
applicationRepository.bootstrappingDone();
startRpcServer();
up();
}
StateMonitor.Status status() {
return stateMonitor.status();
}
private void up() {
vipStatus.setInRotation(true);
}
private void down() {
vipStatus.setInRotation(false);
}
private void initializing(VipStatusMode vipStatusMode) {
stateMonitor.status(StateMonitor.Status.initializing);
if (vipStatusMode == VipStatusMode.VIP_STATUS_PROGRAMMATICALLY)
vipStatus.setInRotation(false);
}
private void startRpcServer() {
rpcServerExecutor.execute(server);
Instant end = Instant.now().plus(Duration.ofSeconds(10));
while (!server.isRunning() && Instant.now().isBefore(end)) {
try {
Thread.sleep(10);
} catch (InterruptedException e) {
log.log(Level.SEVERE, "Got interrupted", e);
break;
}
}
if (!server.isRunning())
throw new RuntimeException("RPC server not started in 10 seconds");
}
private void redeployingApplicationsFailed() {
if (exitIfRedeployingApplicationsFails == EXIT_JVM) System.exit(1);
}
// Redeploys every known application (in random order to spread load across
// config servers), retrying failures with a linearly increasing backoff
// (capped at 10 minutes) until all succeed or maxDurationOfRedeployment is
// exceeded. Returns true when every application was redeployed.
private boolean redeployAllApplications() throws InterruptedException {
Instant end = Instant.now().plus(maxDurationOfRedeployment);
List<ApplicationId> applicationsToRedeploy = applicationRepository.listApplications();
Collections.shuffle(applicationsToRedeploy);
long failCount = 0;
do {
// redeployApplications returns the subset that still needs redeploying.
applicationsToRedeploy = redeployApplications(applicationsToRedeploy);
if ( ! applicationsToRedeploy.isEmpty() && ! sleepTimeWhenRedeployingFails.isZero()) {
Duration sleepTime = sleepTimeWhenRedeployingFails.multipliedBy(++failCount);
if (sleepTime.compareTo(Duration.ofMinutes(10)) > 0)
sleepTime = Duration.ofMinutes(10);
log.log(Level.INFO, "Redeployment of " + applicationsToRedeploy + " not finished, will retry in " + sleepTime);
Thread.sleep(sleepTime.toMillis());
}
} while ( ! applicationsToRedeploy.isEmpty() && Instant.now().isBefore(end));
if ( ! applicationsToRedeploy.isEmpty()) {
log.log(Level.SEVERE, "Redeploying applications not finished after " + maxDurationOfRedeployment +
", exiting, applications that failed redeployment: " + applicationsToRedeploy);
return false;
}
return true;
}
private List<ApplicationId> redeployApplications(List<ApplicationId> applicationIds) throws InterruptedException {
ExecutorService executor = getExecutor();
log.log(Level.INFO, () -> "Redeploying " + applicationIds.size() + " apps: " + applicationIds);
PreparedApplications preparedApplications = prepare(applicationIds, executor);
if (preparedApplications.hasPrepareFailures()) {
log.log(Level.INFO, "Failed preparing applications: " + preparedApplications.failed());
return preparedApplications.failed();
}
List<ApplicationId> failed = activate(preparedApplications.deploymentInfos());
shutdownExecutor(executor);
return failed;
}
private void shutdownExecutor(ExecutorService executor) throws InterruptedException {
executor.shutdown();
if ( ! executor.awaitTermination(1, TimeUnit.HOURS)) {
log.log(Level.WARNING, "Awaiting termination of executor failed");
executor.shutdownNow();
}
}
private ExecutorService getExecutor() {
return Executors.newFixedThreadPool(configserverConfig.numRedeploymentThreads(),
new DaemonThreadFactory("redeploy-apps-"));
}
private PreparedApplications prepare(List<ApplicationId> applicationIds, ExecutorService executor) {
Map<ApplicationId, Future<Optional<Deployment>>> prepared = new HashMap<>();
applicationIds.forEach(appId -> prepared.put(appId, executor.submit(() -> {
log.log(Level.INFO, () -> "Preparing " + appId);
Optional<Deployment> deployment = applicationRepository.deployFromLocalActive(appId, true /* bootstrap */);
if (deployment.isPresent()) {
deployment.get().prepare();
log.log(Level.INFO, () -> appId + " prepared");
} else {
log.log(Level.INFO, () -> "No deployment present for appId, not prepared");
}
return deployment;
})));
return waitForPrepare(prepared);
}
private List<ApplicationId> activate(List<DeploymentInfo> deployments) {
List<ApplicationId> failedActivations = new ArrayList<>();
deployments.forEach(d -> {
ApplicationId applicationId = d.applicationId();
log.log(Level.INFO, () -> "Activating " + applicationId);
try {
d.deployment().ifPresent(Deployment::activate);
} catch (Exception e) {
log.log(Level.INFO, () -> "Failed activating " + applicationId + ":" + e.getMessage());
failedActivations.add(applicationId);
}
log.log(Level.INFO, () -> applicationId + " activated");
});
return failedActivations;
}
private void logProgress(int applicationCount, PreparedApplications preparedApplications) {
log.log(Level.INFO, () -> preparedApplications.successCount() + " of " + applicationCount + " apps prepared " +
"(" + preparedApplications.failCount() + " failed)");
}
/**
 * Translates the state of a prepare Future into a DeploymentInfo:
 * completed -> success (with the Deployment, if any); threw or interrupted ->
 * failed (transient errors logged at INFO since bootstrap retries them);
 * 1 ms poll timed out -> inProgress.
 *
 * Fix: the original caught InterruptedException without restoring the
 * thread's interrupt status; we now re-interrupt so the polling caller can
 * still observe the interrupt.
 */
private DeploymentInfo getDeploymentStatus(ApplicationId applicationId, Future<Optional<Deployment>> future) {
try {
Optional<Deployment> deployment = future.get(1, TimeUnit.MILLISECONDS);
return new DeploymentInfo(applicationId, DeploymentInfo.Status.success, deployment);
} catch (ExecutionException | InterruptedException e) {
if (e instanceof InterruptedException)
Thread.currentThread().interrupt(); // preserve interrupt status for the caller
if (e.getCause() instanceof TransientException) {
log.log(Level.INFO, "Preparing" + " " + applicationId +
" failed with transient error, will retry after bootstrap: " + Exceptions.toMessageString(e));
} else {
log.log(Level.WARNING, "Preparing" + " " + applicationId + " failed, will retry", e);
}
return new DeploymentInfo(applicationId, DeploymentInfo.Status.failed);
} catch (TimeoutException e) {
// Future not done yet — poll again later.
return new DeploymentInfo(applicationId, DeploymentInfo.Status.inProgress);
}
}
private static class DeploymentInfo {
public enum Status { inProgress, success, failed }
private final ApplicationId applicationId;
private final Status status;
private final Optional<Deployment> deployment;
public DeploymentInfo(ApplicationId applicationId, Status status) {
this(applicationId, status, Optional.empty());
}
public DeploymentInfo(ApplicationId applicationId, Status status, Optional<Deployment> deployment) {
this.applicationId = applicationId;
this.status = status;
this.deployment = deployment;
}
public ApplicationId applicationId() { return applicationId; }
public Status status() { return status; }
public Optional<Deployment> deployment() { return deployment; }
}
// Accumulates per-application prepare outcomes during bootstrap.
// Note: success()/failed() recompute filtered lists on each call; callers in
// the polling loop invoke them repeatedly, which is fine at config-server
// application counts.
private static class PreparedApplications {
private final List<DeploymentInfo> deploymentInfos = new ArrayList<>();
public void add(DeploymentInfo deploymentInfo) {
this.deploymentInfos.add(deploymentInfo);
}
// Application ids whose prepare succeeded.
List<ApplicationId> success() { return withStatus(DeploymentInfo.Status.success); }
// Application ids whose prepare failed.
List<ApplicationId> failed() { return withStatus(DeploymentInfo.Status.failed); }
List<ApplicationId> withStatus(DeploymentInfo.Status status) {
return deploymentInfos.stream()
.filter(deploymentInfo -> deploymentInfo.status() == status)
.map(DeploymentInfo::applicationId)
.collect(Collectors.toList());
}
int successCount() { return success().size(); }
int failCount() { return failed().size(); }
boolean hasPrepareFailures() { return failCount() > 0; }
List<DeploymentInfo> deploymentInfos() { return deploymentInfos; }
// True once a terminal (success or failed) status was recorded for the id.
boolean prepareFinished(ApplicationId applicationId) {
return failed().contains(applicationId) || success().contains(applicationId);
}
}
} | class ConfigServerBootstrap extends AbstractComponent implements Runnable {
private static final Logger log = Logger.getLogger(ConfigServerBootstrap.class.getName());
enum Mode { BOOTSTRAP_IN_CONSTRUCTOR, FOR_TESTING_NO_BOOTSTRAP_OF_APPS}
enum RedeployingApplicationsFails { EXIT_JVM, CONTINUE }
enum VipStatusMode { VIP_STATUS_FILE, VIP_STATUS_PROGRAMMATICALLY }
private final ApplicationRepository applicationRepository;
private final RpcServer server;
private final VersionState versionState;
private final StateMonitor stateMonitor;
private final VipStatus vipStatus;
private final ConfigserverConfig configserverConfig;
private final Duration maxDurationOfRedeployment;
private final Duration sleepTimeWhenRedeployingFails;
private final RedeployingApplicationsFails exitIfRedeployingApplicationsFails;
private final ExecutorService rpcServerExecutor;
@Inject
public ConfigServerBootstrap(ApplicationRepository applicationRepository, RpcServer server,
VersionState versionState, StateMonitor stateMonitor, VipStatus vipStatus) {
this(applicationRepository, server, versionState, stateMonitor, vipStatus, BOOTSTRAP_IN_CONSTRUCTOR, EXIT_JVM,
applicationRepository.configserverConfig().hostedVespa()
? VipStatusMode.VIP_STATUS_FILE
: VipStatusMode.VIP_STATUS_PROGRAMMATICALLY);
}
ConfigServerBootstrap(ApplicationRepository applicationRepository, RpcServer server, VersionState versionState,
StateMonitor stateMonitor, VipStatus vipStatus, VipStatusMode vipStatusMode) {
this(applicationRepository, server, versionState, stateMonitor, vipStatus,
FOR_TESTING_NO_BOOTSTRAP_OF_APPS, CONTINUE, vipStatusMode);
}
private ConfigServerBootstrap(ApplicationRepository applicationRepository, RpcServer server,
VersionState versionState, StateMonitor stateMonitor, VipStatus vipStatus,
Mode mode, RedeployingApplicationsFails exitIfRedeployingApplicationsFails,
VipStatusMode vipStatusMode) {
this.applicationRepository = applicationRepository;
this.server = server;
this.versionState = versionState;
this.stateMonitor = stateMonitor;
this.vipStatus = vipStatus;
this.configserverConfig = applicationRepository.configserverConfig();
this.maxDurationOfRedeployment = Duration.ofSeconds(configserverConfig.maxDurationOfBootstrap());
this.sleepTimeWhenRedeployingFails = Duration.ofSeconds(configserverConfig.sleepTimeWhenRedeployingFails());
this.exitIfRedeployingApplicationsFails = exitIfRedeployingApplicationsFails;
rpcServerExecutor = Executors.newSingleThreadExecutor(new DaemonThreadFactory("config server RPC server"));
log.log(Level.FINE, () -> "Bootstrap mode: " + mode + ", VIP status mode: " + vipStatusMode);
initializing(vipStatusMode);
switch (mode) {
case BOOTSTRAP_IN_CONSTRUCTOR:
start();
break;
case FOR_TESTING_NO_BOOTSTRAP_OF_APPS:
break;
default:
throw new IllegalArgumentException("Unknown bootstrap mode " + mode + ", legal values: " + Arrays.toString(Mode.values()));
}
}
@Override
public void deconstruct() {
log.log(Level.INFO, "Stopping config server");
down();
server.stop();
log.log(Level.FINE, "RPC server stopped");
rpcServerExecutor.shutdown();
}
@Override
public void run() {
start();
do {
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
log.log(Level.SEVERE, "Got interrupted", e);
break;
}
} while (server.isRunning());
down();
}
public void start() {
if (versionState.isUpgraded()) {
log.log(Level.INFO, "Config server upgrading from " + versionState.storedVersion() + " to "
+ versionState.currentVersion() + ". Redeploying all applications");
try {
if ( ! redeployAllApplications()) {
redeployingApplicationsFailed();
return;
}
versionState.saveNewVersion();
log.log(Level.INFO, "All applications redeployed successfully");
} catch (Exception e) {
log.log(Level.SEVERE, "Redeployment of applications failed", e);
redeployingApplicationsFailed();
return;
}
}
applicationRepository.bootstrappingDone();
startRpcServer();
up();
}
StateMonitor.Status status() {
return stateMonitor.status();
}
private void up() {
vipStatus.setInRotation(true);
}
private void down() {
vipStatus.setInRotation(false);
}
private void initializing(VipStatusMode vipStatusMode) {
stateMonitor.status(StateMonitor.Status.initializing);
if (vipStatusMode == VipStatusMode.VIP_STATUS_PROGRAMMATICALLY)
vipStatus.setInRotation(false);
}
private void startRpcServer() {
rpcServerExecutor.execute(server);
Instant end = Instant.now().plus(Duration.ofSeconds(10));
while (!server.isRunning() && Instant.now().isBefore(end)) {
try {
Thread.sleep(10);
} catch (InterruptedException e) {
log.log(Level.SEVERE, "Got interrupted", e);
break;
}
}
if (!server.isRunning())
throw new RuntimeException("RPC server not started in 10 seconds");
}
private void redeployingApplicationsFailed() {
if (exitIfRedeployingApplicationsFails == EXIT_JVM) System.exit(1);
}
private boolean redeployAllApplications() throws InterruptedException {
Instant end = Instant.now().plus(maxDurationOfRedeployment);
List<ApplicationId> applicationsToRedeploy = applicationRepository.listApplications();
Collections.shuffle(applicationsToRedeploy);
long failCount = 0;
do {
applicationsToRedeploy = redeployApplications(applicationsToRedeploy);
if ( ! applicationsToRedeploy.isEmpty() && ! sleepTimeWhenRedeployingFails.isZero()) {
Duration sleepTime = sleepTimeWhenRedeployingFails.multipliedBy(++failCount);
if (sleepTime.compareTo(Duration.ofMinutes(10)) > 0)
sleepTime = Duration.ofMinutes(10);
log.log(Level.INFO, "Redeployment of " + applicationsToRedeploy + " not finished, will retry in " + sleepTime);
Thread.sleep(sleepTime.toMillis());
}
} while ( ! applicationsToRedeploy.isEmpty() && Instant.now().isBefore(end));
if ( ! applicationsToRedeploy.isEmpty()) {
log.log(Level.SEVERE, "Redeploying applications not finished after " + maxDurationOfRedeployment +
", exiting, applications that failed redeployment: " + applicationsToRedeploy);
return false;
}
return true;
}
private List<ApplicationId> redeployApplications(List<ApplicationId> applicationIds) throws InterruptedException {
ExecutorService executor = getExecutor();
log.log(Level.INFO, () -> "Redeploying " + applicationIds.size() + " apps: " + applicationIds);
PreparedApplications preparedApplications = prepare(applicationIds, executor);
if (preparedApplications.hasPrepareFailures()) {
log.log(Level.INFO, "Failed preparing applications: " + preparedApplications.failed());
return preparedApplications.failed();
}
List<ApplicationId> failed = activate(preparedApplications.deploymentInfos());
shutdownExecutor(executor);
return failed;
}
private void shutdownExecutor(ExecutorService executor) throws InterruptedException {
executor.shutdown();
if ( ! executor.awaitTermination(1, TimeUnit.HOURS)) {
log.log(Level.WARNING, "Awaiting termination of executor failed");
executor.shutdownNow();
}
}
private ExecutorService getExecutor() {
return Executors.newFixedThreadPool(configserverConfig.numRedeploymentThreads(),
new DaemonThreadFactory("redeploy-apps-"));
}
private PreparedApplications prepare(List<ApplicationId> applicationIds, ExecutorService executor) {
Map<ApplicationId, Future<Optional<Deployment>>> prepared = new HashMap<>();
applicationIds.forEach(appId -> prepared.put(appId, executor.submit(() -> {
log.log(Level.INFO, () -> "Preparing " + appId);
Optional<Deployment> deployment = applicationRepository.deployFromLocalActive(appId, true /* bootstrap */);
if (deployment.isPresent()) {
deployment.get().prepare();
log.log(Level.INFO, () -> appId + " prepared");
} else {
log.log(Level.INFO, () -> "No deployment present for appId, not prepared");
}
return deployment;
})));
return waitForPrepare(prepared);
}
private List<ApplicationId> activate(List<DeploymentInfo> deployments) {
List<ApplicationId> failedActivations = new ArrayList<>();
deployments.forEach(d -> {
ApplicationId applicationId = d.applicationId();
log.log(Level.INFO, () -> "Activating " + applicationId);
try {
d.deployment().ifPresent(Deployment::activate);
} catch (Exception e) {
log.log(Level.INFO, () -> "Failed activating " + applicationId + ":" + e.getMessage());
failedActivations.add(applicationId);
}
log.log(Level.INFO, () -> applicationId + " activated");
});
return failedActivations;
}
private void logProgress(int applicationCount, PreparedApplications preparedApplications) {
log.log(Level.INFO, () -> preparedApplications.successCount() + " of " + applicationCount + " apps prepared " +
"(" + preparedApplications.failCount() + " failed)");
}
private DeploymentInfo getDeploymentStatus(ApplicationId applicationId, Future<Optional<Deployment>> future) {
try {
Optional<Deployment> deployment = future.get(1, TimeUnit.MILLISECONDS);
return new DeploymentInfo(applicationId, DeploymentInfo.Status.success, deployment);
} catch (ExecutionException | InterruptedException e) {
if (e.getCause() instanceof TransientException) {
log.log(Level.INFO, "Preparing" + " " + applicationId +
" failed with transient error, will retry after bootstrap: " + Exceptions.toMessageString(e));
} else {
log.log(Level.WARNING, "Preparing" + " " + applicationId + " failed, will retry", e);
}
return new DeploymentInfo(applicationId, DeploymentInfo.Status.failed);
} catch (TimeoutException e) {
return new DeploymentInfo(applicationId, DeploymentInfo.Status.inProgress);
}
}
private static class DeploymentInfo {
public enum Status { inProgress, success, failed }
private final ApplicationId applicationId;
private final Status status;
private final Optional<Deployment> deployment;
public DeploymentInfo(ApplicationId applicationId, Status status) {
this(applicationId, status, Optional.empty());
}
public DeploymentInfo(ApplicationId applicationId, Status status, Optional<Deployment> deployment) {
this.applicationId = applicationId;
this.status = status;
this.deployment = deployment;
}
public ApplicationId applicationId() { return applicationId; }
public Status status() { return status; }
public Optional<Deployment> deployment() { return deployment; }
}
private static class PreparedApplications {
private final List<DeploymentInfo> deploymentInfos = new ArrayList<>();
public void add(DeploymentInfo deploymentInfo) {
this.deploymentInfos.add(deploymentInfo);
}
List<ApplicationId> success() { return withStatus(DeploymentInfo.Status.success); }
List<ApplicationId> failed() { return withStatus(DeploymentInfo.Status.failed); }
List<ApplicationId> withStatus(DeploymentInfo.Status status) {
return deploymentInfos.stream()
.filter(deploymentInfo -> deploymentInfo.status() == status)
.map(DeploymentInfo::applicationId)
.collect(Collectors.toList());
}
int successCount() { return success().size(); }
int failCount() { return failed().size(); }
boolean hasPrepareFailures() { return failCount() > 0; }
List<DeploymentInfo> deploymentInfos() { return deploymentInfos; }
boolean prepareFinished(ApplicationId applicationId) {
return failed().contains(applicationId) || success().contains(applicationId);
}
}
} |
Thanks, fixed | private PreparedApplications waitForPrepare(Map<ApplicationId, Future<Optional<Deployment>>> deployments) {
int applicationCount = deployments.size();
PreparedApplications applications = new PreparedApplications();
Instant lastLogged = Instant.EPOCH;
do {
deployments.forEach((applicationId, future) -> {
if (applications.prepareFinished(applicationId)) return;
DeploymentInfo status = getDeploymentStatus(applicationId, future);
switch (status.status()) {
case success:
case failed:
applications.add(status);
break;
case inProgress:
break;
default:
throw new IllegalArgumentException("Unknown deployment status " + status);
}
});
if ( ! Duration.between(lastLogged, Instant.now()).minus(Duration.ofSeconds(10)).isNegative()) {
logProgress(applicationCount, applications);
lastLogged = Instant.now();
}
} while (applications.failed().size() + applications.successCount() < applicationCount);
logProgress(applicationCount, applications);
return applications;
} | } while (applications.failed().size() + applications.successCount() < applicationCount); | private PreparedApplications waitForPrepare(Map<ApplicationId, Future<Optional<Deployment>>> deployments) {
int applicationCount = deployments.size();
PreparedApplications applications = new PreparedApplications();
Instant lastLogged = Instant.EPOCH;
do {
deployments.forEach((applicationId, future) -> {
if (applications.prepareFinished(applicationId)) return;
DeploymentInfo status = getDeploymentStatus(applicationId, future);
switch (status.status()) {
case success:
case failed:
applications.add(status);
break;
case inProgress:
break;
default:
throw new IllegalArgumentException("Unknown deployment status " + status);
}
});
if ( ! Duration.between(lastLogged, Instant.now()).minus(Duration.ofSeconds(10)).isNegative()) {
logProgress(applicationCount, applications);
lastLogged = Instant.now();
}
} while (applications.failCount() + applications.successCount() < applicationCount);
logProgress(applicationCount, applications);
return applications;
} | class ConfigServerBootstrap extends AbstractComponent implements Runnable {
private static final Logger log = Logger.getLogger(ConfigServerBootstrap.class.getName());
enum Mode { BOOTSTRAP_IN_CONSTRUCTOR, FOR_TESTING_NO_BOOTSTRAP_OF_APPS}
enum RedeployingApplicationsFails { EXIT_JVM, CONTINUE }
enum VipStatusMode { VIP_STATUS_FILE, VIP_STATUS_PROGRAMMATICALLY }
private final ApplicationRepository applicationRepository;
private final RpcServer server;
private final VersionState versionState;
private final StateMonitor stateMonitor;
private final VipStatus vipStatus;
private final ConfigserverConfig configserverConfig;
private final Duration maxDurationOfRedeployment;
private final Duration sleepTimeWhenRedeployingFails;
private final RedeployingApplicationsFails exitIfRedeployingApplicationsFails;
private final ExecutorService rpcServerExecutor;
@Inject
public ConfigServerBootstrap(ApplicationRepository applicationRepository, RpcServer server,
VersionState versionState, StateMonitor stateMonitor, VipStatus vipStatus) {
this(applicationRepository, server, versionState, stateMonitor, vipStatus, BOOTSTRAP_IN_CONSTRUCTOR, EXIT_JVM,
applicationRepository.configserverConfig().hostedVespa()
? VipStatusMode.VIP_STATUS_FILE
: VipStatusMode.VIP_STATUS_PROGRAMMATICALLY);
}
ConfigServerBootstrap(ApplicationRepository applicationRepository, RpcServer server, VersionState versionState,
StateMonitor stateMonitor, VipStatus vipStatus, VipStatusMode vipStatusMode) {
this(applicationRepository, server, versionState, stateMonitor, vipStatus,
FOR_TESTING_NO_BOOTSTRAP_OF_APPS, CONTINUE, vipStatusMode);
}
private ConfigServerBootstrap(ApplicationRepository applicationRepository, RpcServer server,
VersionState versionState, StateMonitor stateMonitor, VipStatus vipStatus,
Mode mode, RedeployingApplicationsFails exitIfRedeployingApplicationsFails,
VipStatusMode vipStatusMode) {
this.applicationRepository = applicationRepository;
this.server = server;
this.versionState = versionState;
this.stateMonitor = stateMonitor;
this.vipStatus = vipStatus;
this.configserverConfig = applicationRepository.configserverConfig();
this.maxDurationOfRedeployment = Duration.ofSeconds(configserverConfig.maxDurationOfBootstrap());
this.sleepTimeWhenRedeployingFails = Duration.ofSeconds(configserverConfig.sleepTimeWhenRedeployingFails());
this.exitIfRedeployingApplicationsFails = exitIfRedeployingApplicationsFails;
rpcServerExecutor = Executors.newSingleThreadExecutor(new DaemonThreadFactory("config server RPC server"));
log.log(Level.FINE, () -> "Bootstrap mode: " + mode + ", VIP status mode: " + vipStatusMode);
initializing(vipStatusMode);
switch (mode) {
case BOOTSTRAP_IN_CONSTRUCTOR:
start();
break;
case FOR_TESTING_NO_BOOTSTRAP_OF_APPS:
break;
default:
throw new IllegalArgumentException("Unknown bootstrap mode " + mode + ", legal values: " + Arrays.toString(Mode.values()));
}
}
@Override
public void deconstruct() {
log.log(Level.INFO, "Stopping config server");
down();
server.stop();
log.log(Level.FINE, "RPC server stopped");
rpcServerExecutor.shutdown();
}
@Override
public void run() {
start();
do {
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
log.log(Level.SEVERE, "Got interrupted", e);
break;
}
} while (server.isRunning());
down();
}
public void start() {
if (versionState.isUpgraded()) {
log.log(Level.INFO, "Config server upgrading from " + versionState.storedVersion() + " to "
+ versionState.currentVersion() + ". Redeploying all applications");
try {
if ( ! redeployAllApplications()) {
redeployingApplicationsFailed();
return;
}
versionState.saveNewVersion();
log.log(Level.INFO, "All applications redeployed successfully");
} catch (Exception e) {
log.log(Level.SEVERE, "Redeployment of applications failed", e);
redeployingApplicationsFailed();
return;
}
}
applicationRepository.bootstrappingDone();
startRpcServer();
up();
}
StateMonitor.Status status() {
return stateMonitor.status();
}
private void up() {
vipStatus.setInRotation(true);
}
private void down() {
vipStatus.setInRotation(false);
}
private void initializing(VipStatusMode vipStatusMode) {
stateMonitor.status(StateMonitor.Status.initializing);
if (vipStatusMode == VipStatusMode.VIP_STATUS_PROGRAMMATICALLY)
vipStatus.setInRotation(false);
}
private void startRpcServer() {
rpcServerExecutor.execute(server);
Instant end = Instant.now().plus(Duration.ofSeconds(10));
while (!server.isRunning() && Instant.now().isBefore(end)) {
try {
Thread.sleep(10);
} catch (InterruptedException e) {
log.log(Level.SEVERE, "Got interrupted", e);
break;
}
}
if (!server.isRunning())
throw new RuntimeException("RPC server not started in 10 seconds");
}
private void redeployingApplicationsFailed() {
if (exitIfRedeployingApplicationsFails == EXIT_JVM) System.exit(1);
}
private boolean redeployAllApplications() throws InterruptedException {
Instant end = Instant.now().plus(maxDurationOfRedeployment);
List<ApplicationId> applicationsToRedeploy = applicationRepository.listApplications();
Collections.shuffle(applicationsToRedeploy);
long failCount = 0;
do {
applicationsToRedeploy = redeployApplications(applicationsToRedeploy);
if ( ! applicationsToRedeploy.isEmpty() && ! sleepTimeWhenRedeployingFails.isZero()) {
Duration sleepTime = sleepTimeWhenRedeployingFails.multipliedBy(++failCount);
if (sleepTime.compareTo(Duration.ofMinutes(10)) > 0)
sleepTime = Duration.ofMinutes(10);
log.log(Level.INFO, "Redeployment of " + applicationsToRedeploy + " not finished, will retry in " + sleepTime);
Thread.sleep(sleepTime.toMillis());
}
} while ( ! applicationsToRedeploy.isEmpty() && Instant.now().isBefore(end));
if ( ! applicationsToRedeploy.isEmpty()) {
log.log(Level.SEVERE, "Redeploying applications not finished after " + maxDurationOfRedeployment +
", exiting, applications that failed redeployment: " + applicationsToRedeploy);
return false;
}
return true;
}
private List<ApplicationId> redeployApplications(List<ApplicationId> applicationIds) throws InterruptedException {
ExecutorService executor = getExecutor();
log.log(Level.INFO, () -> "Redeploying " + applicationIds.size() + " apps: " + applicationIds);
PreparedApplications preparedApplications = prepare(applicationIds, executor);
if (preparedApplications.hasPrepareFailures()) {
log.log(Level.INFO, "Failed preparing applications: " + preparedApplications.failed());
return preparedApplications.failed();
}
List<ApplicationId> failed = activate(preparedApplications.deploymentInfos());
shutdownExecutor(executor);
return failed;
}
private void shutdownExecutor(ExecutorService executor) throws InterruptedException {
executor.shutdown();
if ( ! executor.awaitTermination(1, TimeUnit.HOURS)) {
log.log(Level.WARNING, "Awaiting termination of executor failed");
executor.shutdownNow();
}
}
private ExecutorService getExecutor() {
return Executors.newFixedThreadPool(configserverConfig.numRedeploymentThreads(),
new DaemonThreadFactory("redeploy-apps-"));
}
private PreparedApplications prepare(List<ApplicationId> applicationIds, ExecutorService executor) {
Map<ApplicationId, Future<Optional<Deployment>>> prepared = new HashMap<>();
applicationIds.forEach(appId -> prepared.put(appId, executor.submit(() -> {
log.log(Level.INFO, () -> "Preparing " + appId);
Optional<Deployment> deployment = applicationRepository.deployFromLocalActive(appId, true /* bootstrap */);
if (deployment.isPresent()) {
deployment.get().prepare();
log.log(Level.INFO, () -> appId + " prepared");
} else {
log.log(Level.INFO, () -> "No deployment present for appId, not prepared");
}
return deployment;
})));
return waitForPrepare(prepared);
}
private List<ApplicationId> activate(List<DeploymentInfo> deployments) {
List<ApplicationId> failedActivations = new ArrayList<>();
deployments.forEach(d -> {
ApplicationId applicationId = d.applicationId();
log.log(Level.INFO, () -> "Activating " + applicationId);
try {
d.deployment().ifPresent(Deployment::activate);
} catch (Exception e) {
log.log(Level.INFO, () -> "Failed activating " + applicationId + ":" + e.getMessage());
failedActivations.add(applicationId);
}
log.log(Level.INFO, () -> applicationId + " activated");
});
return failedActivations;
}
private void logProgress(int applicationCount, PreparedApplications preparedApplications) {
log.log(Level.INFO, () -> preparedApplications.successCount() + " of " + applicationCount + " apps prepared " +
"(" + preparedApplications.failCount() + " failed)");
}
private DeploymentInfo getDeploymentStatus(ApplicationId applicationId, Future<Optional<Deployment>> future) {
try {
Optional<Deployment> deployment = future.get(1, TimeUnit.MILLISECONDS);
return new DeploymentInfo(applicationId, DeploymentInfo.Status.success, deployment);
} catch (ExecutionException | InterruptedException e) {
if (e.getCause() instanceof TransientException) {
log.log(Level.INFO, "Preparing" + " " + applicationId +
" failed with transient error, will retry after bootstrap: " + Exceptions.toMessageString(e));
} else {
log.log(Level.WARNING, "Preparing" + " " + applicationId + " failed, will retry", e);
}
return new DeploymentInfo(applicationId, DeploymentInfo.Status.failed);
} catch (TimeoutException e) {
return new DeploymentInfo(applicationId, DeploymentInfo.Status.inProgress);
}
}
private static class DeploymentInfo {
public enum Status { inProgress, success, failed }
private final ApplicationId applicationId;
private final Status status;
private final Optional<Deployment> deployment;
public DeploymentInfo(ApplicationId applicationId, Status status) {
this(applicationId, status, Optional.empty());
}
public DeploymentInfo(ApplicationId applicationId, Status status, Optional<Deployment> deployment) {
this.applicationId = applicationId;
this.status = status;
this.deployment = deployment;
}
public ApplicationId applicationId() { return applicationId; }
public Status status() { return status; }
public Optional<Deployment> deployment() { return deployment; }
}
private static class PreparedApplications {
private final List<DeploymentInfo> deploymentInfos = new ArrayList<>();
public void add(DeploymentInfo deploymentInfo) {
this.deploymentInfos.add(deploymentInfo);
}
List<ApplicationId> success() { return withStatus(DeploymentInfo.Status.success); }
List<ApplicationId> failed() { return withStatus(DeploymentInfo.Status.failed); }
List<ApplicationId> withStatus(DeploymentInfo.Status status) {
return deploymentInfos.stream()
.filter(deploymentInfo -> deploymentInfo.status() == status)
.map(DeploymentInfo::applicationId)
.collect(Collectors.toList());
}
int successCount() { return success().size(); }
int failCount() { return failed().size(); }
boolean hasPrepareFailures() { return failCount() > 0; }
List<DeploymentInfo> deploymentInfos() { return deploymentInfos; }
boolean prepareFinished(ApplicationId applicationId) {
return failed().contains(applicationId) || success().contains(applicationId);
}
}
} | class ConfigServerBootstrap extends AbstractComponent implements Runnable {
private static final Logger log = Logger.getLogger(ConfigServerBootstrap.class.getName());
enum Mode { BOOTSTRAP_IN_CONSTRUCTOR, FOR_TESTING_NO_BOOTSTRAP_OF_APPS}
enum RedeployingApplicationsFails { EXIT_JVM, CONTINUE }
enum VipStatusMode { VIP_STATUS_FILE, VIP_STATUS_PROGRAMMATICALLY }
private final ApplicationRepository applicationRepository;
private final RpcServer server;
private final VersionState versionState;
private final StateMonitor stateMonitor;
private final VipStatus vipStatus;
private final ConfigserverConfig configserverConfig;
private final Duration maxDurationOfRedeployment;
private final Duration sleepTimeWhenRedeployingFails;
private final RedeployingApplicationsFails exitIfRedeployingApplicationsFails;
private final ExecutorService rpcServerExecutor;
@Inject
public ConfigServerBootstrap(ApplicationRepository applicationRepository, RpcServer server,
VersionState versionState, StateMonitor stateMonitor, VipStatus vipStatus) {
this(applicationRepository, server, versionState, stateMonitor, vipStatus, BOOTSTRAP_IN_CONSTRUCTOR, EXIT_JVM,
applicationRepository.configserverConfig().hostedVespa()
? VipStatusMode.VIP_STATUS_FILE
: VipStatusMode.VIP_STATUS_PROGRAMMATICALLY);
}
ConfigServerBootstrap(ApplicationRepository applicationRepository, RpcServer server, VersionState versionState,
StateMonitor stateMonitor, VipStatus vipStatus, VipStatusMode vipStatusMode) {
this(applicationRepository, server, versionState, stateMonitor, vipStatus,
FOR_TESTING_NO_BOOTSTRAP_OF_APPS, CONTINUE, vipStatusMode);
}
private ConfigServerBootstrap(ApplicationRepository applicationRepository, RpcServer server,
VersionState versionState, StateMonitor stateMonitor, VipStatus vipStatus,
Mode mode, RedeployingApplicationsFails exitIfRedeployingApplicationsFails,
VipStatusMode vipStatusMode) {
this.applicationRepository = applicationRepository;
this.server = server;
this.versionState = versionState;
this.stateMonitor = stateMonitor;
this.vipStatus = vipStatus;
this.configserverConfig = applicationRepository.configserverConfig();
this.maxDurationOfRedeployment = Duration.ofSeconds(configserverConfig.maxDurationOfBootstrap());
this.sleepTimeWhenRedeployingFails = Duration.ofSeconds(configserverConfig.sleepTimeWhenRedeployingFails());
this.exitIfRedeployingApplicationsFails = exitIfRedeployingApplicationsFails;
rpcServerExecutor = Executors.newSingleThreadExecutor(new DaemonThreadFactory("config server RPC server"));
log.log(Level.FINE, () -> "Bootstrap mode: " + mode + ", VIP status mode: " + vipStatusMode);
initializing(vipStatusMode);
switch (mode) {
case BOOTSTRAP_IN_CONSTRUCTOR:
start();
break;
case FOR_TESTING_NO_BOOTSTRAP_OF_APPS:
break;
default:
throw new IllegalArgumentException("Unknown bootstrap mode " + mode + ", legal values: " + Arrays.toString(Mode.values()));
}
}
@Override
public void deconstruct() {
log.log(Level.INFO, "Stopping config server");
down();
server.stop();
log.log(Level.FINE, "RPC server stopped");
rpcServerExecutor.shutdown();
}
@Override
public void run() {
start();
do {
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
log.log(Level.SEVERE, "Got interrupted", e);
break;
}
} while (server.isRunning());
down();
}
public void start() {
if (versionState.isUpgraded()) {
log.log(Level.INFO, "Config server upgrading from " + versionState.storedVersion() + " to "
+ versionState.currentVersion() + ". Redeploying all applications");
try {
if ( ! redeployAllApplications()) {
redeployingApplicationsFailed();
return;
}
versionState.saveNewVersion();
log.log(Level.INFO, "All applications redeployed successfully");
} catch (Exception e) {
log.log(Level.SEVERE, "Redeployment of applications failed", e);
redeployingApplicationsFailed();
return;
}
}
applicationRepository.bootstrappingDone();
startRpcServer();
up();
}
StateMonitor.Status status() {
return stateMonitor.status();
}
private void up() {
vipStatus.setInRotation(true);
}
private void down() {
vipStatus.setInRotation(false);
}
private void initializing(VipStatusMode vipStatusMode) {
stateMonitor.status(StateMonitor.Status.initializing);
if (vipStatusMode == VipStatusMode.VIP_STATUS_PROGRAMMATICALLY)
vipStatus.setInRotation(false);
}
private void startRpcServer() {
rpcServerExecutor.execute(server);
Instant end = Instant.now().plus(Duration.ofSeconds(10));
while (!server.isRunning() && Instant.now().isBefore(end)) {
try {
Thread.sleep(10);
} catch (InterruptedException e) {
log.log(Level.SEVERE, "Got interrupted", e);
break;
}
}
if (!server.isRunning())
throw new RuntimeException("RPC server not started in 10 seconds");
}
private void redeployingApplicationsFailed() {
if (exitIfRedeployingApplicationsFails == EXIT_JVM) System.exit(1);
}
private boolean redeployAllApplications() throws InterruptedException {
Instant end = Instant.now().plus(maxDurationOfRedeployment);
List<ApplicationId> applicationsToRedeploy = applicationRepository.listApplications();
Collections.shuffle(applicationsToRedeploy);
long failCount = 0;
do {
applicationsToRedeploy = redeployApplications(applicationsToRedeploy);
if ( ! applicationsToRedeploy.isEmpty() && ! sleepTimeWhenRedeployingFails.isZero()) {
Duration sleepTime = sleepTimeWhenRedeployingFails.multipliedBy(++failCount);
if (sleepTime.compareTo(Duration.ofMinutes(10)) > 0)
sleepTime = Duration.ofMinutes(10);
log.log(Level.INFO, "Redeployment of " + applicationsToRedeploy + " not finished, will retry in " + sleepTime);
Thread.sleep(sleepTime.toMillis());
}
} while ( ! applicationsToRedeploy.isEmpty() && Instant.now().isBefore(end));
if ( ! applicationsToRedeploy.isEmpty()) {
log.log(Level.SEVERE, "Redeploying applications not finished after " + maxDurationOfRedeployment +
", exiting, applications that failed redeployment: " + applicationsToRedeploy);
return false;
}
return true;
}
private List<ApplicationId> redeployApplications(List<ApplicationId> applicationIds) throws InterruptedException {
ExecutorService executor = getExecutor();
log.log(Level.INFO, () -> "Redeploying " + applicationIds.size() + " apps: " + applicationIds);
PreparedApplications preparedApplications = prepare(applicationIds, executor);
if (preparedApplications.hasPrepareFailures()) {
log.log(Level.INFO, "Failed preparing applications: " + preparedApplications.failed());
return preparedApplications.failed();
}
List<ApplicationId> failed = activate(preparedApplications.deploymentInfos());
shutdownExecutor(executor);
return failed;
}
private void shutdownExecutor(ExecutorService executor) throws InterruptedException {
executor.shutdown();
if ( ! executor.awaitTermination(1, TimeUnit.HOURS)) {
log.log(Level.WARNING, "Awaiting termination of executor failed");
executor.shutdownNow();
}
}
private ExecutorService getExecutor() {
return Executors.newFixedThreadPool(configserverConfig.numRedeploymentThreads(),
new DaemonThreadFactory("redeploy-apps-"));
}
private PreparedApplications prepare(List<ApplicationId> applicationIds, ExecutorService executor) {
Map<ApplicationId, Future<Optional<Deployment>>> prepared = new HashMap<>();
applicationIds.forEach(appId -> prepared.put(appId, executor.submit(() -> {
log.log(Level.INFO, () -> "Preparing " + appId);
Optional<Deployment> deployment = applicationRepository.deployFromLocalActive(appId, true /* bootstrap */);
if (deployment.isPresent()) {
deployment.get().prepare();
log.log(Level.INFO, () -> appId + " prepared");
} else {
log.log(Level.INFO, () -> "No deployment present for appId, not prepared");
}
return deployment;
})));
return waitForPrepare(prepared);
}
private List<ApplicationId> activate(List<DeploymentInfo> deployments) {
List<ApplicationId> failedActivations = new ArrayList<>();
deployments.forEach(d -> {
ApplicationId applicationId = d.applicationId();
log.log(Level.INFO, () -> "Activating " + applicationId);
try {
d.deployment().ifPresent(Deployment::activate);
} catch (Exception e) {
log.log(Level.INFO, () -> "Failed activating " + applicationId + ":" + e.getMessage());
failedActivations.add(applicationId);
}
log.log(Level.INFO, () -> applicationId + " activated");
});
return failedActivations;
}
private void logProgress(int applicationCount, PreparedApplications preparedApplications) {
log.log(Level.INFO, () -> preparedApplications.successCount() + " of " + applicationCount + " apps prepared " +
"(" + preparedApplications.failCount() + " failed)");
}
private DeploymentInfo getDeploymentStatus(ApplicationId applicationId, Future<Optional<Deployment>> future) {
try {
Optional<Deployment> deployment = future.get(1, TimeUnit.MILLISECONDS);
return new DeploymentInfo(applicationId, DeploymentInfo.Status.success, deployment);
} catch (ExecutionException | InterruptedException e) {
if (e.getCause() instanceof TransientException) {
log.log(Level.INFO, "Preparing" + " " + applicationId +
" failed with transient error, will retry after bootstrap: " + Exceptions.toMessageString(e));
} else {
log.log(Level.WARNING, "Preparing" + " " + applicationId + " failed, will retry", e);
}
return new DeploymentInfo(applicationId, DeploymentInfo.Status.failed);
} catch (TimeoutException e) {
return new DeploymentInfo(applicationId, DeploymentInfo.Status.inProgress);
}
}
private static class DeploymentInfo {
public enum Status { inProgress, success, failed }
private final ApplicationId applicationId;
private final Status status;
private final Optional<Deployment> deployment;
public DeploymentInfo(ApplicationId applicationId, Status status) {
this(applicationId, status, Optional.empty());
}
public DeploymentInfo(ApplicationId applicationId, Status status, Optional<Deployment> deployment) {
this.applicationId = applicationId;
this.status = status;
this.deployment = deployment;
}
public ApplicationId applicationId() { return applicationId; }
public Status status() { return status; }
public Optional<Deployment> deployment() { return deployment; }
}
private static class PreparedApplications {
private final List<DeploymentInfo> deploymentInfos = new ArrayList<>();
public void add(DeploymentInfo deploymentInfo) {
this.deploymentInfos.add(deploymentInfo);
}
List<ApplicationId> success() { return withStatus(DeploymentInfo.Status.success); }
List<ApplicationId> failed() { return withStatus(DeploymentInfo.Status.failed); }
List<ApplicationId> withStatus(DeploymentInfo.Status status) {
return deploymentInfos.stream()
.filter(deploymentInfo -> deploymentInfo.status() == status)
.map(DeploymentInfo::applicationId)
.collect(Collectors.toList());
}
int successCount() { return success().size(); }
int failCount() { return failed().size(); }
boolean hasPrepareFailures() { return failCount() > 0; }
List<DeploymentInfo> deploymentInfos() { return deploymentInfos; }
boolean prepareFinished(ApplicationId applicationId) {
return failed().contains(applicationId) || success().contains(applicationId);
}
}
} |
```suggestion ``` | private boolean throttle(Node node) {
if (throttlePolicy == ThrottlePolicy.disabled) return false;
Instant startOfThrottleWindow = clock().instant().minus(throttlePolicy.throttleWindow);
NodeList allNodes = nodeRepository().nodes().list();
NodeList recentlyFailedNodes = allNodes.state(Node.State.failed)
.matching(n -> n.history().hasEventAfter(History.Event.Type.failed,
startOfThrottleWindow));
log.info("node = " + node + ", recentlyFailedNodes.size() = " + recentlyFailedNodes.size() +
", throttlePolicy.allowedToFailOf(" + allNodes.size() + ") = " +
throttlePolicy.allowedToFailOf(allNodes.size()));
if (recentlyFailedNodes.size() < throttlePolicy.allowedToFailOf(allNodes.size())) return false;
if (node.parentHostname().isEmpty() &&
recentlyFailedNodes.parents().size() < throttlePolicy.minimumAllowedToFail) return false;
if (recentlyFailedNodes.parentOf(node).isPresent()) return false;
log.info(String.format("Want to fail node %s, but throttling is in effect: %s", node.hostname(),
throttlePolicy.toHumanReadableString(allNodes.size())));
return true;
} | throttlePolicy.allowedToFailOf(allNodes.size())); | private boolean throttle(Node node) {
if (throttlePolicy == ThrottlePolicy.disabled) return false;
Instant startOfThrottleWindow = clock().instant().minus(throttlePolicy.throttleWindow);
NodeList allNodes = nodeRepository().nodes().list();
NodeList recentlyFailedNodes = allNodes.state(Node.State.failed)
.matching(n -> n.history().hasEventAfter(History.Event.Type.failed,
startOfThrottleWindow));
if (recentlyFailedNodes.size() < throttlePolicy.allowedToFailOf(allNodes.size())) return false;
if (node.parentHostname().isEmpty() &&
recentlyFailedNodes.parents().size() < throttlePolicy.minimumAllowedToFail) return false;
if (recentlyFailedNodes.parentOf(node).isPresent()) return false;
log.info(String.format("Want to fail node %s, but throttling is in effect: %s", node.hostname(),
throttlePolicy.toHumanReadableString(allNodes.size())));
return true;
} | class NodeFailer extends NodeRepositoryMaintainer {
private static final Logger log = Logger.getLogger(NodeFailer.class.getName());
private static final Duration nodeRequestInterval = Duration.ofMinutes(10);
/** Metric for number of hosts that we want to fail, but cannot due to throttling */
static final String throttledHostFailuresMetric = "throttledHostFailures";
/** Metric for number of nodes that we want to fail, but cannot due to throttling */
static final String throttledNodeFailuresMetric = "throttledNodeFailures";
/** Metric that indicates whether throttling is active where 1 means active and 0 means inactive */
static final String throttlingActiveMetric = "nodeFailThrottling";
private final Deployer deployer;
private final Duration downTimeLimit;
private final Orchestrator orchestrator;
private final Instant constructionTime;
private final ThrottlePolicy throttlePolicy;
private final Metric metric;
public NodeFailer(Deployer deployer, NodeRepository nodeRepository,
Duration downTimeLimit, Duration interval, Orchestrator orchestrator,
ThrottlePolicy throttlePolicy, Metric metric) {
super(nodeRepository, min(downTimeLimit.dividedBy(2), interval), metric);
this.deployer = deployer;
this.downTimeLimit = downTimeLimit;
this.orchestrator = orchestrator;
this.constructionTime = nodeRepository.clock().instant();
this.throttlePolicy = throttlePolicy;
this.metric = metric;
}
@Override
protected double maintain() {
if ( ! nodeRepository().nodes().isWorking()) return 0.0;
int attempts = 0;
int failures = 0;
int throttledHostFailures = 0;
int throttledNodeFailures = 0;
try (Mutex lock = nodeRepository().nodes().lockUnallocated()) {
for (Map.Entry<Node, String> entry : getReadyNodesByFailureReason().entrySet()) {
attempts++;
Node node = entry.getKey();
if (throttle(node)) {
failures++;
if (node.type().isHost())
throttledHostFailures++;
else
throttledNodeFailures++;
continue;
}
String reason = entry.getValue();
nodeRepository().nodes().fail(node.hostname(), Agent.NodeFailer, reason);
}
}
for (Map.Entry<Node, String> entry : getActiveNodesByFailureReason().entrySet()) {
attempts++;
Node node = entry.getKey();
if (!failAllowedFor(node.type())) continue;
if (throttle(node)) {
failures++;
if (node.type().isHost())
throttledHostFailures++;
else
throttledNodeFailures++;
continue;
}
String reason = entry.getValue();
failActive(node, reason);
}
NodeList activeNodes = nodeRepository().nodes().list(Node.State.active);
for (Node host : activeNodes.hosts().failing()) {
if ( ! activeNodes.childrenOf(host).isEmpty()) continue;
Optional<NodeMutex> locked = Optional.empty();
try {
attempts++;
locked = nodeRepository().nodes().lockAndGet(host);
if (locked.isEmpty()) continue;
nodeRepository().nodes().fail(List.of(locked.get().node()), Agent.NodeFailer,
"Host should be failed and have no tenant nodes");
}
catch (Exception e) {
failures++;
}
finally {
locked.ifPresent(NodeMutex::close);
}
}
int throttlingActive = Math.min(1, throttledHostFailures + throttledNodeFailures);
metric.set(throttlingActiveMetric, throttlingActive, null);
metric.set(throttledHostFailuresMetric, throttledHostFailures, null);
metric.set(throttledNodeFailuresMetric, throttledNodeFailures, null);
return asSuccessFactor(attempts, failures);
}
private Map<Node, String> getReadyNodesByFailureReason() {
Instant oldestAcceptableRequestTime =
constructionTime.isAfter(clock().instant().minus(nodeRequestInterval.multipliedBy(2))) ?
Instant.EPOCH :
clock().instant().minus(downTimeLimit).minus(nodeRequestInterval);
Map<Node, String> nodesByFailureReason = new HashMap<>();
for (Node node : nodeRepository().nodes().list(Node.State.ready)) {
Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository().nodes().node(parent)).orElse(node);
List<String> failureReports = reasonsToFailParentHost(hostNode);
if (failureReports.size() > 0) {
if (hostNode.equals(node)) {
nodesByFailureReason.put(node, "Host has failure reports: " + failureReports);
} else {
nodesByFailureReason.put(node, "Parent (" + hostNode + ") has failure reports: " + failureReports);
}
}
}
return nodesByFailureReason;
}
private Map<Node, String> getActiveNodesByFailureReason() {
NodeList activeNodes = nodeRepository().nodes().list(Node.State.active);
Instant graceTimeEnd = clock().instant().minus(downTimeLimit);
Map<Node, String> nodesByFailureReason = new HashMap<>();
for (Node node : activeNodes) {
if (node.history().hasEventBefore(History.Event.Type.down, graceTimeEnd) && ! applicationSuspended(node)) {
if ( ! node.history().hasEventAfter(History.Event.Type.activated, graceTimeEnd))
nodesByFailureReason.put(node, "Node has been down longer than " + downTimeLimit);
}
else if (hostSuspended(node, activeNodes)) {
Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository().nodes().node(parent)).orElse(node);
if (hostNode.type().isHost()) {
List<String> failureReports = reasonsToFailParentHost(hostNode);
if (failureReports.size() > 0) {
if (hostNode.equals(node)) {
nodesByFailureReason.put(node, "Host has failure reports: " + failureReports);
} else {
nodesByFailureReason.put(node, "Parent (" + hostNode + ") has failure reports: " + failureReports);
}
}
}
}
}
return nodesByFailureReason;
}
public static List<String> reasonsToFailParentHost(Node hostNode) {
return hostNode.reports().getReports().stream()
.filter(report -> report.getType().hostShouldBeFailed())
.map(report -> report.getReportId() + " reported " + report.getCreatedTime() + ": " + report.getDescription())
.collect(Collectors.toList());
}
/** Returns whether node has any kind of hardware issue */
static boolean hasHardwareIssue(Node node, NodeRepository nodeRepository) {
Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository.nodes().node(parent)).orElse(node);
return reasonsToFailParentHost(hostNode).size() > 0;
}
private boolean expectConfigRequests(Node node) {
return !node.type().isHost();
}
private boolean hasNodeRequestedConfigAfter(Node node, Instant instant) {
return !wasMadeReadyBefore(node, instant) || hasRecordedRequestAfter(node, instant);
}
private boolean wasMadeReadyBefore(Node node, Instant instant) {
return node.history().hasEventBefore(History.Event.Type.readied, instant);
}
private boolean hasRecordedRequestAfter(Node node, Instant instant) {
return node.history().hasEventAfter(History.Event.Type.requested, instant);
}
private boolean applicationSuspended(Node node) {
try {
return orchestrator.getApplicationInstanceStatus(node.allocation().get().owner())
== ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN;
} catch (ApplicationIdNotFoundException e) {
return false;
}
}
private boolean nodeSuspended(Node node) {
try {
return orchestrator.getNodeStatus(new HostName(node.hostname())).isSuspended();
} catch (HostNameNotFoundException e) {
return false;
}
}
/** Is the node and all active children suspended? */
private boolean hostSuspended(Node node, NodeList activeNodes) {
if (!nodeSuspended(node)) return false;
if (node.parentHostname().isPresent()) return true;
return activeNodes.stream()
.filter(childNode -> childNode.parentHostname().isPresent() &&
childNode.parentHostname().get().equals(node.hostname()))
.allMatch(this::nodeSuspended);
}
/**
* We can attempt to fail any number of *tenant* and *host* nodes because the operation will not be effected
* unless the node is replaced.
* We can also attempt to fail a single proxy(host) as there should be enough redundancy to handle that.
* But we refuse to fail out config(host)/controller(host)
*/
private boolean failAllowedFor(NodeType nodeType) {
switch (nodeType) {
case tenant:
case host:
return true;
case proxy:
case proxyhost:
return nodeRepository().nodes().list(Node.State.failed).nodeType(nodeType).isEmpty();
default:
return false;
}
}
/**
* Called when a node should be moved to the failed state: Do that if it seems safe,
* which is when the node repo has available capacity to replace the node (and all its tenant nodes if host).
* Otherwise not replacing the node ensures (by Orchestrator check) that no further action will be taken.
*
* @return whether node was successfully failed
*/
private boolean failActive(Node node, String reason) {
Optional<Deployment> deployment =
deployer.deployFromLocalActive(node.allocation().get().owner(), Duration.ofMinutes(30));
if (deployment.isEmpty()) return false;
try (Mutex lock = nodeRepository().nodes().lock(node.allocation().get().owner())) {
boolean allTenantNodesFailedOutSuccessfully = true;
String reasonForChildFailure = "Failing due to parent host " + node.hostname() + " failure: " + reason;
for (Node failingTenantNode : nodeRepository().nodes().list().childrenOf(node)) {
if (failingTenantNode.state() == Node.State.active) {
allTenantNodesFailedOutSuccessfully &= failActive(failingTenantNode, reasonForChildFailure);
} else {
nodeRepository().nodes().fail(failingTenantNode.hostname(), Agent.NodeFailer, reasonForChildFailure);
}
}
if (! allTenantNodesFailedOutSuccessfully) return false;
wantToFail(node, true, lock);
try {
deployment.get().activate();
return true;
} catch (TransientException e) {
log.log(Level.INFO, "Failed to redeploy " + node.allocation().get().owner() +
" with a transient error, will be retried by application maintainer: " +
Exceptions.toMessageString(e));
return true;
} catch (RuntimeException e) {
nodeRepository().nodes().node(node.hostname())
.ifPresent(n -> wantToFail(n, false, lock));
log.log(Level.WARNING, "Could not fail " + node + " for " + node.allocation().get().owner() +
" for " + reason + ": " + Exceptions.toMessageString(e));
return false;
}
}
}
private void wantToFail(Node node, boolean wantToFail, Mutex lock) {
nodeRepository().nodes().write(node.withWantToFail(wantToFail, Agent.NodeFailer, clock().instant()), lock);
}
/** Returns true if node failing should be throttled */
public enum ThrottlePolicy {
hosted(Duration.ofDays(1), 0.02, 2),
disabled(Duration.ZERO, 0, 0);
private final Duration throttleWindow;
private final double fractionAllowedToFail;
private final int minimumAllowedToFail;
ThrottlePolicy(Duration throttleWindow, double fractionAllowedToFail, int minimumAllowedToFail) {
this.throttleWindow = throttleWindow;
this.fractionAllowedToFail = fractionAllowedToFail;
this.minimumAllowedToFail = minimumAllowedToFail;
}
public int allowedToFailOf(int totalNodes) {
return (int) Math.max(totalNodes * fractionAllowedToFail, minimumAllowedToFail);
}
public String toHumanReadableString(int totalNodes) {
return String.format("Max %.0f%% (%d) or %d nodes can fail over a period of %s", fractionAllowedToFail*100,
allowedToFailOf(totalNodes),
minimumAllowedToFail, throttleWindow);
}
}
} | class NodeFailer extends NodeRepositoryMaintainer {
private static final Logger log = Logger.getLogger(NodeFailer.class.getName());
private static final Duration nodeRequestInterval = Duration.ofMinutes(10);
/** Metric for number of hosts that we want to fail, but cannot due to throttling */
static final String throttledHostFailuresMetric = "throttledHostFailures";
/** Metric for number of nodes that we want to fail, but cannot due to throttling */
static final String throttledNodeFailuresMetric = "throttledNodeFailures";
/** Metric that indicates whether throttling is active where 1 means active and 0 means inactive */
static final String throttlingActiveMetric = "nodeFailThrottling";
private final Deployer deployer;
private final Duration downTimeLimit;
private final Orchestrator orchestrator;
private final Instant constructionTime;
private final ThrottlePolicy throttlePolicy;
private final Metric metric;
public NodeFailer(Deployer deployer, NodeRepository nodeRepository,
Duration downTimeLimit, Duration interval, Orchestrator orchestrator,
ThrottlePolicy throttlePolicy, Metric metric) {
super(nodeRepository, min(downTimeLimit.dividedBy(2), interval), metric);
this.deployer = deployer;
this.downTimeLimit = downTimeLimit;
this.orchestrator = orchestrator;
this.constructionTime = nodeRepository.clock().instant();
this.throttlePolicy = throttlePolicy;
this.metric = metric;
}
@Override
protected double maintain() {
if ( ! nodeRepository().nodes().isWorking()) return 0.0;
int attempts = 0;
int failures = 0;
int throttledHostFailures = 0;
int throttledNodeFailures = 0;
try (Mutex lock = nodeRepository().nodes().lockUnallocated()) {
for (Map.Entry<Node, String> entry : getReadyNodesByFailureReason().entrySet()) {
attempts++;
Node node = entry.getKey();
if (throttle(node)) {
failures++;
if (node.type().isHost())
throttledHostFailures++;
else
throttledNodeFailures++;
continue;
}
String reason = entry.getValue();
nodeRepository().nodes().fail(node.hostname(), Agent.NodeFailer, reason);
}
}
for (Map.Entry<Node, String> entry : getActiveNodesByFailureReason().entrySet()) {
attempts++;
Node node = entry.getKey();
if (!failAllowedFor(node.type())) continue;
if (throttle(node)) {
failures++;
if (node.type().isHost())
throttledHostFailures++;
else
throttledNodeFailures++;
continue;
}
String reason = entry.getValue();
failActive(node, reason);
}
NodeList activeNodes = nodeRepository().nodes().list(Node.State.active);
for (Node host : activeNodes.hosts().failing()) {
if ( ! activeNodes.childrenOf(host).isEmpty()) continue;
Optional<NodeMutex> locked = Optional.empty();
try {
attempts++;
locked = nodeRepository().nodes().lockAndGet(host);
if (locked.isEmpty()) continue;
nodeRepository().nodes().fail(List.of(locked.get().node()), Agent.NodeFailer,
"Host should be failed and have no tenant nodes");
}
catch (Exception e) {
failures++;
}
finally {
locked.ifPresent(NodeMutex::close);
}
}
int throttlingActive = Math.min(1, throttledHostFailures + throttledNodeFailures);
metric.set(throttlingActiveMetric, throttlingActive, null);
metric.set(throttledHostFailuresMetric, throttledHostFailures, null);
metric.set(throttledNodeFailuresMetric, throttledNodeFailures, null);
return asSuccessFactor(attempts, failures);
}
private Map<Node, String> getReadyNodesByFailureReason() {
Instant oldestAcceptableRequestTime =
constructionTime.isAfter(clock().instant().minus(nodeRequestInterval.multipliedBy(2))) ?
Instant.EPOCH :
clock().instant().minus(downTimeLimit).minus(nodeRequestInterval);
Map<Node, String> nodesByFailureReason = new HashMap<>();
for (Node node : nodeRepository().nodes().list(Node.State.ready)) {
Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository().nodes().node(parent)).orElse(node);
List<String> failureReports = reasonsToFailParentHost(hostNode);
if (failureReports.size() > 0) {
if (hostNode.equals(node)) {
nodesByFailureReason.put(node, "Host has failure reports: " + failureReports);
} else {
nodesByFailureReason.put(node, "Parent (" + hostNode + ") has failure reports: " + failureReports);
}
}
}
return nodesByFailureReason;
}
private Map<Node, String> getActiveNodesByFailureReason() {
NodeList activeNodes = nodeRepository().nodes().list(Node.State.active);
Instant graceTimeEnd = clock().instant().minus(downTimeLimit);
Map<Node, String> nodesByFailureReason = new HashMap<>();
for (Node node : activeNodes) {
if (node.history().hasEventBefore(History.Event.Type.down, graceTimeEnd) && ! applicationSuspended(node)) {
if ( ! node.history().hasEventAfter(History.Event.Type.activated, graceTimeEnd))
nodesByFailureReason.put(node, "Node has been down longer than " + downTimeLimit);
}
else if (hostSuspended(node, activeNodes)) {
Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository().nodes().node(parent)).orElse(node);
if (hostNode.type().isHost()) {
List<String> failureReports = reasonsToFailParentHost(hostNode);
if (failureReports.size() > 0) {
if (hostNode.equals(node)) {
nodesByFailureReason.put(node, "Host has failure reports: " + failureReports);
} else {
nodesByFailureReason.put(node, "Parent (" + hostNode + ") has failure reports: " + failureReports);
}
}
}
}
}
return nodesByFailureReason;
}
public static List<String> reasonsToFailParentHost(Node hostNode) {
return hostNode.reports().getReports().stream()
.filter(report -> report.getType().hostShouldBeFailed())
.map(report -> report.getReportId() + " reported " + report.getCreatedTime() + ": " + report.getDescription())
.collect(Collectors.toList());
}
/** Returns whether node has any kind of hardware issue */
static boolean hasHardwareIssue(Node node, NodeRepository nodeRepository) {
Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository.nodes().node(parent)).orElse(node);
return reasonsToFailParentHost(hostNode).size() > 0;
}
private boolean expectConfigRequests(Node node) {
return !node.type().isHost();
}
private boolean hasNodeRequestedConfigAfter(Node node, Instant instant) {
return !wasMadeReadyBefore(node, instant) || hasRecordedRequestAfter(node, instant);
}
private boolean wasMadeReadyBefore(Node node, Instant instant) {
return node.history().hasEventBefore(History.Event.Type.readied, instant);
}
private boolean hasRecordedRequestAfter(Node node, Instant instant) {
return node.history().hasEventAfter(History.Event.Type.requested, instant);
}
private boolean applicationSuspended(Node node) {
try {
return orchestrator.getApplicationInstanceStatus(node.allocation().get().owner())
== ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN;
} catch (ApplicationIdNotFoundException e) {
return false;
}
}
private boolean nodeSuspended(Node node) {
try {
return orchestrator.getNodeStatus(new HostName(node.hostname())).isSuspended();
} catch (HostNameNotFoundException e) {
return false;
}
}
/** Is the node and all active children suspended? */
private boolean hostSuspended(Node node, NodeList activeNodes) {
if (!nodeSuspended(node)) return false;
if (node.parentHostname().isPresent()) return true;
return activeNodes.stream()
.filter(childNode -> childNode.parentHostname().isPresent() &&
childNode.parentHostname().get().equals(node.hostname()))
.allMatch(this::nodeSuspended);
}
/**
* We can attempt to fail any number of *tenant* and *host* nodes because the operation will not be effected
* unless the node is replaced.
* We can also attempt to fail a single proxy(host) as there should be enough redundancy to handle that.
* But we refuse to fail out config(host)/controller(host)
*/
private boolean failAllowedFor(NodeType nodeType) {
switch (nodeType) {
case tenant:
case host:
return true;
case proxy:
case proxyhost:
return nodeRepository().nodes().list(Node.State.failed).nodeType(nodeType).isEmpty();
default:
return false;
}
}
/**
* Called when a node should be moved to the failed state: Do that if it seems safe,
* which is when the node repo has available capacity to replace the node (and all its tenant nodes if host).
* Otherwise not replacing the node ensures (by Orchestrator check) that no further action will be taken.
*
* @return whether node was successfully failed
*/
private boolean failActive(Node node, String reason) {
Optional<Deployment> deployment =
deployer.deployFromLocalActive(node.allocation().get().owner(), Duration.ofMinutes(30));
if (deployment.isEmpty()) return false;
try (Mutex lock = nodeRepository().nodes().lock(node.allocation().get().owner())) {
boolean allTenantNodesFailedOutSuccessfully = true;
String reasonForChildFailure = "Failing due to parent host " + node.hostname() + " failure: " + reason;
for (Node failingTenantNode : nodeRepository().nodes().list().childrenOf(node)) {
if (failingTenantNode.state() == Node.State.active) {
allTenantNodesFailedOutSuccessfully &= failActive(failingTenantNode, reasonForChildFailure);
} else {
nodeRepository().nodes().fail(failingTenantNode.hostname(), Agent.NodeFailer, reasonForChildFailure);
}
}
if (! allTenantNodesFailedOutSuccessfully) return false;
wantToFail(node, true, lock);
try {
deployment.get().activate();
return true;
} catch (TransientException e) {
log.log(Level.INFO, "Failed to redeploy " + node.allocation().get().owner() +
" with a transient error, will be retried by application maintainer: " +
Exceptions.toMessageString(e));
return true;
} catch (RuntimeException e) {
nodeRepository().nodes().node(node.hostname())
.ifPresent(n -> wantToFail(n, false, lock));
log.log(Level.WARNING, "Could not fail " + node + " for " + node.allocation().get().owner() +
" for " + reason + ": " + Exceptions.toMessageString(e));
return false;
}
}
}
private void wantToFail(Node node, boolean wantToFail, Mutex lock) {
nodeRepository().nodes().write(node.withWantToFail(wantToFail, Agent.NodeFailer, clock().instant()), lock);
}
/** Returns true if node failing should be throttled */
public enum ThrottlePolicy {
hosted(Duration.ofDays(1), 0.02, 2),
disabled(Duration.ZERO, 0, 0);
private final Duration throttleWindow;
private final double fractionAllowedToFail;
private final int minimumAllowedToFail;
ThrottlePolicy(Duration throttleWindow, double fractionAllowedToFail, int minimumAllowedToFail) {
this.throttleWindow = throttleWindow;
this.fractionAllowedToFail = fractionAllowedToFail;
this.minimumAllowedToFail = minimumAllowedToFail;
}
public int allowedToFailOf(int totalNodes) {
return (int) Math.max(totalNodes * fractionAllowedToFail, minimumAllowedToFail);
}
public String toHumanReadableString(int totalNodes) {
return String.format("Max %.0f%% (%d) or %d nodes can fail over a period of %s", fractionAllowedToFail*100,
allowedToFailOf(totalNodes),
minimumAllowedToFail, throttleWindow);
}
}
} |
Thank you | private boolean throttle(Node node) {
if (throttlePolicy == ThrottlePolicy.disabled) return false;
Instant startOfThrottleWindow = clock().instant().minus(throttlePolicy.throttleWindow);
NodeList allNodes = nodeRepository().nodes().list();
NodeList recentlyFailedNodes = allNodes.state(Node.State.failed)
.matching(n -> n.history().hasEventAfter(History.Event.Type.failed,
startOfThrottleWindow));
log.info("node = " + node + ", recentlyFailedNodes.size() = " + recentlyFailedNodes.size() +
", throttlePolicy.allowedToFailOf(" + allNodes.size() + ") = " +
throttlePolicy.allowedToFailOf(allNodes.size()));
if (recentlyFailedNodes.size() < throttlePolicy.allowedToFailOf(allNodes.size())) return false;
if (node.parentHostname().isEmpty() &&
recentlyFailedNodes.parents().size() < throttlePolicy.minimumAllowedToFail) return false;
if (recentlyFailedNodes.parentOf(node).isPresent()) return false;
log.info(String.format("Want to fail node %s, but throttling is in effect: %s", node.hostname(),
throttlePolicy.toHumanReadableString(allNodes.size())));
return true;
} | throttlePolicy.allowedToFailOf(allNodes.size())); | private boolean throttle(Node node) {
if (throttlePolicy == ThrottlePolicy.disabled) return false;
Instant startOfThrottleWindow = clock().instant().minus(throttlePolicy.throttleWindow);
NodeList allNodes = nodeRepository().nodes().list();
NodeList recentlyFailedNodes = allNodes.state(Node.State.failed)
.matching(n -> n.history().hasEventAfter(History.Event.Type.failed,
startOfThrottleWindow));
if (recentlyFailedNodes.size() < throttlePolicy.allowedToFailOf(allNodes.size())) return false;
if (node.parentHostname().isEmpty() &&
recentlyFailedNodes.parents().size() < throttlePolicy.minimumAllowedToFail) return false;
if (recentlyFailedNodes.parentOf(node).isPresent()) return false;
log.info(String.format("Want to fail node %s, but throttling is in effect: %s", node.hostname(),
throttlePolicy.toHumanReadableString(allNodes.size())));
return true;
} | class NodeFailer extends NodeRepositoryMaintainer {
private static final Logger log = Logger.getLogger(NodeFailer.class.getName());
private static final Duration nodeRequestInterval = Duration.ofMinutes(10);
/** Metric for number of hosts that we want to fail, but cannot due to throttling */
static final String throttledHostFailuresMetric = "throttledHostFailures";
/** Metric for number of nodes that we want to fail, but cannot due to throttling */
static final String throttledNodeFailuresMetric = "throttledNodeFailures";
/** Metric that indicates whether throttling is active where 1 means active and 0 means inactive */
static final String throttlingActiveMetric = "nodeFailThrottling";
private final Deployer deployer;
private final Duration downTimeLimit;
private final Orchestrator orchestrator;
private final Instant constructionTime;
private final ThrottlePolicy throttlePolicy;
private final Metric metric;
public NodeFailer(Deployer deployer, NodeRepository nodeRepository,
Duration downTimeLimit, Duration interval, Orchestrator orchestrator,
ThrottlePolicy throttlePolicy, Metric metric) {
super(nodeRepository, min(downTimeLimit.dividedBy(2), interval), metric);
this.deployer = deployer;
this.downTimeLimit = downTimeLimit;
this.orchestrator = orchestrator;
this.constructionTime = nodeRepository.clock().instant();
this.throttlePolicy = throttlePolicy;
this.metric = metric;
}
@Override
protected double maintain() {
if ( ! nodeRepository().nodes().isWorking()) return 0.0;
int attempts = 0;
int failures = 0;
int throttledHostFailures = 0;
int throttledNodeFailures = 0;
try (Mutex lock = nodeRepository().nodes().lockUnallocated()) {
for (Map.Entry<Node, String> entry : getReadyNodesByFailureReason().entrySet()) {
attempts++;
Node node = entry.getKey();
if (throttle(node)) {
failures++;
if (node.type().isHost())
throttledHostFailures++;
else
throttledNodeFailures++;
continue;
}
String reason = entry.getValue();
nodeRepository().nodes().fail(node.hostname(), Agent.NodeFailer, reason);
}
}
for (Map.Entry<Node, String> entry : getActiveNodesByFailureReason().entrySet()) {
attempts++;
Node node = entry.getKey();
if (!failAllowedFor(node.type())) continue;
if (throttle(node)) {
failures++;
if (node.type().isHost())
throttledHostFailures++;
else
throttledNodeFailures++;
continue;
}
String reason = entry.getValue();
failActive(node, reason);
}
NodeList activeNodes = nodeRepository().nodes().list(Node.State.active);
for (Node host : activeNodes.hosts().failing()) {
if ( ! activeNodes.childrenOf(host).isEmpty()) continue;
Optional<NodeMutex> locked = Optional.empty();
try {
attempts++;
locked = nodeRepository().nodes().lockAndGet(host);
if (locked.isEmpty()) continue;
nodeRepository().nodes().fail(List.of(locked.get().node()), Agent.NodeFailer,
"Host should be failed and have no tenant nodes");
}
catch (Exception e) {
failures++;
}
finally {
locked.ifPresent(NodeMutex::close);
}
}
int throttlingActive = Math.min(1, throttledHostFailures + throttledNodeFailures);
metric.set(throttlingActiveMetric, throttlingActive, null);
metric.set(throttledHostFailuresMetric, throttledHostFailures, null);
metric.set(throttledNodeFailuresMetric, throttledNodeFailures, null);
return asSuccessFactor(attempts, failures);
}
private Map<Node, String> getReadyNodesByFailureReason() {
Instant oldestAcceptableRequestTime =
constructionTime.isAfter(clock().instant().minus(nodeRequestInterval.multipliedBy(2))) ?
Instant.EPOCH :
clock().instant().minus(downTimeLimit).minus(nodeRequestInterval);
Map<Node, String> nodesByFailureReason = new HashMap<>();
for (Node node : nodeRepository().nodes().list(Node.State.ready)) {
Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository().nodes().node(parent)).orElse(node);
List<String> failureReports = reasonsToFailParentHost(hostNode);
if (failureReports.size() > 0) {
if (hostNode.equals(node)) {
nodesByFailureReason.put(node, "Host has failure reports: " + failureReports);
} else {
nodesByFailureReason.put(node, "Parent (" + hostNode + ") has failure reports: " + failureReports);
}
}
}
return nodesByFailureReason;
}
private Map<Node, String> getActiveNodesByFailureReason() {
NodeList activeNodes = nodeRepository().nodes().list(Node.State.active);
Instant graceTimeEnd = clock().instant().minus(downTimeLimit);
Map<Node, String> nodesByFailureReason = new HashMap<>();
for (Node node : activeNodes) {
if (node.history().hasEventBefore(History.Event.Type.down, graceTimeEnd) && ! applicationSuspended(node)) {
if ( ! node.history().hasEventAfter(History.Event.Type.activated, graceTimeEnd))
nodesByFailureReason.put(node, "Node has been down longer than " + downTimeLimit);
}
else if (hostSuspended(node, activeNodes)) {
Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository().nodes().node(parent)).orElse(node);
if (hostNode.type().isHost()) {
List<String> failureReports = reasonsToFailParentHost(hostNode);
if (failureReports.size() > 0) {
if (hostNode.equals(node)) {
nodesByFailureReason.put(node, "Host has failure reports: " + failureReports);
} else {
nodesByFailureReason.put(node, "Parent (" + hostNode + ") has failure reports: " + failureReports);
}
}
}
}
}
return nodesByFailureReason;
}
public static List<String> reasonsToFailParentHost(Node hostNode) {
return hostNode.reports().getReports().stream()
.filter(report -> report.getType().hostShouldBeFailed())
.map(report -> report.getReportId() + " reported " + report.getCreatedTime() + ": " + report.getDescription())
.collect(Collectors.toList());
}
/** Returns whether node has any kind of hardware issue */
static boolean hasHardwareIssue(Node node, NodeRepository nodeRepository) {
Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository.nodes().node(parent)).orElse(node);
return reasonsToFailParentHost(hostNode).size() > 0;
}
private boolean expectConfigRequests(Node node) {
return !node.type().isHost();
}
private boolean hasNodeRequestedConfigAfter(Node node, Instant instant) {
return !wasMadeReadyBefore(node, instant) || hasRecordedRequestAfter(node, instant);
}
private boolean wasMadeReadyBefore(Node node, Instant instant) {
return node.history().hasEventBefore(History.Event.Type.readied, instant);
}
private boolean hasRecordedRequestAfter(Node node, Instant instant) {
return node.history().hasEventAfter(History.Event.Type.requested, instant);
}
private boolean applicationSuspended(Node node) {
try {
return orchestrator.getApplicationInstanceStatus(node.allocation().get().owner())
== ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN;
} catch (ApplicationIdNotFoundException e) {
return false;
}
}
private boolean nodeSuspended(Node node) {
try {
return orchestrator.getNodeStatus(new HostName(node.hostname())).isSuspended();
} catch (HostNameNotFoundException e) {
return false;
}
}
/** Is the node and all active children suspended? */
private boolean hostSuspended(Node node, NodeList activeNodes) {
if (!nodeSuspended(node)) return false;
if (node.parentHostname().isPresent()) return true;
return activeNodes.stream()
.filter(childNode -> childNode.parentHostname().isPresent() &&
childNode.parentHostname().get().equals(node.hostname()))
.allMatch(this::nodeSuspended);
}
/**
* We can attempt to fail any number of *tenant* and *host* nodes because the operation will not be effected
* unless the node is replaced.
* We can also attempt to fail a single proxy(host) as there should be enough redundancy to handle that.
* But we refuse to fail out config(host)/controller(host)
*/
private boolean failAllowedFor(NodeType nodeType) {
switch (nodeType) {
case tenant:
case host:
return true;
case proxy:
case proxyhost:
return nodeRepository().nodes().list(Node.State.failed).nodeType(nodeType).isEmpty();
default:
return false;
}
}
/**
* Called when a node should be moved to the failed state: Do that if it seems safe,
* which is when the node repo has available capacity to replace the node (and all its tenant nodes if host).
* Otherwise not replacing the node ensures (by Orchestrator check) that no further action will be taken.
*
* @return whether node was successfully failed
*/
private boolean failActive(Node node, String reason) {
Optional<Deployment> deployment =
deployer.deployFromLocalActive(node.allocation().get().owner(), Duration.ofMinutes(30));
if (deployment.isEmpty()) return false;
try (Mutex lock = nodeRepository().nodes().lock(node.allocation().get().owner())) {
boolean allTenantNodesFailedOutSuccessfully = true;
String reasonForChildFailure = "Failing due to parent host " + node.hostname() + " failure: " + reason;
for (Node failingTenantNode : nodeRepository().nodes().list().childrenOf(node)) {
if (failingTenantNode.state() == Node.State.active) {
allTenantNodesFailedOutSuccessfully &= failActive(failingTenantNode, reasonForChildFailure);
} else {
nodeRepository().nodes().fail(failingTenantNode.hostname(), Agent.NodeFailer, reasonForChildFailure);
}
}
if (! allTenantNodesFailedOutSuccessfully) return false;
wantToFail(node, true, lock);
try {
deployment.get().activate();
return true;
} catch (TransientException e) {
log.log(Level.INFO, "Failed to redeploy " + node.allocation().get().owner() +
" with a transient error, will be retried by application maintainer: " +
Exceptions.toMessageString(e));
return true;
} catch (RuntimeException e) {
nodeRepository().nodes().node(node.hostname())
.ifPresent(n -> wantToFail(n, false, lock));
log.log(Level.WARNING, "Could not fail " + node + " for " + node.allocation().get().owner() +
" for " + reason + ": " + Exceptions.toMessageString(e));
return false;
}
}
}
private void wantToFail(Node node, boolean wantToFail, Mutex lock) {
nodeRepository().nodes().write(node.withWantToFail(wantToFail, Agent.NodeFailer, clock().instant()), lock);
}
/** Returns true if node failing should be throttled */
public enum ThrottlePolicy {
hosted(Duration.ofDays(1), 0.02, 2),
disabled(Duration.ZERO, 0, 0);
private final Duration throttleWindow;
private final double fractionAllowedToFail;
private final int minimumAllowedToFail;
ThrottlePolicy(Duration throttleWindow, double fractionAllowedToFail, int minimumAllowedToFail) {
this.throttleWindow = throttleWindow;
this.fractionAllowedToFail = fractionAllowedToFail;
this.minimumAllowedToFail = minimumAllowedToFail;
}
public int allowedToFailOf(int totalNodes) {
return (int) Math.max(totalNodes * fractionAllowedToFail, minimumAllowedToFail);
}
public String toHumanReadableString(int totalNodes) {
return String.format("Max %.0f%% (%d) or %d nodes can fail over a period of %s", fractionAllowedToFail*100,
allowedToFailOf(totalNodes),
minimumAllowedToFail, throttleWindow);
}
}
} | class NodeFailer extends NodeRepositoryMaintainer {
private static final Logger log = Logger.getLogger(NodeFailer.class.getName());
private static final Duration nodeRequestInterval = Duration.ofMinutes(10);
/** Metric for number of hosts that we want to fail, but cannot due to throttling */
static final String throttledHostFailuresMetric = "throttledHostFailures";
/** Metric for number of nodes that we want to fail, but cannot due to throttling */
static final String throttledNodeFailuresMetric = "throttledNodeFailures";
/** Metric that indicates whether throttling is active where 1 means active and 0 means inactive */
static final String throttlingActiveMetric = "nodeFailThrottling";
private final Deployer deployer;
private final Duration downTimeLimit;
private final Orchestrator orchestrator;
private final Instant constructionTime;
private final ThrottlePolicy throttlePolicy;
private final Metric metric;
public NodeFailer(Deployer deployer, NodeRepository nodeRepository,
Duration downTimeLimit, Duration interval, Orchestrator orchestrator,
ThrottlePolicy throttlePolicy, Metric metric) {
super(nodeRepository, min(downTimeLimit.dividedBy(2), interval), metric);
this.deployer = deployer;
this.downTimeLimit = downTimeLimit;
this.orchestrator = orchestrator;
this.constructionTime = nodeRepository.clock().instant();
this.throttlePolicy = throttlePolicy;
this.metric = metric;
}
@Override
protected double maintain() {
if ( ! nodeRepository().nodes().isWorking()) return 0.0;
int attempts = 0;
int failures = 0;
int throttledHostFailures = 0;
int throttledNodeFailures = 0;
try (Mutex lock = nodeRepository().nodes().lockUnallocated()) {
for (Map.Entry<Node, String> entry : getReadyNodesByFailureReason().entrySet()) {
attempts++;
Node node = entry.getKey();
if (throttle(node)) {
failures++;
if (node.type().isHost())
throttledHostFailures++;
else
throttledNodeFailures++;
continue;
}
String reason = entry.getValue();
nodeRepository().nodes().fail(node.hostname(), Agent.NodeFailer, reason);
}
}
for (Map.Entry<Node, String> entry : getActiveNodesByFailureReason().entrySet()) {
attempts++;
Node node = entry.getKey();
if (!failAllowedFor(node.type())) continue;
if (throttle(node)) {
failures++;
if (node.type().isHost())
throttledHostFailures++;
else
throttledNodeFailures++;
continue;
}
String reason = entry.getValue();
failActive(node, reason);
}
NodeList activeNodes = nodeRepository().nodes().list(Node.State.active);
for (Node host : activeNodes.hosts().failing()) {
if ( ! activeNodes.childrenOf(host).isEmpty()) continue;
Optional<NodeMutex> locked = Optional.empty();
try {
attempts++;
locked = nodeRepository().nodes().lockAndGet(host);
if (locked.isEmpty()) continue;
nodeRepository().nodes().fail(List.of(locked.get().node()), Agent.NodeFailer,
"Host should be failed and have no tenant nodes");
}
catch (Exception e) {
failures++;
}
finally {
locked.ifPresent(NodeMutex::close);
}
}
int throttlingActive = Math.min(1, throttledHostFailures + throttledNodeFailures);
metric.set(throttlingActiveMetric, throttlingActive, null);
metric.set(throttledHostFailuresMetric, throttledHostFailures, null);
metric.set(throttledNodeFailuresMetric, throttledNodeFailures, null);
return asSuccessFactor(attempts, failures);
}
private Map<Node, String> getReadyNodesByFailureReason() {
Instant oldestAcceptableRequestTime =
constructionTime.isAfter(clock().instant().minus(nodeRequestInterval.multipliedBy(2))) ?
Instant.EPOCH :
clock().instant().minus(downTimeLimit).minus(nodeRequestInterval);
Map<Node, String> nodesByFailureReason = new HashMap<>();
for (Node node : nodeRepository().nodes().list(Node.State.ready)) {
Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository().nodes().node(parent)).orElse(node);
List<String> failureReports = reasonsToFailParentHost(hostNode);
if (failureReports.size() > 0) {
if (hostNode.equals(node)) {
nodesByFailureReason.put(node, "Host has failure reports: " + failureReports);
} else {
nodesByFailureReason.put(node, "Parent (" + hostNode + ") has failure reports: " + failureReports);
}
}
}
return nodesByFailureReason;
}
private Map<Node, String> getActiveNodesByFailureReason() {
NodeList activeNodes = nodeRepository().nodes().list(Node.State.active);
Instant graceTimeEnd = clock().instant().minus(downTimeLimit);
Map<Node, String> nodesByFailureReason = new HashMap<>();
for (Node node : activeNodes) {
if (node.history().hasEventBefore(History.Event.Type.down, graceTimeEnd) && ! applicationSuspended(node)) {
if ( ! node.history().hasEventAfter(History.Event.Type.activated, graceTimeEnd))
nodesByFailureReason.put(node, "Node has been down longer than " + downTimeLimit);
}
else if (hostSuspended(node, activeNodes)) {
Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository().nodes().node(parent)).orElse(node);
if (hostNode.type().isHost()) {
List<String> failureReports = reasonsToFailParentHost(hostNode);
if (failureReports.size() > 0) {
if (hostNode.equals(node)) {
nodesByFailureReason.put(node, "Host has failure reports: " + failureReports);
} else {
nodesByFailureReason.put(node, "Parent (" + hostNode + ") has failure reports: " + failureReports);
}
}
}
}
}
return nodesByFailureReason;
}
public static List<String> reasonsToFailParentHost(Node hostNode) {
return hostNode.reports().getReports().stream()
.filter(report -> report.getType().hostShouldBeFailed())
.map(report -> report.getReportId() + " reported " + report.getCreatedTime() + ": " + report.getDescription())
.collect(Collectors.toList());
}
/** Returns whether node has any kind of hardware issue */
static boolean hasHardwareIssue(Node node, NodeRepository nodeRepository) {
Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository.nodes().node(parent)).orElse(node);
return reasonsToFailParentHost(hostNode).size() > 0;
}
private boolean expectConfigRequests(Node node) {
return !node.type().isHost();
}
private boolean hasNodeRequestedConfigAfter(Node node, Instant instant) {
return !wasMadeReadyBefore(node, instant) || hasRecordedRequestAfter(node, instant);
}
private boolean wasMadeReadyBefore(Node node, Instant instant) {
return node.history().hasEventBefore(History.Event.Type.readied, instant);
}
private boolean hasRecordedRequestAfter(Node node, Instant instant) {
return node.history().hasEventAfter(History.Event.Type.requested, instant);
}
private boolean applicationSuspended(Node node) {
try {
return orchestrator.getApplicationInstanceStatus(node.allocation().get().owner())
== ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN;
} catch (ApplicationIdNotFoundException e) {
return false;
}
}
private boolean nodeSuspended(Node node) {
try {
return orchestrator.getNodeStatus(new HostName(node.hostname())).isSuspended();
} catch (HostNameNotFoundException e) {
return false;
}
}
/** Is the node and all active children suspended? */
private boolean hostSuspended(Node node, NodeList activeNodes) {
if (!nodeSuspended(node)) return false;
if (node.parentHostname().isPresent()) return true;
return activeNodes.stream()
.filter(childNode -> childNode.parentHostname().isPresent() &&
childNode.parentHostname().get().equals(node.hostname()))
.allMatch(this::nodeSuspended);
}
/**
* We can attempt to fail any number of *tenant* and *host* nodes because the operation will not be effected
* unless the node is replaced.
* We can also attempt to fail a single proxy(host) as there should be enough redundancy to handle that.
* But we refuse to fail out config(host)/controller(host)
*/
private boolean failAllowedFor(NodeType nodeType) {
switch (nodeType) {
case tenant:
case host:
return true;
case proxy:
case proxyhost:
return nodeRepository().nodes().list(Node.State.failed).nodeType(nodeType).isEmpty();
default:
return false;
}
}
/**
* Called when a node should be moved to the failed state: Do that if it seems safe,
* which is when the node repo has available capacity to replace the node (and all its tenant nodes if host).
* Otherwise not replacing the node ensures (by Orchestrator check) that no further action will be taken.
*
* @return whether node was successfully failed
*/
private boolean failActive(Node node, String reason) {
Optional<Deployment> deployment =
deployer.deployFromLocalActive(node.allocation().get().owner(), Duration.ofMinutes(30));
if (deployment.isEmpty()) return false;
try (Mutex lock = nodeRepository().nodes().lock(node.allocation().get().owner())) {
boolean allTenantNodesFailedOutSuccessfully = true;
String reasonForChildFailure = "Failing due to parent host " + node.hostname() + " failure: " + reason;
for (Node failingTenantNode : nodeRepository().nodes().list().childrenOf(node)) {
if (failingTenantNode.state() == Node.State.active) {
allTenantNodesFailedOutSuccessfully &= failActive(failingTenantNode, reasonForChildFailure);
} else {
nodeRepository().nodes().fail(failingTenantNode.hostname(), Agent.NodeFailer, reasonForChildFailure);
}
}
if (! allTenantNodesFailedOutSuccessfully) return false;
wantToFail(node, true, lock);
try {
deployment.get().activate();
return true;
} catch (TransientException e) {
log.log(Level.INFO, "Failed to redeploy " + node.allocation().get().owner() +
" with a transient error, will be retried by application maintainer: " +
Exceptions.toMessageString(e));
return true;
} catch (RuntimeException e) {
nodeRepository().nodes().node(node.hostname())
.ifPresent(n -> wantToFail(n, false, lock));
log.log(Level.WARNING, "Could not fail " + node + " for " + node.allocation().get().owner() +
" for " + reason + ": " + Exceptions.toMessageString(e));
return false;
}
}
}
private void wantToFail(Node node, boolean wantToFail, Mutex lock) {
nodeRepository().nodes().write(node.withWantToFail(wantToFail, Agent.NodeFailer, clock().instant()), lock);
}
/** Returns true if node failing should be throttled */
public enum ThrottlePolicy {
hosted(Duration.ofDays(1), 0.02, 2),
disabled(Duration.ZERO, 0, 0);
private final Duration throttleWindow;
private final double fractionAllowedToFail;
private final int minimumAllowedToFail;
ThrottlePolicy(Duration throttleWindow, double fractionAllowedToFail, int minimumAllowedToFail) {
this.throttleWindow = throttleWindow;
this.fractionAllowedToFail = fractionAllowedToFail;
this.minimumAllowedToFail = minimumAllowedToFail;
}
public int allowedToFailOf(int totalNodes) {
return (int) Math.max(totalNodes * fractionAllowedToFail, minimumAllowedToFail);
}
public String toHumanReadableString(int totalNodes) {
return String.format("Max %.0f%% (%d) or %d nodes can fail over a period of %s", fractionAllowedToFail*100,
allowedToFailOf(totalNodes),
minimumAllowedToFail, throttleWindow);
}
}
} |
I'll do this now... | public void node_failing_throttle() {
{
NodeFailTester tester = NodeFailTester.withTwoApplications(10);
NodeList hosts = tester.nodeRepository.nodes().list().nodeType(NodeType.host);
List<Node> deadNodes = new ArrayList<>();
List<Node> failedHosts = hosts.asList().subList(0, 3);
failedHosts.forEach(host -> {
tester.serviceMonitor.setHostDown(host.hostname());
deadNodes.add(host);
});
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(61));
tester.allNodesMakeAConfigRequestExcept(deadNodes);
tester.runMaintainers();
assertEquals(2 + /* hosts */
(2 * 3) /* containers per host */,
tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is indicated by the metric", 1, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("Throttled host failures", 1, tester.metric.values.get(NodeFailer.throttledHostFailuresMetric));
for (int minutes = 0, interval = 30; minutes <= 23 * 60; minutes += interval) {
tester.clock.advance(Duration.ofMinutes(interval));
tester.allNodesMakeAConfigRequestExcept(deadNodes);
}
tester.runMaintainers();
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is indicated by the metric", 1, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("Throttled host failures", 1, tester.metric.values.get(NodeFailer.throttledHostFailuresMetric));
tester.clock.advance(Duration.ofMinutes(30));
tester.runMaintainers();
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is not indicated by the metric, as no throttled attempt is made", 0, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("No throttled node failures", 0, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric));
tester.clock.advance(Duration.ofHours(25));
tester.allNodesMakeAConfigRequestExcept(deadNodes);
tester.runMaintainers();
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is not indicated by the metric", 0, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("No throttled node failures", 0, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric));
}
{
NodeFailTester tester = NodeFailTester.withTwoApplications(300, 100, 100);
NodeList allNodes = tester.nodeRepository.nodes().list();
assertEquals(500, allNodes.size());
tester.runMaintainers();
allNodes.state(Node.State.active)
.nodeType(NodeType.tenant)
.stream()
.limit(15)
.forEach(host -> tester.serviceMonitor.setHostDown(host.hostname()));
tester.runMaintainers();
tester.clock.advance(Duration.ofHours(2));
tester.runMaintainers();
assertEquals(10, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is indicated by the metric.", 1, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("Throttled node failures", 5, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric));
tester.clock.advance(Duration.ofHours(6));
tester.runMaintainers();
assertEquals(10, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is indicated by the metric.", 1, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("Throttled node failures", 5, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric));
tester.clock.advance(Duration.ofHours(18));
tester.runMaintainers();
assertEquals(15, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is not indicated by the metric, as no throttled attempt is made.", 0, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("No throttled node failures", 0, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric));
}
} | public void node_failing_throttle() {
{
NodeFailTester tester = NodeFailTester.withTwoApplications(10);
NodeList hosts = tester.nodeRepository.nodes().list().nodeType(NodeType.host);
List<Node> failedHosts = hosts.asList().subList(0, 3);
failedHosts.forEach(host -> {
tester.serviceMonitor.setHostDown(host.hostname());
});
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(61));
tester.runMaintainers();
assertEquals(2 + /* hosts */
(2 * 3) /* containers per host */,
tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is indicated by the metric", 1, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("Throttled host failures", 1, tester.metric.values.get(NodeFailer.throttledHostFailuresMetric));
for (int minutes = 0, interval = 30; minutes <= 23 * 60; minutes += interval) {
tester.clock.advance(Duration.ofMinutes(interval));
}
tester.runMaintainers();
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is indicated by the metric", 1, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("Throttled host failures", 1, tester.metric.values.get(NodeFailer.throttledHostFailuresMetric));
tester.clock.advance(Duration.ofMinutes(30));
tester.runMaintainers();
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is not indicated by the metric, as no throttled attempt is made", 0, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("No throttled node failures", 0, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric));
tester.clock.advance(Duration.ofHours(25));
tester.runMaintainers();
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is not indicated by the metric", 0, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("No throttled node failures", 0, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric));
}
{
NodeFailTester tester = NodeFailTester.withTwoApplications(300, 100, 100);
NodeList allNodes = tester.nodeRepository.nodes().list();
assertEquals(500, allNodes.size());
tester.runMaintainers();
allNodes.state(Node.State.active)
.nodeType(NodeType.tenant)
.stream()
.limit(15)
.forEach(host -> tester.serviceMonitor.setHostDown(host.hostname()));
tester.runMaintainers();
tester.clock.advance(Duration.ofHours(2));
tester.runMaintainers();
assertEquals(10, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is indicated by the metric.", 1, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("Throttled node failures", 5, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric));
tester.clock.advance(Duration.ofHours(6));
tester.runMaintainers();
assertEquals(10, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is indicated by the metric.", 1, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("Throttled node failures", 5, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric));
tester.clock.advance(Duration.ofHours(18));
tester.runMaintainers();
assertEquals(15, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is not indicated by the metric, as no throttled attempt is made.", 0, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("No throttled node failures", 0, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric));
}
} | class NodeFailerTest {
private static final Report badTotalMemorySizeReport = Report.basicReport(
"badTotalMemorySize", HARD_FAIL, Instant.now(), "too low");
@Test
public void fail_nodes_with_severe_reports_if_allowed_to_be_down() {
NodeFailTester tester = NodeFailTester.withTwoApplications(6);
String hostWithFailureReports = selectFirstParentHostWithNActiveNodesExcept(tester.nodeRepository, 2);
tester.nodeRepository.nodes().list().stream()
.filter(node -> node.hostname().equals(hostWithFailureReports))
.forEach(node -> {
Node updatedNode = node.with(node.reports().withReport(badTotalMemorySizeReport));
tester.nodeRepository.nodes().write(updatedNode, () -> {});
});
testNodeFailingWith(tester, hostWithFailureReports);
}
private void testNodeFailingWith(NodeFailTester tester, String hostWithHwFailure) {
Map<Node.State, List<String>> hostnamesByState = tester.nodeRepository.nodes().list().childrenOf(hostWithHwFailure).asList().stream()
.collect(Collectors.groupingBy(Node::state, Collectors.mapping(Node::hostname, Collectors.toList())));
assertEquals(2, hostnamesByState.get(Node.State.active).size());
assertEquals(1, hostnamesByState.get(Node.State.ready).size());
tester.suspend(hostnamesByState.get(Node.State.active).get(0));
tester.runMaintainers();
tester.clock.advance(Duration.ofHours(25));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
Map<Node.State, List<String>> expectedHostnamesByState1Iter = Map.of(
Node.State.failed, List.of(hostnamesByState.get(Node.State.ready).get(0), hostnamesByState.get(Node.State.active).get(0)),
Node.State.active, hostnamesByState.get(Node.State.active).subList(1, 2));
Map<Node.State, List<String>> hostnamesByState1Iter = tester.nodeRepository.nodes().list().childrenOf(hostWithHwFailure).asList().stream()
.collect(Collectors.groupingBy(Node::state, Collectors.mapping(Node::hostname, Collectors.toList())));
assertEquals(expectedHostnamesByState1Iter, hostnamesByState1Iter);
tester.suspend(hostnamesByState.get(Node.State.active).get(1));
tester.clock.advance(Duration.ofHours(25));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
Set<Node.State> childStates2Iter = tester.nodeRepository.nodes().list().childrenOf(hostWithHwFailure).asList().stream()
.map(Node::state).collect(Collectors.toSet());
assertEquals(Set.of(Node.State.failed), childStates2Iter);
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithHwFailure).get().state());
tester.suspend(hostWithHwFailure);
tester.runMaintainers();
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(hostWithHwFailure).get().state());
assertEquals(4, tester.nodeRepository.nodes().list(Node.State.failed).size());
}
@Test
public void hw_fail_only_if_whole_host_is_suspended() {
NodeFailTester tester = NodeFailTester.withTwoApplications(6);
String hostWithFailureReports = selectFirstParentHostWithNActiveNodesExcept(tester.nodeRepository, 2);
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state());
Map<Node.State, List<String>> hostnamesByState = tester.nodeRepository.nodes().list().childrenOf(hostWithFailureReports).asList().stream()
.collect(Collectors.groupingBy(Node::state, Collectors.mapping(Node::hostname, Collectors.toList())));
assertEquals(2, hostnamesByState.get(Node.State.active).size());
String activeChild1 = hostnamesByState.get(Node.State.active).get(0);
String activeChild2 = hostnamesByState.get(Node.State.active).get(1);
assertEquals(1, hostnamesByState.get(Node.State.ready).size());
String readyChild = hostnamesByState.get(Node.State.ready).get(0);
Report badTotalMemorySizeReport = Report.basicReport("badTotalMemorySize", HARD_FAIL, Instant.now(), "too low");
tester.nodeRepository.nodes().list().stream()
.filter(node -> node.hostname().equals(hostWithFailureReports))
.forEach(node -> {
Node updatedNode = node.with(node.reports().withReport(badTotalMemorySizeReport));
tester.nodeRepository.nodes().write(updatedNode, () -> {});
});
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyChild).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild1).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild2).get().state());
tester.suspend(hostWithFailureReports);
tester.clock.advance(Duration.ofHours(25));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyChild).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild1).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild2).get().state());
tester.suspend(activeChild1);
tester.clock.advance(Duration.ofHours(25));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyChild).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(activeChild1).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild2).get().state());
tester.suspend(activeChild2);
tester.clock.advance(Duration.ofHours(25));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyChild).get().state());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(activeChild1).get().state());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(activeChild2).get().state());
}
@Test
public void nodes_for_suspended_applications_are_not_failed() {
NodeFailTester tester = NodeFailTester.withTwoApplications();
tester.suspend(NodeFailTester.app1);
String host_from_suspended_app = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(1).hostname();
String host_from_normal_app = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app2).asList().get(3).hostname();
tester.serviceMonitor.setHostDown(host_from_suspended_app);
tester.serviceMonitor.setHostDown(host_from_normal_app);
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(65));
tester.runMaintainers();
assertTrue(tester.nodeRepository.nodes().node(host_from_normal_app).get().isDown());
assertTrue(tester.nodeRepository.nodes().node(host_from_suspended_app).get().isDown());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(host_from_normal_app).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(host_from_suspended_app).get().state());
}
@Test
public void zone_is_not_working_if_too_many_nodes_down() {
NodeFailTester tester = NodeFailTester.withTwoApplications();
tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(0).hostname());
tester.runMaintainers();
assertTrue(tester.nodeRepository.nodes().isWorking());
tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(1).hostname());
tester.runMaintainers();
assertTrue(tester.nodeRepository.nodes().isWorking());
tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(2).hostname());
tester.runMaintainers();
assertFalse(tester.nodeRepository.nodes().isWorking());
tester.clock.advance(Duration.ofMinutes(65));
tester.runMaintainers();
assertTrue("Node failing is deactivated", tester.nodeRepository.nodes().list(Node.State.failed).isEmpty());
}
@Test
public void node_failing() {
NodeFailTester tester = NodeFailTester.withTwoApplications();
for (int minutes = 0; minutes < 24 * 60; minutes +=5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals( 0, tester.deployer.redeployments);
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals( 4, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
}
Node readyFail1 = tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).asList().get(2);
Node readyFail2 = tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).asList().get(3);
tester.nodeRepository.nodes().write(readyFail1.with(new Reports().withReport(badTotalMemorySizeReport)), () -> {});
tester.nodeRepository.nodes().write(readyFail2.with(new Reports().withReport(badTotalMemorySizeReport)), () -> {});
assertEquals(4, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
tester.runMaintainers();
assertEquals(2, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyFail1.hostname()).get().state());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyFail2.hostname()).get().state());
String downHost1 = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(1).hostname();
String downHost2 = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app2).asList().get(3).hostname();
tester.serviceMonitor.setHostDown(downHost1);
tester.serviceMonitor.setHostDown(downHost2);
for (int minutes = 0; minutes < 45; minutes +=5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals( 0, tester.deployer.redeployments);
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals( 2, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals( 2, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
}
tester.serviceMonitor.setHostUp(downHost1);
tester.clock.advance(Duration.ofDays(1));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals( 1, tester.deployer.redeployments);
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals( 3, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals( 1, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(downHost2, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).asList().get(0).hostname());
tester.serviceMonitor.setHostDown(downHost1);
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
tester.clock.advance(Duration.ofMinutes(120));
tester.failer = tester.createFailer();
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals( 2, tester.deployer.redeployments);
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals( 4, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
Node lastNode = tester.highestIndex(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1));
tester.serviceMonitor.setHostDown(lastNode.hostname());
for (int minutes = 0; minutes < 75; minutes +=5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals( 2, tester.deployer.redeployments);
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals( 4, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
}
tester.createReadyNodes(1, 16, NodeFailTester.nodeResources);
tester.clock.advance(Duration.ofDays(1));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals( 3, tester.deployer.redeployments);
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals( 5, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertTrue("The index of the last failed node is not reused",
tester.highestIndex(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1)).allocation().get().membership().index()
>
lastNode.allocation().get().membership().index());
assertEquals("Node failing does not cause recording of scaling events",
1,
tester.nodeRepository.applications().get(NodeFailTester.app1).get().cluster(NodeFailTester.testCluster).get().scalingEvents().size());
}
// Verifies that a manually reactivated node gets a fresh grace period before it can be
// failed again, even though the service monitor still reports it down.
@Test
public void re_activate_grace_period_test() {
NodeFailTester tester = NodeFailTester.withTwoApplications();
String downNode = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(1).hostname();
tester.serviceMonitor.setHostDown(downNode);
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
// Not failed yet: grace period has not expired
assertEquals(0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
tester.clock.advance(Duration.ofMinutes(75));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
// After 75 minutes down the node is failed
assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(downNode).get().state());
// Operator reactivates the node; this should reset the down timer
tester.nodeRepository.nodes().reactivate(downNode, Agent.system, getClass().getSimpleName());
tester.clock.advance(Duration.ofMinutes(30));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
// 30 minutes after reactivation: still inside the new grace period, not failed
assertEquals(0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
tester.clock.advance(Duration.ofMinutes(45));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
// 75 minutes after reactivation: grace period has expired again, node is re-failed
assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(downNode).get().state());
}
// Verifies that failing a node succeeds when the replacement must come from spare capacity:
// the cluster has 3 nodes on 4 hosts with a spare count of 1.
@Test
public void node_failing_can_allocate_spare() {
var resources = new NodeResources(1, 20, 15, 1);
Capacity capacity = Capacity.from(new ClusterResources(3, 1, resources), false, true);
NodeFailTester tester = NodeFailTester.withOneUndeployedApplication(capacity);
assertEquals("Test depends on this setting in NodeFailTester", 1, tester.nodeRepository.spareCount());
tester.createAndActivateHosts(4, resources);
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test")).vespaVersion("6.42").build();
tester.activate(NodeFailTester.app1, cluster, capacity);
String downHost = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).first().get().hostname();
tester.serviceMonitor.setHostDown(downHost);
// Within the grace period nothing is failed or redeployed
for (int minutes = 0; minutes < 45; minutes +=5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals(0, tester.deployer.redeployments);
assertEquals(3, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
}
tester.clock.advance(Duration.ofDays(1));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
// After the grace period the down node is failed and a replacement is allocated (from the spare),
// so the active count stays at 3
assertEquals(1, tester.deployer.redeployments);
assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(3, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(downHost, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).asList().get(0).hostname());
}
// Verifies that when a host with a severe hardware report is suspended together with its child,
// the child can be replaced from spare capacity even in a grouped (content) cluster.
@Test
public void node_failing_can_allocate_spare_to_replace_failed_node_in_group() {
NodeResources resources = new NodeResources(1, 20, 15, 1);
Capacity capacity = Capacity.from(new ClusterResources(4, 2, resources), false, true);
ClusterSpec spec = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test")).vespaVersion("6.42").build();
NodeFailTester tester = NodeFailTester.withOneUndeployedApplication(capacity, spec);
assertEquals("Test depends on this setting in NodeFailTester", 1, tester.nodeRepository.spareCount());
tester.createAndActivateHosts(5, resources);
tester.activate(NodeFailTester.app1, spec, capacity);
NodeList activeNodes = tester.nodeRepository.nodes().list(Node.State.active);
Node downNode = activeNodes.owner(NodeFailTester.app1).first().get();
Node downHost = activeNodes.parentOf(downNode).get();
// Mark the parent host with a hard-fail hardware report and suspend host + child
tester.tester.patchNode(downHost, (node) -> node.with(node.reports().withReport(badTotalMemorySizeReport)));
tester.suspend(downHost.hostname());
tester.suspend(downNode.hostname());
tester.runMaintainers();
// The child is failed and replaced, keeping 4 active tenant nodes
assertEquals(1, tester.deployer.redeployments);
NodeList failedOrActive = tester.nodeRepository.nodes().list(Node.State.active, Node.State.failed);
assertEquals(4, failedOrActive.state(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(Set.of(downNode.hostname()), failedOrActive.state(Node.State.failed).nodeType(NodeType.tenant).hostnames());
}
// Verifies that a ready host which stops making config requests is still not failed.
@Test
public void host_not_failed_without_config_requests() {
NodeFailTester tester = NodeFailTester.withTwoApplications();
// Baseline: with all nodes making config requests, nothing is failed over 24 hours
for (int minutes = 0, interval = 30; minutes < 24 * 60; minutes += interval) {
tester.clock.advance(Duration.ofMinutes(interval));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals( 3, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.host).size());
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.host).size());
}
tester.clock.advance(Duration.ofMinutes(180));
// One ready host makes no config request for 3 hours — it must still not be failed
Node host = tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.host).first().get();
tester.allNodesMakeAConfigRequestExcept(host);
tester.runMaintainers();
assertEquals( 3, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.host).size());
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.host).size());
}
// Exercises failing of whole hosts: when a host goes down, its children are failed and
// replaced, and the host itself is failed. Counts below track tenant/host nodes per state
// through a sequence of host and tenant failures.
@Test
public void failing_hosts() {
NodeFailTester tester = NodeFailTester.withTwoApplications(7);
// Baseline: nothing happens over 24 hours while everything is up
for (int minutes = 0, interval = 30; minutes < 24 * 60; minutes += interval) {
tester.clock.advance(Duration.ofMinutes(interval));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(13, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(7, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
}
// Take down a host with 2 active children; nothing is failed within the grace period
String downHost1 = selectFirstParentHostWithNActiveNodesExcept(tester.nodeRepository, 2);
tester.serviceMonitor.setHostDown(downHost1);
for (int minutes = 0; minutes < 45; minutes += 5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals(0, tester.deployer.redeployments);
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(13, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(7, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
}
tester.clock.advance(Duration.ofMinutes(30));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
// Grace period over: 2 active children redeployed elsewhere + host failed;
// the host's ready child is failed too (3 failed tenant nodes total)
assertEquals(2 + 1, tester.deployer.redeployments);
assertEquals(3, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(10, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(6, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.host).size());
// Take down a single tenant node; nothing more is failed until its grace period expires
Node downTenant1 = tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).first().get();
tester.serviceMonitor.setHostDown(downTenant1.hostname());
for (int minutes = 0, interval = 30; minutes < 24 * 60; minutes += interval) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(interval));
tester.allNodesMakeAConfigRequestExcept();
assertEquals(3 + 1, tester.nodeRepository.nodes().list(Node.State.failed).size());
}
tester.clock.advance(Duration.ofMinutes(30));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
// The down tenant node is now failed and replaced
assertEquals(3 + 1, tester.deployer.redeployments);
assertEquals(4, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(9, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(6, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
// A second host (not the parent of downTenant1) goes down and is failed after 90 minutes
String downHost2 = selectFirstParentHostWithNActiveNodesExcept(tester.nodeRepository, 2, downTenant1.parentHostname().get());
tester.serviceMonitor.setHostDown(downHost2);
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(90));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(5 + 2, tester.deployer.redeployments);
assertEquals(7, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(6, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(5, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
// A third host goes down; after a day its children are failed, but the host itself
// remains active (only one of its children could be redeployed; see the redeployment count)
String downHost3 = selectFirstParentHostWithNActiveNodesExcept(tester.nodeRepository, 2, downTenant1.parentHostname().get());
tester.serviceMonitor.setHostDown(downHost3);
tester.runMaintainers();
tester.clock.advance(Duration.ofDays(1));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(6 + 2, tester.deployer.redeployments);
assertEquals(9, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(4, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(5, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
}
// Proxy nodes: with 10 nodes, 2 down, one may be failed (redundancy permits it).
@Test
public void failing_proxy_nodes() {
test_infra_application_fail(NodeType.proxy, 10, 1);
}
// Config hosts: with only 3 nodes, 2 down, none may be failed (would lose quorum).
@Test
public void failing_config_hosts() {
test_infra_application_fail(NodeType.confighost, 3, 0);
}
/**
 * Shared scenario for infrastructure applications: takes two of {@code count} nodes down
 * and verifies that exactly {@code expectedFailCount} of them are failed once the grace
 * period has expired, and that no further failing happens afterwards.
 */
private void test_infra_application_fail(NodeType nodeType, int count, int expectedFailCount) {
NodeFailTester tester = NodeFailTester.withInfraApplication(nodeType, count);
// Baseline: all nodes stay active for 24 hours while everything is up
for (int minutes = 0; minutes < 24 * 60; minutes +=5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals(count, tester.nodeRepository.nodes().list(Node.State.active).nodeType(nodeType).size());
}
Set<String> downHosts = Set.of("host2", "host3");
for (String downHost : downHosts)
tester.serviceMonitor.setHostDown(downHost);
// Within the grace period nothing is failed
for (int minutes = 0; minutes < 45; minutes +=5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals( 0, tester.deployer.redeployments);
assertEquals(count, tester.nodeRepository.nodes().list(Node.State.active).nodeType(nodeType).size());
}
tester.clock.advance(Duration.ofMinutes(60));
tester.runMaintainers();
// Grace period over: exactly the expected number of down nodes are failed
assertEquals(expectedFailCount, tester.deployer.redeployments);
assertEquals(count - expectedFailCount, tester.nodeRepository.nodes().list(Node.State.active).nodeType(nodeType).size());
assertEquals(expectedFailCount, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(nodeType).size());
tester.nodeRepository.nodes().list(Node.State.failed).nodeType(nodeType)
.forEach(node -> assertTrue(downHosts.contains(node.hostname())));
// Another hour passes: no additional nodes are failed
tester.clock.advance(Duration.ofMinutes(60));
tester.runMaintainers();
assertEquals(count - expectedFailCount, tester.nodeRepository.nodes().list(Node.State.active).nodeType(nodeType).size());
}
// Verifies that a ready node with a hard-fail hardware report is failed immediately,
// without any grace period.
@Test
public void failing_divergent_ready_nodes() {
NodeFailTester tester = NodeFailTester.withNoApplications();
Node readyNode = tester.createReadyNodes(1).get(0);
tester.runMaintainers();
assertEquals(Node.State.ready, readyNode.state());
// Attach a hard-fail report to the ready node
tester.nodeRepository.nodes().write(readyNode.with(new Reports().withReport(badTotalMemorySizeReport)), () -> {});
tester.runMaintainers();
assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).size());
}
/**
 * Verifies {@link NodeHealthTracker}'s "all down" decision via {@link #badNode}:
 * a node is considered bad only when at least one service is DOWN and none are UP;
 * NOT_CHECKED services are ignored.
 */
// Fix: the annotation was duplicated (two consecutive @Test lines), which does not
// compile since @Test is not a repeatable annotation.
@Test
public void testUpness() {
    assertFalse(badNode(0, 0, 0)); // no services at all
    assertFalse(badNode(0, 0, 2)); // only unchecked services
    assertFalse(badNode(0, 3, 0)); // all up
    assertFalse(badNode(0, 3, 2));
    assertTrue(badNode(1, 0, 0));  // down and nothing up
    assertTrue(badNode(1, 0, 2));
    assertFalse(badNode(1, 3, 0)); // down, but others are up
    assertFalse(badNode(1, 3, 2));
}
/** Appends {@code num} mocked service instances, each reporting the given status, to {@code list}. */
private void addServiceInstances(List<ServiceInstance> list, ServiceStatus status, int num) {
    for (int remaining = num; remaining > 0; remaining--) {
        ServiceInstance mockedService = mock(ServiceInstance.class);
        when(mockedService.serviceStatus()).thenReturn(status);
        list.add(mockedService);
    }
}
/**
 * Builds a shuffled list of mocked services with the given counts per status and
 * reports whether {@link NodeHealthTracker#allDown} considers the node down.
 */
private boolean badNode(int numDown, int numUp, int numNotChecked) {
    List<ServiceInstance> instances = new ArrayList<>();
    addServiceInstances(instances, ServiceStatus.DOWN, numDown);
    addServiceInstances(instances, ServiceStatus.UP, numUp);
    addServiceInstances(instances, ServiceStatus.NOT_CHECKED, numNotChecked);
    Collections.shuffle(instances); // the verdict must not depend on service order
    return NodeHealthTracker.allDown(instances);
}
/**
 * Selects the first parent host that:
 * - has exactly n active tenant nodes
 * - is not present in the 'except' array
 *
 * @throws java.util.NoSuchElementException if no such host exists
 */
private static String selectFirstParentHostWithNActiveNodesExcept(NodeRepository nodeRepository, int n, String... except) {
    Set<String> exceptSet = Arrays.stream(except).collect(Collectors.toSet());
    return nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).stream()
            .collect(Collectors.groupingBy(Node::parentHostname))
            .entrySet().stream()
            .filter(entry -> entry.getValue().size() == n)
            .map(Map.Entry::getKey)
            // A needless flatMap over a one-element stream is replaced with a plain map;
            // active tenant nodes always have a parent, so get() is safe here.
            .map(parentHost -> parentHost.get())
            .filter(hostname -> ! exceptSet.contains(hostname))
            .findFirst().orElseThrow();
}
} | class NodeFailerTest {
// Hardware report with HARD_FAIL severity; nodes carrying it become candidates for failing.
private static final Report badTotalMemorySizeReport = Report.basicReport(
"badTotalMemorySize", HARD_FAIL, Instant.now(), "too low");
// Attaches a hard-fail hardware report to a host and verifies (via testNodeFailingWith)
// that the host and its children are failed only as they are suspended.
@Test
public void fail_nodes_with_severe_reports_if_allowed_to_be_down() {
NodeFailTester tester = NodeFailTester.withTwoApplications(6);
String hostWithFailureReports = selectFirstParentHostWithNActiveNodesExcept(tester.nodeRepository, 2);
// Set the hard-fail report on the chosen host
tester.nodeRepository.nodes().list().stream()
.filter(node -> node.hostname().equals(hostWithFailureReports))
.forEach(node -> {
Node updatedNode = node.with(node.reports().withReport(badTotalMemorySizeReport));
tester.nodeRepository.nodes().write(updatedNode, () -> {});
});
testNodeFailingWith(tester, hostWithFailureReports);
}
/**
 * Given a host carrying a hard-fail report with 2 active and 1 ready children, verifies
 * that children are failed one by one as they are suspended, and finally that the host
 * itself is failed once it is suspended and all children are gone.
 */
private void testNodeFailingWith(NodeFailTester tester, String hostWithHwFailure) {
Map<Node.State, List<String>> hostnamesByState = tester.nodeRepository.nodes().list().childrenOf(hostWithHwFailure).asList().stream()
.collect(Collectors.groupingBy(Node::state, Collectors.mapping(Node::hostname, Collectors.toList())));
assertEquals(2, hostnamesByState.get(Node.State.active).size());
assertEquals(1, hostnamesByState.get(Node.State.ready).size());
// Suspend the first active child: it plus the ready child get failed
tester.suspend(hostnamesByState.get(Node.State.active).get(0));
tester.runMaintainers();
tester.clock.advance(Duration.ofHours(25));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
Map<Node.State, List<String>> expectedHostnamesByState1Iter = Map.of(
Node.State.failed, List.of(hostnamesByState.get(Node.State.ready).get(0), hostnamesByState.get(Node.State.active).get(0)),
Node.State.active, hostnamesByState.get(Node.State.active).subList(1, 2));
Map<Node.State, List<String>> hostnamesByState1Iter = tester.nodeRepository.nodes().list().childrenOf(hostWithHwFailure).asList().stream()
.collect(Collectors.groupingBy(Node::state, Collectors.mapping(Node::hostname, Collectors.toList())));
assertEquals(expectedHostnamesByState1Iter, hostnamesByState1Iter);
// Suspend the second active child: now all children are failed
tester.suspend(hostnamesByState.get(Node.State.active).get(1));
tester.clock.advance(Duration.ofHours(25));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
Set<Node.State> childStates2Iter = tester.nodeRepository.nodes().list().childrenOf(hostWithHwFailure).asList().stream()
.map(Node::state).collect(Collectors.toSet());
assertEquals(Set.of(Node.State.failed), childStates2Iter);
// The host is only failed after it is suspended itself
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithHwFailure).get().state());
tester.suspend(hostWithHwFailure);
tester.runMaintainers();
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(hostWithHwFailure).get().state());
assertEquals(4, tester.nodeRepository.nodes().list(Node.State.failed).size());
}
// Verifies that a host with a hard-fail hardware report is failed only after the host and
// every one of its active children have been suspended; each active child is failed only
// once it is suspended itself (the ready child is failed right away).
@Test
public void hw_fail_only_if_whole_host_is_suspended() {
NodeFailTester tester = NodeFailTester.withTwoApplications(6);
String hostWithFailureReports = selectFirstParentHostWithNActiveNodesExcept(tester.nodeRepository, 2);
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state());
Map<Node.State, List<String>> hostnamesByState = tester.nodeRepository.nodes().list().childrenOf(hostWithFailureReports).asList().stream()
.collect(Collectors.groupingBy(Node::state, Collectors.mapping(Node::hostname, Collectors.toList())));
assertEquals(2, hostnamesByState.get(Node.State.active).size());
String activeChild1 = hostnamesByState.get(Node.State.active).get(0);
String activeChild2 = hostnamesByState.get(Node.State.active).get(1);
assertEquals(1, hostnamesByState.get(Node.State.ready).size());
String readyChild = hostnamesByState.get(Node.State.ready).get(0);
// Attach the hard-fail report to the host
Report badTotalMemorySizeReport = Report.basicReport("badTotalMemorySize", HARD_FAIL, Instant.now(), "too low");
tester.nodeRepository.nodes().list().stream()
.filter(node -> node.hostname().equals(hostWithFailureReports))
.forEach(node -> {
Node updatedNode = node.with(node.reports().withReport(badTotalMemorySizeReport));
tester.nodeRepository.nodes().write(updatedNode, () -> {});
});
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
// Only the ready child is failed immediately; host and active children stay active
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyChild).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild1).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild2).get().state());
// Suspending the host alone does not fail the active children
tester.suspend(hostWithFailureReports);
tester.clock.advance(Duration.ofHours(25));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyChild).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild1).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild2).get().state());
// Suspending the first active child fails that child only
tester.suspend(activeChild1);
tester.clock.advance(Duration.ofHours(25));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyChild).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(activeChild1).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild2).get().state());
// With the last child suspended, both the child and the host are failed
tester.suspend(activeChild2);
tester.clock.advance(Duration.ofHours(25));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyChild).get().state());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(activeChild1).get().state());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(activeChild2).get().state());
}
// Verifies that down nodes belonging to a suspended application are not failed, while
// equally-down nodes of a non-suspended application are.
@Test
public void nodes_for_suspended_applications_are_not_failed() {
NodeFailTester tester = NodeFailTester.withTwoApplications();
tester.suspend(NodeFailTester.app1);
String host_from_suspended_app = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(1).hostname();
String host_from_normal_app = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app2).asList().get(3).hostname();
tester.serviceMonitor.setHostDown(host_from_suspended_app);
tester.serviceMonitor.setHostDown(host_from_normal_app);
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(65));
tester.runMaintainers();
// Both nodes are recorded as down ...
assertTrue(tester.nodeRepository.nodes().node(host_from_normal_app).get().isDown());
assertTrue(tester.nodeRepository.nodes().node(host_from_suspended_app).get().isDown());
// ... but only the node of the non-suspended application is failed
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(host_from_normal_app).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(host_from_suspended_app).get().state());
}
// Verifies that the zone is considered non-working once too many nodes are down, and that
// node failing is then deactivated (nothing gets failed even after the grace period).
@Test
public void zone_is_not_working_if_too_many_nodes_down() {
NodeFailTester tester = NodeFailTester.withTwoApplications();
tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(0).hostname());
tester.runMaintainers();
assertTrue(tester.nodeRepository.nodes().isWorking());
tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(1).hostname());
tester.runMaintainers();
assertTrue(tester.nodeRepository.nodes().isWorking());
// Third node down tips the zone into the non-working state
tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(2).hostname());
tester.runMaintainers();
assertFalse(tester.nodeRepository.nodes().isWorking());
tester.clock.advance(Duration.ofMinutes(65));
tester.runMaintainers();
assertTrue("Node failing is deactivated", tester.nodeRepository.nodes().list(Node.State.failed).isEmpty());
}
// End-to-end scenario for tenant node failing: ready nodes with hard-fail reports are failed
// immediately; down active nodes are failed after a grace period and replaced; a node that
// comes back up is not failed; failed indices are not reused; throttling limits how many
// nodes can be failed until capacity is added.
@Test
public void node_failing() {
NodeFailTester tester = NodeFailTester.withTwoApplications();
// Baseline: nothing happens over 24 hours while everything is up
for (int minutes = 0; minutes < 24 * 60; minutes +=5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals( 0, tester.deployer.redeployments);
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals( 4, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
}
// Two ready nodes get hard-fail reports and are failed immediately
Node readyFail1 = tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).asList().get(2);
Node readyFail2 = tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).asList().get(3);
tester.nodeRepository.nodes().write(readyFail1.with(new Reports().withReport(badTotalMemorySizeReport)), () -> {});
tester.nodeRepository.nodes().write(readyFail2.with(new Reports().withReport(badTotalMemorySizeReport)), () -> {});
assertEquals(4, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
tester.runMaintainers();
assertEquals(2, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyFail1.hostname()).get().state());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyFail2.hostname()).get().state());
// Two active nodes go down; nothing happens within the grace period
String downHost1 = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(1).hostname();
String downHost2 = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app2).asList().get(3).hostname();
tester.serviceMonitor.setHostDown(downHost1);
tester.serviceMonitor.setHostDown(downHost2);
for (int minutes = 0; minutes < 45; minutes +=5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals( 0, tester.deployer.redeployments);
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals( 2, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals( 2, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
}
// downHost1 recovers; only downHost2 is failed and replaced after the grace period
tester.serviceMonitor.setHostUp(downHost1);
tester.clock.advance(Duration.ofDays(1));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals( 1, tester.deployer.redeployments);
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals( 3, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals( 1, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(downHost2, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).asList().get(0).hostname());
// downHost1 goes down again; a freshly created failer still fails it after the grace period
tester.serviceMonitor.setHostDown(downHost1);
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
tester.clock.advance(Duration.ofMinutes(120));
tester.failer = tester.createFailer();
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals( 2, tester.deployer.redeployments);
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals( 4, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
// One more node goes down, but with no ready capacity left it cannot be failed
Node lastNode = tester.highestIndex(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1));
tester.serviceMonitor.setHostDown(lastNode.hostname());
for (int minutes = 0; minutes < 75; minutes +=5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals( 2, tester.deployer.redeployments);
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals( 4, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
}
// Adding a ready node allows the failing to proceed; the failed node's index is not reused
tester.createReadyNodes(1, 16, NodeFailTester.nodeResources);
tester.clock.advance(Duration.ofDays(1));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals( 3, tester.deployer.redeployments);
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals( 5, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertTrue("The index of the last failed node is not reused",
tester.highestIndex(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1)).allocation().get().membership().index()
>
lastNode.allocation().get().membership().index());
assertEquals("Node failing does not cause recording of scaling events",
1,
tester.nodeRepository.applications().get(NodeFailTester.app1).get().cluster(NodeFailTester.testCluster).get().scalingEvents().size());
}
// Verifies that a manually reactivated node gets a fresh grace period before it can be
// failed again, even though the service monitor still reports it down.
@Test
public void re_activate_grace_period_test() {
NodeFailTester tester = NodeFailTester.withTwoApplications();
String downNode = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(1).hostname();
tester.serviceMonitor.setHostDown(downNode);
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
// Not failed yet: grace period has not expired
assertEquals(0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
tester.clock.advance(Duration.ofMinutes(75));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
// After 75 minutes down the node is failed
assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(downNode).get().state());
// Operator reactivates the node; this should reset the down timer
tester.nodeRepository.nodes().reactivate(downNode, Agent.system, getClass().getSimpleName());
tester.clock.advance(Duration.ofMinutes(30));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
// 30 minutes after reactivation: still inside the new grace period, not failed
assertEquals(0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
tester.clock.advance(Duration.ofMinutes(45));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
// 75 minutes after reactivation: grace period has expired again, node is re-failed
assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(downNode).get().state());
}
// Verifies that failing a node succeeds when the replacement must come from spare capacity:
// the cluster has 3 nodes on 4 hosts with a spare count of 1.
@Test
public void node_failing_can_allocate_spare() {
var resources = new NodeResources(1, 20, 15, 1);
Capacity capacity = Capacity.from(new ClusterResources(3, 1, resources), false, true);
NodeFailTester tester = NodeFailTester.withOneUndeployedApplication(capacity);
assertEquals("Test depends on this setting in NodeFailTester", 1, tester.nodeRepository.spareCount());
tester.createAndActivateHosts(4, resources);
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test")).vespaVersion("6.42").build();
tester.activate(NodeFailTester.app1, cluster, capacity);
String downHost = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).first().get().hostname();
tester.serviceMonitor.setHostDown(downHost);
// Within the grace period nothing is failed or redeployed
for (int minutes = 0; minutes < 45; minutes +=5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals(0, tester.deployer.redeployments);
assertEquals(3, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
}
tester.clock.advance(Duration.ofDays(1));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
// After the grace period the down node is failed and a replacement is allocated (from the spare),
// so the active count stays at 3
assertEquals(1, tester.deployer.redeployments);
assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(3, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(downHost, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).asList().get(0).hostname());
}
// Verifies that when a host with a severe hardware report is suspended together with its child,
// the child can be replaced from spare capacity even in a grouped (content) cluster.
@Test
public void node_failing_can_allocate_spare_to_replace_failed_node_in_group() {
NodeResources resources = new NodeResources(1, 20, 15, 1);
Capacity capacity = Capacity.from(new ClusterResources(4, 2, resources), false, true);
ClusterSpec spec = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test")).vespaVersion("6.42").build();
NodeFailTester tester = NodeFailTester.withOneUndeployedApplication(capacity, spec);
assertEquals("Test depends on this setting in NodeFailTester", 1, tester.nodeRepository.spareCount());
tester.createAndActivateHosts(5, resources);
tester.activate(NodeFailTester.app1, spec, capacity);
NodeList activeNodes = tester.nodeRepository.nodes().list(Node.State.active);
Node downNode = activeNodes.owner(NodeFailTester.app1).first().get();
Node downHost = activeNodes.parentOf(downNode).get();
// Mark the parent host with a hard-fail hardware report and suspend host + child
tester.tester.patchNode(downHost, (node) -> node.with(node.reports().withReport(badTotalMemorySizeReport)));
tester.suspend(downHost.hostname());
tester.suspend(downNode.hostname());
tester.runMaintainers();
// The child is failed and replaced, keeping 4 active tenant nodes
assertEquals(1, tester.deployer.redeployments);
NodeList failedOrActive = tester.nodeRepository.nodes().list(Node.State.active, Node.State.failed);
assertEquals(4, failedOrActive.state(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(Set.of(downNode.hostname()), failedOrActive.state(Node.State.failed).nodeType(NodeType.tenant).hostnames());
}
/**
 * A ready host that stops making config requests is still not failed —
 * verifies hosts are exempt from the missing-config-request failure path.
 */
@Test
public void host_not_failed_without_config_requests() {
NodeFailTester tester = NodeFailTester.withTwoApplications();
// Baseline: a full day with everyone making config requests keeps all hosts ready
for (int minutes = 0, interval = 30; minutes < 24 * 60; minutes += interval) {
tester.clock.advance(Duration.ofMinutes(interval));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals( 3, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.host).size());
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.host).size());
}
tester.clock.advance(Duration.ofMinutes(180));
// One host stops making config requests ...
Node host = tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.host).first().get();
tester.allNodesMakeAConfigRequestExcept(host);
tester.runMaintainers();
// ... yet it is not failed
assertEquals( 3, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.host).size());
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.host).size());
}
/**
 * Failing a host also fails its children: walks through a sequence of hosts
 * and tenant nodes going down and verifies redeployment counts and node-state
 * bookkeeping after each grace period.
 */
@Test
public void failing_hosts() {
NodeFailTester tester = NodeFailTester.withTwoApplications(7);
// Baseline: a healthy day changes nothing
for (int minutes = 0, interval = 30; minutes < 24 * 60; minutes += interval) {
tester.clock.advance(Duration.ofMinutes(interval));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(13, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(7, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
}
// A host with 2 active children goes down; nothing is failed within the grace period
String downHost1 = selectFirstParentHostWithNActiveNodesExcept(tester.nodeRepository, 2);
tester.serviceMonitor.setHostDown(downHost1);
for (int minutes = 0; minutes < 45; minutes += 5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals(0, tester.deployer.redeployments);
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(13, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(7, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
}
// After the grace period the host and its 2 children (+ ready child) are failed
tester.clock.advance(Duration.ofMinutes(30));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(2 + 1, tester.deployer.redeployments);
assertEquals(3, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(10, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(6, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.host).size());
// A single tenant node goes down; failed count is stable until its grace period passes
Node downTenant1 = tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).first().get();
tester.serviceMonitor.setHostDown(downTenant1.hostname());
for (int minutes = 0, interval = 30; minutes < 24 * 60; minutes += interval) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(interval));
tester.allNodesMakeAConfigRequestExcept();
assertEquals(3 + 1, tester.nodeRepository.nodes().list(Node.State.failed).size());
}
tester.clock.advance(Duration.ofMinutes(30));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(3 + 1, tester.deployer.redeployments);
assertEquals(4, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(9, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(6, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
// A second host (with 2 children) goes down
String downHost2 = selectFirstParentHostWithNActiveNodesExcept(tester.nodeRepository, 2, downTenant1.parentHostname().get());
tester.serviceMonitor.setHostDown(downHost2);
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(90));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(5 + 2, tester.deployer.redeployments);
assertEquals(7, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(6, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(5, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
// And a third one
String downHost3 = selectFirstParentHostWithNActiveNodesExcept(tester.nodeRepository, 2, downTenant1.parentHostname().get());
tester.serviceMonitor.setHostDown(downHost3);
tester.runMaintainers();
tester.clock.advance(Duration.ofDays(1));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(6 + 2, tester.deployer.redeployments);
assertEquals(9, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(4, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(5, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
}
// Proxy nodes: with 10 nodes and 2 suspected-down hosts, 1 failure is expected
// (presumably limited by the infra failure budget — see test_infra_application_fail).
@Test
public void failing_proxy_nodes() {
test_infra_application_fail(NodeType.proxy, 10, 1);
}
// Config hosts: with only 3 nodes, no host may be failed (expected fail count 0).
@Test
public void failing_config_hosts() {
test_infra_application_fail(NodeType.confighost, 3, 0);
}
/**
 * Drives the maintainers against an infrastructure application of {@code count}
 * nodes, takes two hosts down, and verifies that exactly
 * {@code expectedFailCount} of them end up failed after the grace period while
 * the remaining nodes stay active.
 */
private void test_infra_application_fail(NodeType nodeType, int count, int expectedFailCount) {
NodeFailTester tester = NodeFailTester.withInfraApplication(nodeType, count);
// One day of healthy 5-minute ticks: every node stays active
for (int tick = 0; tick < (24 * 60) / 5; tick++) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals(count, tester.nodeRepository.nodes().list(Node.State.active).nodeType(nodeType).size());
}
// Two hosts go down
Set<String> downHosts = Set.of("host2", "host3");
downHosts.forEach(tester.serviceMonitor::setHostDown);
// Within the first 45 minutes nothing is failed yet
for (int tick = 0; tick < 45 / 5; tick++) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals( 0, tester.deployer.redeployments);
assertEquals(count, tester.nodeRepository.nodes().list(Node.State.active).nodeType(nodeType).size());
}
// After the grace period, exactly the expected number of down hosts are failed
tester.clock.advance(Duration.ofMinutes(60));
tester.runMaintainers();
assertEquals(expectedFailCount, tester.deployer.redeployments);
assertEquals(count - expectedFailCount, tester.nodeRepository.nodes().list(Node.State.active).nodeType(nodeType).size());
assertEquals(expectedFailCount, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(nodeType).size());
tester.nodeRepository.nodes().list(Node.State.failed).nodeType(nodeType)
.forEach(failed -> assertTrue(downHosts.contains(failed.hostname())));
// Running again later fails no additional nodes
tester.clock.advance(Duration.ofMinutes(60));
tester.runMaintainers();
assertEquals(count - expectedFailCount, tester.nodeRepository.nodes().list(Node.State.active).nodeType(nodeType).size());
}
/**
 * A ready node that acquires a hard-fail hardware report is failed by the
 * maintainers without needing to be suspended first.
 */
@Test
public void failing_divergent_ready_nodes() {
NodeFailTester tester = NodeFailTester.withNoApplications();
Node readyNode = tester.createReadyNodes(1).get(0);
tester.runMaintainers();
assertEquals(Node.State.ready, readyNode.state());
// Attach the hard-fail report to the ready node
tester.nodeRepository.nodes().write(readyNode.with(new Reports().withReport(badTotalMemorySizeReport)), () -> {});
tester.runMaintainers();
assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).size());
}
/**
 * A node counts as bad only when at least one service is DOWN and none is UP;
 * NOT_CHECKED services never tip the verdict either way.
 *
 * Fix: the annotation was duplicated ({@code @Test} twice); {@code @Test} is
 * not repeatable, so the duplicate did not compile and has been removed.
 */
@Test
public void testUpness() {
// arguments are (numDown, numUp, numNotChecked)
assertFalse(badNode(0, 0, 0));
assertFalse(badNode(0, 0, 2));
assertFalse(badNode(0, 3, 0));
assertFalse(badNode(0, 3, 2));
assertTrue(badNode(1, 0, 0));
assertTrue(badNode(1, 0, 2));
assertFalse(badNode(1, 3, 0));
assertFalse(badNode(1, 3, 2));
}
/** Appends {@code num} mocked service instances, all reporting {@code status}, to the given list. */
private void addServiceInstances(List<ServiceInstance> list, ServiceStatus status, int num) {
for (int remaining = num; remaining > 0; remaining--) {
ServiceInstance instance = mock(ServiceInstance.class);
when(instance.serviceStatus()).thenReturn(status);
list.add(instance);
}
}
/**
 * Builds a shuffled service list with the given mix of DOWN/UP/NOT_CHECKED
 * statuses and returns whether NodeHealthTracker considers it all-down.
 */
private boolean badNode(int numDown, int numUp, int numNotChecked) {
List<ServiceInstance> instances = new ArrayList<>();
addServiceInstances(instances, ServiceStatus.DOWN, numDown);
addServiceInstances(instances, ServiceStatus.UP, numUp);
addServiceInstances(instances, ServiceStatus.NOT_CHECKED, numNotChecked);
// Shuffle so the verdict cannot depend on ordering
Collections.shuffle(instances);
return NodeHealthTracker.allDown(instances);
}
/**
 * Selects the first parent host that:
 * - has exactly n nodes in state 'active'
 * - is not present in the 'except' array
 *
 * @throws java.util.NoSuchElementException if no such parent host exists
 */
private static String selectFirstParentHostWithNActiveNodesExcept(NodeRepository nodeRepository, int n, String... except) {
Set<String> exceptSet = Arrays.stream(except).collect(Collectors.toSet());
return nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).stream()
.collect(Collectors.groupingBy(Node::parentHostname))
.entrySet().stream()
.filter(entry -> entry.getValue().size() == n)
.map(Map.Entry::getKey)
// Tenant nodes always have a parent here, so unwrapping is safe
.map(parentHost -> parentHost.get())
.filter(hostname -> ! exceptSet.contains(hostname))
.findFirst()
.orElseThrow();
}
}
public void node_failing_throttle() {
{ // Scenario 1: throttling of host failures
NodeFailTester tester = NodeFailTester.withTwoApplications(10);
NodeList hosts = tester.nodeRepository.nodes().list().nodeType(NodeType.host);
List<Node> deadNodes = new ArrayList<>();
// Take the first 3 hosts down
List<Node> failedHosts = hosts.asList().subList(0, 3);
failedHosts.forEach(host -> {
tester.serviceMonitor.setHostDown(host.hostname());
deadNodes.add(host);
});
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(61));
tester.allNodesMakeAConfigRequestExcept(deadNodes);
tester.runMaintainers();
// Only 2 of the 3 down hosts (plus their children) are failed; the third is throttled
assertEquals(2 + /* hosts */
(2 * 3) /* containers per host */,
tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is indicated by the metric", 1, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("Throttled host failures", 1, tester.metric.values.get(NodeFailer.throttledHostFailuresMetric));
// Nothing more is failed during the following ~23 hours
for (int minutes = 0, interval = 30; minutes <= 23 * 60; minutes += interval) {
tester.clock.advance(Duration.ofMinutes(interval));
tester.allNodesMakeAConfigRequestExcept(deadNodes);
}
tester.runMaintainers();
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is indicated by the metric", 1, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("Throttled host failures", 1, tester.metric.values.get(NodeFailer.throttledHostFailuresMetric));
// Once the throttle window passes, the remaining host (and children) are failed
tester.clock.advance(Duration.ofMinutes(30));
tester.runMaintainers();
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is not indicated by the metric, as no throttled attempt is made", 0, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("No throttled node failures", 0, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric));
tester.clock.advance(Duration.ofHours(25));
tester.allNodesMakeAConfigRequestExcept(deadNodes);
tester.runMaintainers();
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is not indicated by the metric", 0, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("No throttled node failures", 0, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric));
}
{ // Scenario 2: throttling of tenant-node failures in a large zone
NodeFailTester tester = NodeFailTester.withTwoApplications(300, 100, 100);
NodeList allNodes = tester.nodeRepository.nodes().list();
assertEquals(500, allNodes.size());
tester.runMaintainers();
// 15 tenant nodes go down
allNodes.state(Node.State.active)
.nodeType(NodeType.tenant)
.stream()
.limit(15)
.forEach(host -> tester.serviceMonitor.setHostDown(host.hostname()));
tester.runMaintainers();
tester.clock.advance(Duration.ofHours(2));
tester.runMaintainers();
// Only 10 are failed; 5 are throttled
assertEquals(10, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is indicated by the metric.", 1, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("Throttled node failures", 5, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric));
tester.clock.advance(Duration.ofHours(6));
tester.runMaintainers();
assertEquals(10, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is indicated by the metric.", 1, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("Throttled node failures", 5, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric));
// After the throttle window the remaining 5 are failed too
tester.clock.advance(Duration.ofHours(18));
tester.runMaintainers();
assertEquals(15, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is not indicated by the metric, as no throttled attempt is made.", 0, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("No throttled node failures", 0, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric));
}
}

public void node_failing_throttle() {
{ // Scenario 1: throttling of host failures
NodeFailTester tester = NodeFailTester.withTwoApplications(10);
NodeList hosts = tester.nodeRepository.nodes().list().nodeType(NodeType.host);
// Take the first 3 hosts down
List<Node> failedHosts = hosts.asList().subList(0, 3);
failedHosts.forEach(host -> {
tester.serviceMonitor.setHostDown(host.hostname());
});
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(61));
tester.runMaintainers();
// Only 2 of the 3 down hosts (plus their children) are failed; the third is throttled
assertEquals(2 + /* hosts */
(2 * 3) /* containers per host */,
tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is indicated by the metric", 1, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("Throttled host failures", 1, tester.metric.values.get(NodeFailer.throttledHostFailuresMetric));
// Nothing more is failed during the following ~23 hours
for (int minutes = 0, interval = 30; minutes <= 23 * 60; minutes += interval) {
tester.clock.advance(Duration.ofMinutes(interval));
}
tester.runMaintainers();
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is indicated by the metric", 1, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("Throttled host failures", 1, tester.metric.values.get(NodeFailer.throttledHostFailuresMetric));
// Once the throttle window passes, the remaining host (and children) are failed
tester.clock.advance(Duration.ofMinutes(30));
tester.runMaintainers();
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is not indicated by the metric, as no throttled attempt is made", 0, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("No throttled node failures", 0, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric));
tester.clock.advance(Duration.ofHours(25));
tester.runMaintainers();
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is not indicated by the metric", 0, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("No throttled node failures", 0, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric));
}
{ // Scenario 2: throttling of tenant-node failures in a large zone
NodeFailTester tester = NodeFailTester.withTwoApplications(300, 100, 100);
NodeList allNodes = tester.nodeRepository.nodes().list();
assertEquals(500, allNodes.size());
tester.runMaintainers();
// 15 tenant nodes go down
allNodes.state(Node.State.active)
.nodeType(NodeType.tenant)
.stream()
.limit(15)
.forEach(host -> tester.serviceMonitor.setHostDown(host.hostname()));
tester.runMaintainers();
tester.clock.advance(Duration.ofHours(2));
tester.runMaintainers();
// Only 10 are failed; 5 are throttled
assertEquals(10, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is indicated by the metric.", 1, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("Throttled node failures", 5, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric));
tester.clock.advance(Duration.ofHours(6));
tester.runMaintainers();
assertEquals(10, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is indicated by the metric.", 1, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("Throttled node failures", 5, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric));
// After the throttle window the remaining 5 are failed too
tester.clock.advance(Duration.ofHours(18));
tester.runMaintainers();
assertEquals(15, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is not indicated by the metric, as no throttled attempt is made.", 0, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("No throttled node failures", 0, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric));
}
}

class NodeFailerTest {
// Hardware report that hard-fails a node (reported memory size too low)
private static final Report badTotalMemorySizeReport = Report.basicReport(
"badTotalMemorySize", HARD_FAIL, Instant.now(), "too low");
/**
 * Nodes with a severe (HARD_FAIL) hardware report are failed, but active ones
 * only once they are allowed to be down (suspended) — the detailed sequence is
 * verified by {@link #testNodeFailingWith}.
 */
@Test
public void fail_nodes_with_severe_reports_if_allowed_to_be_down() {
NodeFailTester tester = NodeFailTester.withTwoApplications(6);
// Give a parent host with two active children a hard-fail report
String hostWithFailureReports = selectFirstParentHostWithNActiveNodesExcept(tester.nodeRepository, 2);
tester.nodeRepository.nodes().list().stream()
.filter(node -> node.hostname().equals(hostWithFailureReports))
.forEach(node -> {
Node updatedNode = node.with(node.reports().withReport(badTotalMemorySizeReport));
tester.nodeRepository.nodes().write(updatedNode, () -> {});
});
testNodeFailingWith(tester, hostWithFailureReports);
}
/**
 * Drives suspension of the hard-failed host's children one by one and verifies
 * that each child (and finally the host itself) is failed only after it has
 * been suspended and the grace period has passed.
 */
private void testNodeFailingWith(NodeFailTester tester, String hostWithHwFailure) {
// Group the host's children by state: expect 2 active + 1 ready
Map<Node.State, List<String>> hostnamesByState = tester.nodeRepository.nodes().list().childrenOf(hostWithHwFailure).asList().stream()
.collect(Collectors.groupingBy(Node::state, Collectors.mapping(Node::hostname, Collectors.toList())));
assertEquals(2, hostnamesByState.get(Node.State.active).size());
assertEquals(1, hostnamesByState.get(Node.State.ready).size());
// Suspend the first active child; after the grace period it and the ready child are failed
tester.suspend(hostnamesByState.get(Node.State.active).get(0));
tester.runMaintainers();
tester.clock.advance(Duration.ofHours(25));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
Map<Node.State, List<String>> expectedHostnamesByState1Iter = Map.of(
Node.State.failed, List.of(hostnamesByState.get(Node.State.ready).get(0), hostnamesByState.get(Node.State.active).get(0)),
Node.State.active, hostnamesByState.get(Node.State.active).subList(1, 2));
Map<Node.State, List<String>> hostnamesByState1Iter = tester.nodeRepository.nodes().list().childrenOf(hostWithHwFailure).asList().stream()
.collect(Collectors.groupingBy(Node::state, Collectors.mapping(Node::hostname, Collectors.toList())));
assertEquals(expectedHostnamesByState1Iter, hostnamesByState1Iter);
// Suspend the second active child; all children end up failed
tester.suspend(hostnamesByState.get(Node.State.active).get(1));
tester.clock.advance(Duration.ofHours(25));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
Set<Node.State> childStates2Iter = tester.nodeRepository.nodes().list().childrenOf(hostWithHwFailure).asList().stream()
.map(Node::state).collect(Collectors.toSet());
assertEquals(Set.of(Node.State.failed), childStates2Iter);
// The host itself is failed only once it is suspended too
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithHwFailure).get().state());
tester.suspend(hostWithHwFailure);
tester.runMaintainers();
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(hostWithHwFailure).get().state());
assertEquals(4, tester.nodeRepository.nodes().list(Node.State.failed).size());
}
/**
 * A hard-fail report on a host fails its ready child immediately, but the host
 * and its active children are failed only after each active child (not just
 * the host) has been suspended.
 */
@Test
public void hw_fail_only_if_whole_host_is_suspended() {
NodeFailTester tester = NodeFailTester.withTwoApplications(6);
String hostWithFailureReports = selectFirstParentHostWithNActiveNodesExcept(tester.nodeRepository, 2);
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state());
// Expect 2 active children and 1 ready child on the chosen host
Map<Node.State, List<String>> hostnamesByState = tester.nodeRepository.nodes().list().childrenOf(hostWithFailureReports).asList().stream()
.collect(Collectors.groupingBy(Node::state, Collectors.mapping(Node::hostname, Collectors.toList())));
assertEquals(2, hostnamesByState.get(Node.State.active).size());
String activeChild1 = hostnamesByState.get(Node.State.active).get(0);
String activeChild2 = hostnamesByState.get(Node.State.active).get(1);
assertEquals(1, hostnamesByState.get(Node.State.ready).size());
String readyChild = hostnamesByState.get(Node.State.ready).get(0);
// Give the host a hard-fail report
Report badTotalMemorySizeReport = Report.basicReport("badTotalMemorySize", HARD_FAIL, Instant.now(), "too low");
tester.nodeRepository.nodes().list().stream()
.filter(node -> node.hostname().equals(hostWithFailureReports))
.forEach(node -> {
Node updatedNode = node.with(node.reports().withReport(badTotalMemorySizeReport));
tester.nodeRepository.nodes().write(updatedNode, () -> {});
});
// Only the ready child is failed right away
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyChild).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild1).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild2).get().state());
// Suspending just the host changes nothing for the active children
tester.suspend(hostWithFailureReports);
tester.clock.advance(Duration.ofHours(25));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyChild).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild1).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild2).get().state());
// Each active child is failed once it is suspended itself
tester.suspend(activeChild1);
tester.clock.advance(Duration.ofHours(25));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyChild).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(activeChild1).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild2).get().state());
// Once the last child is suspended, host and all children are failed
tester.suspend(activeChild2);
tester.clock.advance(Duration.ofHours(25));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyChild).get().state());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(activeChild1).get().state());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(activeChild2).get().state());
}
/**
 * A down node belonging to a suspended application stays active (it is still
 * marked down), while a down node of a non-suspended application is failed.
 */
@Test
public void nodes_for_suspended_applications_are_not_failed() {
NodeFailTester tester = NodeFailTester.withTwoApplications();
tester.suspend(NodeFailTester.app1);
// One down node from the suspended app, one from a normal app
String host_from_suspended_app = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(1).hostname();
String host_from_normal_app = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app2).asList().get(3).hostname();
tester.serviceMonitor.setHostDown(host_from_suspended_app);
tester.serviceMonitor.setHostDown(host_from_normal_app);
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(65));
tester.runMaintainers();
// Both are registered as down, but only the normal app's node is failed
assertTrue(tester.nodeRepository.nodes().node(host_from_normal_app).get().isDown());
assertTrue(tester.nodeRepository.nodes().node(host_from_suspended_app).get().isDown());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(host_from_normal_app).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(host_from_suspended_app).get().state());
}
/**
 * When too many nodes are down, the zone is declared not working and node
 * failing is deactivated (no nodes get failed).
 */
@Test
public void zone_is_not_working_if_too_many_nodes_down() {
NodeFailTester tester = NodeFailTester.withTwoApplications();
tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(0).hostname());
tester.runMaintainers();
assertTrue(tester.nodeRepository.nodes().isWorking());
tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(1).hostname());
tester.runMaintainers();
assertTrue(tester.nodeRepository.nodes().isWorking());
// The third down node tips the zone into not-working
tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(2).hostname());
tester.runMaintainers();
assertFalse(tester.nodeRepository.nodes().isWorking());
tester.clock.advance(Duration.ofMinutes(65));
tester.runMaintainers();
assertTrue("Node failing is deactivated", tester.nodeRepository.nodes().list(Node.State.failed).isEmpty());
}
/**
 * End-to-end node failing: ready nodes with hard-fail reports are failed
 * immediately; down active nodes are failed (and replaced via redeployment)
 * after the grace period, but only while replacement capacity exists; failed
 * nodes' cluster indices are never reused; node failing records no scaling events.
 */
@Test
public void node_failing() {
NodeFailTester tester = NodeFailTester.withTwoApplications();
// Baseline: a healthy day changes nothing
for (int minutes = 0; minutes < 24 * 60; minutes +=5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals( 0, tester.deployer.redeployments);
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals( 4, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
}
// Two ready nodes get hard-fail reports and are failed on the next run
Node readyFail1 = tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).asList().get(2);
Node readyFail2 = tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).asList().get(3);
tester.nodeRepository.nodes().write(readyFail1.with(new Reports().withReport(badTotalMemorySizeReport)), () -> {});
tester.nodeRepository.nodes().write(readyFail2.with(new Reports().withReport(badTotalMemorySizeReport)), () -> {});
assertEquals(4, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
tester.runMaintainers();
assertEquals(2, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyFail1.hostname()).get().state());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyFail2.hostname()).get().state());
// Two active nodes go down; nothing happens within the grace period
String downHost1 = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(1).hostname();
String downHost2 = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app2).asList().get(3).hostname();
tester.serviceMonitor.setHostDown(downHost1);
tester.serviceMonitor.setHostDown(downHost2);
for (int minutes = 0; minutes < 45; minutes +=5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals( 0, tester.deployer.redeployments);
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals( 2, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals( 2, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
}
// downHost1 comes back up; only downHost2 is failed and replaced
tester.serviceMonitor.setHostUp(downHost1);
tester.clock.advance(Duration.ofDays(1));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals( 1, tester.deployer.redeployments);
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals( 3, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals( 1, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(downHost2, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).asList().get(0).hostname());
// downHost1 goes down again; recreating the failer simulates a maintainer restart
tester.serviceMonitor.setHostDown(downHost1);
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
tester.clock.advance(Duration.ofMinutes(120));
tester.failer = tester.createFailer();
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals( 2, tester.deployer.redeployments);
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals( 4, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
// With no ready nodes left, a down node cannot be failed/replaced
Node lastNode = tester.highestIndex(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1));
tester.serviceMonitor.setHostDown(lastNode.hostname());
for (int minutes = 0; minutes < 75; minutes +=5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals( 2, tester.deployer.redeployments);
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals( 4, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
}
// Once new capacity appears the node is failed and replaced
tester.createReadyNodes(1, 16, NodeFailTester.nodeResources);
tester.clock.advance(Duration.ofDays(1));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals( 3, tester.deployer.redeployments);
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals( 5, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertTrue("The index of the last failed node is not reused",
tester.highestIndex(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1)).allocation().get().membership().index()
>
lastNode.allocation().get().membership().index());
assertEquals("Node failing does not cause recording of scaling events",
1,
tester.nodeRepository.applications().get(NodeFailTester.app1).get().cluster(NodeFailTester.testCluster).get().scalingEvents().size());
}
/**
 * A manually reactivated node gets a fresh grace period: it is not re-failed
 * immediately, but is failed again once it has stayed down past the new window.
 */
@Test
public void re_activate_grace_period_test() {
NodeFailTester tester = NodeFailTester.withTwoApplications();
String downNode = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(1).hostname();
tester.serviceMonitor.setHostDown(downNode);
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
// After the grace period the down node is failed
tester.clock.advance(Duration.ofMinutes(75));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(downNode).get().state());
// Reactivate: the node is not re-failed within the new grace period ...
tester.nodeRepository.nodes().reactivate(downNode, Agent.system, getClass().getSimpleName());
tester.clock.advance(Duration.ofMinutes(30));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
// ... but is failed again once it expires
tester.clock.advance(Duration.ofMinutes(45));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(downNode).get().state());
}
/**
 * A down node can be failed and replaced using the zone's spare host, keeping
 * the cluster at its requested size.
 */
@Test
public void node_failing_can_allocate_spare() {
var resources = new NodeResources(1, 20, 15, 1);
Capacity capacity = Capacity.from(new ClusterResources(3, 1, resources), false, true);
NodeFailTester tester = NodeFailTester.withOneUndeployedApplication(capacity);
assertEquals("Test depends on this setting in NodeFailTester", 1, tester.nodeRepository.spareCount());
tester.createAndActivateHosts(4, resources);
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test")).vespaVersion("6.42").build();
tester.activate(NodeFailTester.app1, cluster, capacity);
// One node goes down; nothing is failed within the grace period
String downHost = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).first().get().hostname();
tester.serviceMonitor.setHostDown(downHost);
for (int minutes = 0; minutes < 45; minutes +=5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals(0, tester.deployer.redeployments);
assertEquals(3, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
}
// After the grace period the node is failed and a replacement is deployed on the spare
tester.clock.advance(Duration.ofDays(1));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(1, tester.deployer.redeployments);
assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(3, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(downHost, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).asList().get(0).hostname());
}
@Test
// A node whose parent host carries a severe hardware report is failed and
// replaced within its content-cluster group, using the spare host
// (5 hosts for a 4-node/2-group cluster with spareCount == 1).
public void node_failing_can_allocate_spare_to_replace_failed_node_in_group() {
NodeResources resources = new NodeResources(1, 20, 15, 1);
Capacity capacity = Capacity.from(new ClusterResources(4, 2, resources), false, true);
ClusterSpec spec = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test")).vespaVersion("6.42").build();
NodeFailTester tester = NodeFailTester.withOneUndeployedApplication(capacity, spec);
assertEquals("Test depends on this setting in NodeFailTester", 1, tester.nodeRepository.spareCount());
tester.createAndActivateHosts(5, resources);
tester.activate(NodeFailTester.app1, spec, capacity);
NodeList activeNodes = tester.nodeRepository.nodes().list(Node.State.active);
Node downNode = activeNodes.owner(NodeFailTester.app1).first().get();
Node downHost = activeNodes.parentOf(downNode).get();
// Mark the parent host with a hard-fail hardware report, then suspend both host and child
tester.tester.patchNode(downHost, (node) -> node.with(node.reports().withReport(badTotalMemorySizeReport)));
tester.suspend(downHost.hostname());
tester.suspend(downNode.hostname());
tester.runMaintainers();
// The child is failed and the application redeployed; the cluster stays at 4 active nodes
assertEquals(1, tester.deployer.redeployments);
NodeList failedOrActive = tester.nodeRepository.nodes().list(Node.State.active, Node.State.failed);
assertEquals(4, failedOrActive.state(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(Set.of(downNode.hostname()), failedOrActive.state(Node.State.failed).nodeType(NodeType.tenant).hostnames());
}
@Test
// A ready host that stops making config requests is not failed for that
// reason alone: after 180 minutes of silence from one host, all 3 ready
// hosts remain ready and none are failed.
public void host_not_failed_without_config_requests() {
NodeFailTester tester = NodeFailTester.withTwoApplications();
// Baseline: with everyone making config requests, nothing changes over a day
for (int minutes = 0, interval = 30; minutes < 24 * 60; minutes += interval) {
tester.clock.advance(Duration.ofMinutes(interval));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals( 3, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.host).size());
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.host).size());
}
tester.clock.advance(Duration.ofMinutes(180));
Node host = tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.host).first().get();
// This host makes no config request for 180 minutes — still not failed
tester.allNodesMakeAConfigRequestExcept(host);
tester.runMaintainers();
assertEquals( 3, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.host).size());
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.host).size());
}
@Test
// End-to-end host failing scenario: hosts that stay down long enough are
// failed together with their children, as long as enough capacity remains
// to replace the displaced tenant nodes.
public void failing_hosts() {
NodeFailTester tester = NodeFailTester.withTwoApplications(7);
// Phase 0: stable for a day — nothing is failed
for (int minutes = 0, interval = 30; minutes < 24 * 60; minutes += interval) {
tester.clock.advance(Duration.ofMinutes(interval));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(13, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(7, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
}
// Phase 1: a host with 2 active children goes down; nothing happens for 45 min
String downHost1 = selectFirstParentHostWithNActiveNodesExcept(tester.nodeRepository, 2);
tester.serviceMonitor.setHostDown(downHost1);
for (int minutes = 0; minutes < 45; minutes += 5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals(0, tester.deployer.redeployments);
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(13, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(7, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
}
// After enough downtime: host + its 2 children (and a ready child) are failed,
// the 2 applications are redeployed (2 redeployments) plus 1 for the host removal
tester.clock.advance(Duration.ofMinutes(30));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(2 + 1, tester.deployer.redeployments);
assertEquals(3, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(10, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(6, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.host).size());
// Phase 2: a single active tenant node goes down — its count stays stable for a day
Node downTenant1 = tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).first().get();
tester.serviceMonitor.setHostDown(downTenant1.hostname());
for (int minutes = 0, interval = 30; minutes < 24 * 60; minutes += interval) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(interval));
tester.allNodesMakeAConfigRequestExcept();
assertEquals(3 + 1, tester.nodeRepository.nodes().list(Node.State.failed).size());
}
tester.clock.advance(Duration.ofMinutes(30));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(3 + 1, tester.deployer.redeployments);
assertEquals(4, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(9, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(6, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
// Phase 3: another 2-child host goes down and is failed after 90 minutes
String downHost2 = selectFirstParentHostWithNActiveNodesExcept(tester.nodeRepository, 2, downTenant1.parentHostname().get());
tester.serviceMonitor.setHostDown(downHost2);
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(90));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(5 + 2, tester.deployer.redeployments);
assertEquals(7, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(6, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(5, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
// Phase 4: a third 2-child host goes down and is failed after a day
String downHost3 = selectFirstParentHostWithNActiveNodesExcept(tester.nodeRepository, 2, downTenant1.parentHostname().get());
tester.serviceMonitor.setHostDown(downHost3);
tester.runMaintainers();
tester.clock.advance(Duration.ofDays(1));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(6 + 2, tester.deployer.redeployments);
assertEquals(9, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(4, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(5, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
}
@Test
// Of 10 proxy nodes, 2 are taken down; 1 is expected to be failed
// (see test_infra_application_fail for the scenario).
public void failing_proxy_nodes() {
test_infra_application_fail(NodeType.proxy, 10, 1);
}
@Test
// Of 3 config hosts, 2 are taken down; none are expected to be failed
// (see test_infra_application_fail for the scenario).
public void failing_config_hosts() {
test_infra_application_fail(NodeType.confighost, 3, 0);
}
// Shared scenario for infrastructure applications: take down host2 and host3,
// and verify that after an hour exactly expectedFailCount of them are failed
// and the application is redeployed that many times.
private void test_infra_application_fail(NodeType nodeType, int count, int expectedFailCount) {
NodeFailTester tester = NodeFailTester.withInfraApplication(nodeType, count);
// Baseline: stable for a day
for (int minutes = 0; minutes < 24 * 60; minutes +=5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals(count, tester.nodeRepository.nodes().list(Node.State.active).nodeType(nodeType).size());
}
Set<String> downHosts = Set.of("host2", "host3");
for (String downHost : downHosts)
tester.serviceMonitor.setHostDown(downHost);
// First 45 minutes of downtime: nothing is failed
for (int minutes = 0; minutes < 45; minutes +=5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals( 0, tester.deployer.redeployments);
assertEquals(count, tester.nodeRepository.nodes().list(Node.State.active).nodeType(nodeType).size());
}
tester.clock.advance(Duration.ofMinutes(60));
tester.runMaintainers();
assertEquals(expectedFailCount, tester.deployer.redeployments);
assertEquals(count - expectedFailCount, tester.nodeRepository.nodes().list(Node.State.active).nodeType(nodeType).size());
assertEquals(expectedFailCount, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(nodeType).size());
// Only the hosts we took down may be among the failed ones
tester.nodeRepository.nodes().list(Node.State.failed).nodeType(nodeType)
.forEach(node -> assertTrue(downHosts.contains(node.hostname())));
// A further hour does not fail any more nodes
tester.clock.advance(Duration.ofMinutes(60));
tester.runMaintainers();
assertEquals(count - expectedFailCount, tester.nodeRepository.nodes().list(Node.State.active).nodeType(nodeType).size());
}
@Test
// A ready node that acquires a hard-fail hardware report is failed
// immediately on the next maintenance run.
public void failing_divergent_ready_nodes() {
NodeFailTester tester = NodeFailTester.withNoApplications();
Node readyNode = tester.createReadyNodes(1).get(0);
tester.runMaintainers();
assertEquals(Node.State.ready, readyNode.state());
tester.nodeRepository.nodes().write(readyNode.with(new Reports().withReport(badTotalMemorySizeReport)), () -> {});
tester.runMaintainers();
assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).size());
}
@Test
// Truth table for badNode(numDown, numUp, numNotChecked): a node is
// considered bad only when at least one service is DOWN and none are UP;
// NOT_CHECKED services are ignored.
// Fix: the @Test annotation was duplicated, which is a compile error
// (JUnit 4's @Test is not a repeatable annotation).
public void testUpness() {
    assertFalse(badNode(0, 0, 0));
    assertFalse(badNode(0, 0, 2));
    assertFalse(badNode(0, 3, 0));
    assertFalse(badNode(0, 3, 2));
    assertTrue(badNode(1, 0, 0));
    assertTrue(badNode(1, 0, 2));
    assertFalse(badNode(1, 3, 0));
    assertFalse(badNode(1, 3, 2));
}
/** Appends {@code num} mocked {@link ServiceInstance}s reporting the given status to {@code list}. */
private void addServiceInstances(List<ServiceInstance> list, ServiceStatus status, int num) {
    int remaining = num;
    while (remaining > 0) {
        ServiceInstance instance = mock(ServiceInstance.class);
        when(instance.serviceStatus()).thenReturn(status);
        list.add(instance);
        remaining--;
    }
}
/**
 * Builds a shuffled list of mocked services with the given counts of DOWN, UP
 * and NOT_CHECKED statuses, and returns whether {@link NodeHealthTracker}
 * considers all services of such a node to be down.
 */
private boolean badNode(int numDown, int numUp, int numNotChecked) {
    List<ServiceInstance> instances = new ArrayList<>();
    addServiceInstances(instances, ServiceStatus.DOWN, numDown);
    addServiceInstances(instances, ServiceStatus.UP, numUp);
    addServiceInstances(instances, ServiceStatus.NOT_CHECKED, numNotChecked);
    Collections.shuffle(instances); // result must not depend on ordering
    return NodeHealthTracker.allDown(instances);
}
/**
 * Returns the hostname of the first parent host that has exactly {@code n}
 * active tenant child nodes and does not appear in {@code except}.
 *
 * @throws java.util.NoSuchElementException if no such host exists
 */
private static String selectFirstParentHostWithNActiveNodesExcept(NodeRepository nodeRepository, int n, String... except) {
    Set<String> excluded = Arrays.stream(except).collect(Collectors.toSet());
    return nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).stream()
                         .collect(Collectors.groupingBy(Node::parentHostname))
                         .entrySet().stream()
                         .filter(hostAndChildren -> hostAndChildren.getValue().size() == n)
                         .map(hostAndChildren -> hostAndChildren.getKey().get())
                         .filter(hostname -> ! excluded.contains(hostname))
                         .findFirst()
                         .get();
}
} | class NodeFailerTest {
// Hardware report with HARD_FAIL severity; nodes carrying it become
// candidates for failing in the tests below.
private static final Report badTotalMemorySizeReport = Report.basicReport(
"badTotalMemorySize", HARD_FAIL, Instant.now(), "too low");
@Test
// Attaches a hard-fail hardware report to a host with 2 active children and
// delegates to testNodeFailingWith to verify the host and its children are
// failed once they may be taken down.
public void fail_nodes_with_severe_reports_if_allowed_to_be_down() {
NodeFailTester tester = NodeFailTester.withTwoApplications(6);
String hostWithFailureReports = selectFirstParentHostWithNActiveNodesExcept(tester.nodeRepository, 2);
tester.nodeRepository.nodes().list().stream()
.filter(node -> node.hostname().equals(hostWithFailureReports))
.forEach(node -> {
Node updatedNode = node.with(node.reports().withReport(badTotalMemorySizeReport));
tester.nodeRepository.nodes().write(updatedNode, () -> {});
});
testNodeFailingWith(tester, hostWithFailureReports);
}
// Given a host marked with a hardware failure, verifies that its children are
// failed one by one as each becomes suspendable, and the host itself only
// after all children are failed and the host is suspended.
private void testNodeFailingWith(NodeFailTester tester, String hostWithHwFailure) {
Map<Node.State, List<String>> hostnamesByState = tester.nodeRepository.nodes().list().childrenOf(hostWithHwFailure).asList().stream()
.collect(Collectors.groupingBy(Node::state, Collectors.mapping(Node::hostname, Collectors.toList())));
assertEquals(2, hostnamesByState.get(Node.State.active).size());
assertEquals(1, hostnamesByState.get(Node.State.ready).size());
// First active child suspended: it and the ready child get failed
tester.suspend(hostnamesByState.get(Node.State.active).get(0));
tester.runMaintainers();
tester.clock.advance(Duration.ofHours(25));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
Map<Node.State, List<String>> expectedHostnamesByState1Iter = Map.of(
Node.State.failed, List.of(hostnamesByState.get(Node.State.ready).get(0), hostnamesByState.get(Node.State.active).get(0)),
Node.State.active, hostnamesByState.get(Node.State.active).subList(1, 2));
Map<Node.State, List<String>> hostnamesByState1Iter = tester.nodeRepository.nodes().list().childrenOf(hostWithHwFailure).asList().stream()
.collect(Collectors.groupingBy(Node::state, Collectors.mapping(Node::hostname, Collectors.toList())));
assertEquals(expectedHostnamesByState1Iter, hostnamesByState1Iter);
// Second active child suspended: all children are now failed, host still active
tester.suspend(hostnamesByState.get(Node.State.active).get(1));
tester.clock.advance(Duration.ofHours(25));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
Set<Node.State> childStates2Iter = tester.nodeRepository.nodes().list().childrenOf(hostWithHwFailure).asList().stream()
.map(Node::state).collect(Collectors.toSet());
assertEquals(Set.of(Node.State.failed), childStates2Iter);
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithHwFailure).get().state());
// Only once the host itself is suspended is it failed too
tester.suspend(hostWithHwFailure);
tester.runMaintainers();
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(hostWithHwFailure).get().state());
assertEquals(4, tester.nodeRepository.nodes().list(Node.State.failed).size());
}
@Test
// A host with a hard-fail hardware report is only failed once the host AND
// all of its active children are suspended; before that, only the ready
// child and already-suspended children are failed.
public void hw_fail_only_if_whole_host_is_suspended() {
NodeFailTester tester = NodeFailTester.withTwoApplications(6);
String hostWithFailureReports = selectFirstParentHostWithNActiveNodesExcept(tester.nodeRepository, 2);
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state());
Map<Node.State, List<String>> hostnamesByState = tester.nodeRepository.nodes().list().childrenOf(hostWithFailureReports).asList().stream()
.collect(Collectors.groupingBy(Node::state, Collectors.mapping(Node::hostname, Collectors.toList())));
assertEquals(2, hostnamesByState.get(Node.State.active).size());
String activeChild1 = hostnamesByState.get(Node.State.active).get(0);
String activeChild2 = hostnamesByState.get(Node.State.active).get(1);
assertEquals(1, hostnamesByState.get(Node.State.ready).size());
String readyChild = hostnamesByState.get(Node.State.ready).get(0);
Report badTotalMemorySizeReport = Report.basicReport("badTotalMemorySize", HARD_FAIL, Instant.now(), "too low");
tester.nodeRepository.nodes().list().stream()
.filter(node -> node.hostname().equals(hostWithFailureReports))
.forEach(node -> {
Node updatedNode = node.with(node.reports().withReport(badTotalMemorySizeReport));
tester.nodeRepository.nodes().write(updatedNode, () -> {});
});
// Nothing suspended yet: only the ready child is failed
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyChild).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild1).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild2).get().state());
// Host suspended, but children not: still nothing more failed
tester.suspend(hostWithFailureReports);
tester.clock.advance(Duration.ofHours(25));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyChild).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild1).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild2).get().state());
// First child suspended: only that child is failed
tester.suspend(activeChild1);
tester.clock.advance(Duration.ofHours(25));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyChild).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(activeChild1).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild2).get().state());
// Second child suspended too: now the whole host and all children are failed
tester.suspend(activeChild2);
tester.clock.advance(Duration.ofHours(25));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyChild).get().state());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(activeChild1).get().state());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(activeChild2).get().state());
}
@Test
// Down nodes belonging to a suspended application are recorded as down but
// not failed, while a down node of a non-suspended application is failed.
public void nodes_for_suspended_applications_are_not_failed() {
NodeFailTester tester = NodeFailTester.withTwoApplications();
tester.suspend(NodeFailTester.app1);
String host_from_suspended_app = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(1).hostname();
String host_from_normal_app = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app2).asList().get(3).hostname();
tester.serviceMonitor.setHostDown(host_from_suspended_app);
tester.serviceMonitor.setHostDown(host_from_normal_app);
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(65));
tester.runMaintainers();
// Both are seen as down ...
assertTrue(tester.nodeRepository.nodes().node(host_from_normal_app).get().isDown());
assertTrue(tester.nodeRepository.nodes().node(host_from_suspended_app).get().isDown());
// ... but only the one from the non-suspended application is failed
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(host_from_normal_app).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(host_from_suspended_app).get().state());
}
@Test
// When too many nodes are down at once the zone is considered not working,
// and the node failer deactivates itself (nothing is failed).
public void zone_is_not_working_if_too_many_nodes_down() {
NodeFailTester tester = NodeFailTester.withTwoApplications();
tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(0).hostname());
tester.runMaintainers();
assertTrue(tester.nodeRepository.nodes().isWorking());
tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(1).hostname());
tester.runMaintainers();
assertTrue(tester.nodeRepository.nodes().isWorking());
// Third down node tips the zone into a non-working state
tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(2).hostname());
tester.runMaintainers();
assertFalse(tester.nodeRepository.nodes().isWorking());
tester.clock.advance(Duration.ofMinutes(65));
tester.runMaintainers();
assertTrue("Node failing is deactivated", tester.nodeRepository.nodes().list(Node.State.failed).isEmpty());
}
@Test
// Core node-failing scenario: ready nodes with hard-fail reports are failed
// immediately, down active nodes are failed after sufficient downtime (and
// only while ready replacements exist), and replacement nodes get fresh
// membership indices.
public void node_failing() {
NodeFailTester tester = NodeFailTester.withTwoApplications();
// Baseline: stable for a day — nothing fails
for (int minutes = 0; minutes < 24 * 60; minutes +=5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals( 0, tester.deployer.redeployments);
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals( 4, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
}
// Two ready nodes get hard-fail reports and are failed on the next run
Node readyFail1 = tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).asList().get(2);
Node readyFail2 = tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).asList().get(3);
tester.nodeRepository.nodes().write(readyFail1.with(new Reports().withReport(badTotalMemorySizeReport)), () -> {});
tester.nodeRepository.nodes().write(readyFail2.with(new Reports().withReport(badTotalMemorySizeReport)), () -> {});
assertEquals(4, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
tester.runMaintainers();
assertEquals(2, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyFail1.hostname()).get().state());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyFail2.hostname()).get().state());
// Two active nodes go down; nothing is failed for the first 45 minutes
String downHost1 = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(1).hostname();
String downHost2 = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app2).asList().get(3).hostname();
tester.serviceMonitor.setHostDown(downHost1);
tester.serviceMonitor.setHostDown(downHost2);
for (int minutes = 0; minutes < 45; minutes +=5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals( 0, tester.deployer.redeployments);
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals( 2, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals( 2, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
}
// downHost1 recovers; after a day only downHost2 is failed and replaced
tester.serviceMonitor.setHostUp(downHost1);
tester.clock.advance(Duration.ofDays(1));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals( 1, tester.deployer.redeployments);
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals( 3, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals( 1, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(downHost2, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).asList().get(0).hostname());
// downHost1 goes down again and is failed even across a failer restart
tester.serviceMonitor.setHostDown(downHost1);
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
tester.clock.advance(Duration.ofMinutes(120));
tester.failer = tester.createFailer();
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals( 2, tester.deployer.redeployments);
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals( 4, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
// With no ready nodes left, a down node is NOT failed ...
Node lastNode = tester.highestIndex(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1));
tester.serviceMonitor.setHostDown(lastNode.hostname());
for (int minutes = 0; minutes < 75; minutes +=5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals( 2, tester.deployer.redeployments);
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals( 4, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
}
// ... until a new ready node appears, after which it is failed and replaced
tester.createReadyNodes(1, 16, NodeFailTester.nodeResources);
tester.clock.advance(Duration.ofDays(1));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals( 3, tester.deployer.redeployments);
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals( 5, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertTrue("The index of the last failed node is not reused",
tester.highestIndex(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1)).allocation().get().membership().index()
>
lastNode.allocation().get().membership().index());
assertEquals("Node failing does not cause recording of scaling events",
1,
tester.nodeRepository.applications().get(NodeFailTester.app1).get().cluster(NodeFailTester.testCluster).get().scalingEvents().size());
}
@Test
// Verifies the grace period after manually reactivating a failed node:
// even though the node is still reported down, it is not re-failed after
// 30 more minutes, but is failed again once a further 45 minutes pass.
public void re_activate_grace_period_test() {
NodeFailTester tester = NodeFailTester.withTwoApplications();
String downNode = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(1).hostname();
tester.serviceMonitor.setHostDown(downNode);
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
// Not down long enough yet: nothing is failed
assertEquals(0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
tester.clock.advance(Duration.ofMinutes(75));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(downNode).get().state());
// Operator puts the node back into service; still reported down by the monitor
tester.nodeRepository.nodes().reactivate(downNode, Agent.system, getClass().getSimpleName());
tester.clock.advance(Duration.ofMinutes(30));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
// Within the grace period: the node is not failed again yet
assertEquals(0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
tester.clock.advance(Duration.ofMinutes(45));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
// Grace period exhausted: failed again
assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(downNode).get().state());
}
@Test
// A down node in a container cluster is eventually failed and replaced,
// which is only possible because spare host capacity is available
// (4 hosts for a 3-node cluster with spareCount == 1).
public void node_failing_can_allocate_spare() {
var resources = new NodeResources(1, 20, 15, 1);
Capacity capacity = Capacity.from(new ClusterResources(3, 1, resources), false, true);
NodeFailTester tester = NodeFailTester.withOneUndeployedApplication(capacity);
assertEquals("Test depends on this setting in NodeFailTester", 1, tester.nodeRepository.spareCount());
tester.createAndActivateHosts(4, resources);
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test")).vespaVersion("6.42").build();
tester.activate(NodeFailTester.app1, cluster, capacity);
String downHost = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).first().get().hostname();
tester.serviceMonitor.setHostDown(downHost);
// For the first 45 minutes of downtime nothing is failed
for (int minutes = 0; minutes < 45; minutes +=5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals(0, tester.deployer.redeployments);
assertEquals(3, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
}
// After a day of downtime the node is failed and the app redeployed onto the spare
tester.clock.advance(Duration.ofDays(1));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(1, tester.deployer.redeployments);
assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(3, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(downHost, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).asList().get(0).hostname());
}
@Test
// A node whose parent host carries a severe hardware report is failed and
// replaced within its content-cluster group, using the spare host
// (5 hosts for a 4-node/2-group cluster with spareCount == 1).
public void node_failing_can_allocate_spare_to_replace_failed_node_in_group() {
NodeResources resources = new NodeResources(1, 20, 15, 1);
Capacity capacity = Capacity.from(new ClusterResources(4, 2, resources), false, true);
ClusterSpec spec = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test")).vespaVersion("6.42").build();
NodeFailTester tester = NodeFailTester.withOneUndeployedApplication(capacity, spec);
assertEquals("Test depends on this setting in NodeFailTester", 1, tester.nodeRepository.spareCount());
tester.createAndActivateHosts(5, resources);
tester.activate(NodeFailTester.app1, spec, capacity);
NodeList activeNodes = tester.nodeRepository.nodes().list(Node.State.active);
Node downNode = activeNodes.owner(NodeFailTester.app1).first().get();
Node downHost = activeNodes.parentOf(downNode).get();
// Mark the parent host with a hard-fail hardware report, then suspend both host and child
tester.tester.patchNode(downHost, (node) -> node.with(node.reports().withReport(badTotalMemorySizeReport)));
tester.suspend(downHost.hostname());
tester.suspend(downNode.hostname());
tester.runMaintainers();
// The child is failed and the application redeployed; the cluster stays at 4 active nodes
assertEquals(1, tester.deployer.redeployments);
NodeList failedOrActive = tester.nodeRepository.nodes().list(Node.State.active, Node.State.failed);
assertEquals(4, failedOrActive.state(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(Set.of(downNode.hostname()), failedOrActive.state(Node.State.failed).nodeType(NodeType.tenant).hostnames());
}
@Test
// A ready host that stops making config requests is not failed for that
// reason alone: after 180 minutes of silence from one host, all 3 ready
// hosts remain ready and none are failed.
public void host_not_failed_without_config_requests() {
NodeFailTester tester = NodeFailTester.withTwoApplications();
// Baseline: with everyone making config requests, nothing changes over a day
for (int minutes = 0, interval = 30; minutes < 24 * 60; minutes += interval) {
tester.clock.advance(Duration.ofMinutes(interval));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals( 3, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.host).size());
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.host).size());
}
tester.clock.advance(Duration.ofMinutes(180));
Node host = tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.host).first().get();
// This host makes no config request for 180 minutes — still not failed
tester.allNodesMakeAConfigRequestExcept(host);
tester.runMaintainers();
assertEquals( 3, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.host).size());
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.host).size());
}
@Test
public void failing_hosts() {
    // End-to-end scenario over 7 hosts and two applications: hosts and tenant
    // nodes go down one after another; verifies grace periods, failing of a
    // host together with its active children, and replacement from ready nodes.
    NodeFailTester tester = NodeFailTester.withTwoApplications(7);

    // Baseline: healthy for 24 hours -> counts stay constant.
    for (int minutes = 0, interval = 30; minutes < 24 * 60; minutes += interval) {
        tester.clock.advance(Duration.ofMinutes(interval));
        tester.allNodesMakeAConfigRequestExcept();
        tester.runMaintainers();
        assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
        assertEquals(13, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
        assertEquals(7, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
    }

    // Take down a host which has 2 active tenant children.
    String downHost1 = selectFirstParentHostWithNActiveNodesExcept(tester.nodeRepository, 2);
    tester.serviceMonitor.setHostDown(downHost1);

    // Within the 45-minute grace period nothing is failed.
    for (int minutes = 0; minutes < 45; minutes += 5 ) {
        tester.runMaintainers();
        tester.clock.advance(Duration.ofMinutes(5));
        tester.allNodesMakeAConfigRequestExcept();
        assertEquals(0, tester.deployer.redeployments);
        assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
        assertEquals(13, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
        assertEquals(7, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
    }

    // Past the grace period: the host and its 2 children are failed (2 child
    // redeployments + 1 host), children are replaced from ready nodes.
    tester.clock.advance(Duration.ofMinutes(30));
    tester.allNodesMakeAConfigRequestExcept();
    tester.runMaintainers();
    assertEquals(2 + 1, tester.deployer.redeployments);
    assertEquals(3, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
    assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
    assertEquals(10, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
    assertEquals(6, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
    assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.host).size());

    // Take down a single active tenant node; for 24 hours (while maintainers
    // run before each clock advance) the failed count stays unchanged.
    Node downTenant1 = tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).first().get();
    tester.serviceMonitor.setHostDown(downTenant1.hostname());
    for (int minutes = 0, interval = 30; minutes < 24 * 60; minutes += interval) {
        tester.runMaintainers();
        tester.clock.advance(Duration.ofMinutes(interval));
        tester.allNodesMakeAConfigRequestExcept();
        assertEquals(3 + 1, tester.nodeRepository.nodes().list(Node.State.failed).size());
    }

    // After its grace period the tenant node is failed and replaced.
    tester.clock.advance(Duration.ofMinutes(30));
    tester.allNodesMakeAConfigRequestExcept();
    tester.runMaintainers();
    assertEquals(3 + 1, tester.deployer.redeployments);
    assertEquals(4, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
    assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
    assertEquals(9, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
    assertEquals(6, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());

    // A second host (not the parent of downTenant1) goes down; after 90
    // minutes it and its 2 children are failed as well.
    String downHost2 = selectFirstParentHostWithNActiveNodesExcept(tester.nodeRepository, 2, downTenant1.parentHostname().get());
    tester.serviceMonitor.setHostDown(downHost2);
    tester.runMaintainers();
    tester.clock.advance(Duration.ofMinutes(90));
    tester.allNodesMakeAConfigRequestExcept();
    tester.runMaintainers();
    assertEquals(5 + 2, tester.deployer.redeployments);
    assertEquals(7, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
    assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
    assertEquals(6, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
    assertEquals(5, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());

    // A third host goes down; after a day its children are failed, but the
    // active host count does not drop further (no host redeployment here:
    // only 6 + 2 total redeployments).
    String downHost3 = selectFirstParentHostWithNActiveNodesExcept(tester.nodeRepository, 2, downTenant1.parentHostname().get());
    tester.serviceMonitor.setHostDown(downHost3);
    tester.runMaintainers();
    tester.clock.advance(Duration.ofDays(1));
    tester.allNodesMakeAConfigRequestExcept();
    tester.runMaintainers();
    assertEquals(6 + 2, tester.deployer.redeployments);
    assertEquals(9, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
    assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
    assertEquals(4, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
    assertEquals(5, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
}
@Test
public void failing_proxy_nodes() {
    // 10 proxy nodes; expects 1 to end up failed (see test_infra_application_fail).
    test_infra_application_fail(NodeType.proxy, 10, 1);
}
@Test
public void failing_config_hosts() {
    // 3 config hosts; expects 0 to be failed (see test_infra_application_fail).
    test_infra_application_fail(NodeType.confighost, 3, 0);
}
/**
 * Shared scenario for infrastructure applications: deploys {@code count}
 * nodes of {@code nodeType}, takes "host2" and "host3" down, and verifies
 * that after the grace period exactly {@code expectedFailCount} of them are
 * failed (and that failed nodes are among the down hosts).
 *
 * @param nodeType          infrastructure node type under test
 * @param count             number of nodes in the infra application
 * @param expectedFailCount number of nodes expected to end up in 'failed'
 */
private void test_infra_application_fail(NodeType nodeType, int count, int expectedFailCount) {
    NodeFailTester tester = NodeFailTester.withInfraApplication(nodeType, count);

    // Baseline: healthy for 24 hours -> all nodes stay active.
    for (int minutes = 0; minutes < 24 * 60; minutes +=5 ) {
        tester.runMaintainers();
        tester.clock.advance(Duration.ofMinutes(5));
        tester.allNodesMakeAConfigRequestExcept();
        assertEquals(count, tester.nodeRepository.nodes().list(Node.State.active).nodeType(nodeType).size());
    }

    // Two hosts go down.
    Set<String> downHosts = Set.of("host2", "host3");
    for (String downHost : downHosts)
        tester.serviceMonitor.setHostDown(downHost);

    // Within the 45-minute grace period nothing is failed.
    for (int minutes = 0; minutes < 45; minutes +=5 ) {
        tester.runMaintainers();
        tester.clock.advance(Duration.ofMinutes(5));
        tester.allNodesMakeAConfigRequestExcept();
        assertEquals( 0, tester.deployer.redeployments);
        assertEquals(count, tester.nodeRepository.nodes().list(Node.State.active).nodeType(nodeType).size());
    }

    // After the grace period, expectedFailCount nodes are failed ...
    tester.clock.advance(Duration.ofMinutes(60));
    tester.runMaintainers();
    assertEquals(expectedFailCount, tester.deployer.redeployments);
    assertEquals(count - expectedFailCount, tester.nodeRepository.nodes().list(Node.State.active).nodeType(nodeType).size());
    assertEquals(expectedFailCount, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(nodeType).size());
    // ... and every failed node is one of the hosts we took down.
    tester.nodeRepository.nodes().list(Node.State.failed).nodeType(nodeType)
          .forEach(node -> assertTrue(downHosts.contains(node.hostname())));

    // Another hour changes nothing further.
    tester.clock.advance(Duration.ofMinutes(60));
    tester.runMaintainers();
    assertEquals(count - expectedFailCount, tester.nodeRepository.nodes().list(Node.State.active).nodeType(nodeType).size());
}
@Test
public void failing_divergent_ready_nodes() {
    // A ready node carrying a HARD_FAIL report must be failed by the maintainers.
    NodeFailTester tester = NodeFailTester.withNoApplications();
    Node node = tester.createReadyNodes(1).get(0);

    // Without any report the node stays ready.
    tester.runMaintainers();
    assertEquals(Node.State.ready, node.state());

    // Attach a hard-fail hardware report and run the maintainers again.
    Node reported = node.with(new Reports().withReport(badTotalMemorySizeReport));
    tester.nodeRepository.nodes().write(reported, () -> {});
    tester.runMaintainers();
    assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).size());
}
// Bug fix: the @Test annotation was duplicated on this method. org.junit.Test
// is not @Repeatable, so repeating it does not compile; keep a single @Test.
@Test
public void testUpness() {
    // badNode(numDown, numUp, numNotChecked) wraps NodeHealthTracker.allDown:
    // per the cases below it is true only when at least one service is DOWN
    // and none is UP; NOT_CHECKED services do not affect the outcome.
    assertFalse(badNode(0, 0, 0));
    assertFalse(badNode(0, 0, 2));
    assertFalse(badNode(0, 3, 0));
    assertFalse(badNode(0, 3, 2));
    assertTrue(badNode(1, 0, 0));
    assertTrue(badNode(1, 0, 2));
    assertFalse(badNode(1, 3, 0));
    assertFalse(badNode(1, 3, 2));
}
/**
 * Appends {@code num} mocked ServiceInstances, each reporting the given
 * status, to {@code list}.
 */
private void addServiceInstances(List<ServiceInstance> list, ServiceStatus status, int num) {
    for (int added = 0; added < num; added++) {
        ServiceInstance instance = mock(ServiceInstance.class);
        when(instance.serviceStatus()).thenReturn(status);
        list.add(instance);
    }
}
/**
 * Builds a shuffled service list with the given number of DOWN, UP and
 * NOT_CHECKED instances and reports whether NodeHealthTracker considers
 * the node all-down.
 */
private boolean badNode(int numDown, int numUp, int numNotChecked) {
    List<ServiceInstance> instances = new ArrayList<>();
    addServiceInstances(instances, ServiceStatus.DOWN, numDown);
    addServiceInstances(instances, ServiceStatus.UP, numUp);
    addServiceInstances(instances, ServiceStatus.NOT_CHECKED, numNotChecked);
    // Shuffle so the verdict cannot depend on insertion order.
    Collections.shuffle(instances);
    return NodeHealthTracker.allDown(instances);
}
/**
 * Selects the first parent host that:
 * - has exactly n nodes in state 'active'
 * - is not present in the 'except' array
 *
 * Throws NoSuchElementException (from Optional.get) if no such parent exists,
 * or if a matching group's parent hostname is absent.
 */
private static String selectFirstParentHostWithNActiveNodesExcept(NodeRepository nodeRepository, int n, String... except) {
    Set<String> exceptSet = Arrays.stream(except).collect(Collectors.toSet());
    return nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).stream()
                         // Group active tenant nodes by their (Optional) parent hostname.
                         .collect(Collectors.groupingBy(Node::parentHostname))
                         .entrySet().stream()
                         .filter(entry -> entry.getValue().size() == n)
                         .map(Map.Entry::getKey)
                         // Unwrap the Optional key; a plain map replaces the
                         // former flatMap over a singleton stream.
                         .map(parentHost -> parentHost.get())
                         .filter(hostname -> ! exceptSet.contains(hostname))
                         .findFirst().get();
}
} | |
:100: | public void node_failing_throttle() {
{
NodeFailTester tester = NodeFailTester.withTwoApplications(10);
NodeList hosts = tester.nodeRepository.nodes().list().nodeType(NodeType.host);
List<Node> deadNodes = new ArrayList<>();
List<Node> failedHosts = hosts.asList().subList(0, 3);
failedHosts.forEach(host -> {
tester.serviceMonitor.setHostDown(host.hostname());
deadNodes.add(host);
});
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(61));
tester.allNodesMakeAConfigRequestExcept(deadNodes);
tester.runMaintainers();
assertEquals(2 + /* hosts */
(2 * 3) /* containers per host */,
tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is indicated by the metric", 1, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("Throttled host failures", 1, tester.metric.values.get(NodeFailer.throttledHostFailuresMetric));
for (int minutes = 0, interval = 30; minutes <= 23 * 60; minutes += interval) {
tester.clock.advance(Duration.ofMinutes(interval));
tester.allNodesMakeAConfigRequestExcept(deadNodes);
}
tester.runMaintainers();
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is indicated by the metric", 1, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("Throttled host failures", 1, tester.metric.values.get(NodeFailer.throttledHostFailuresMetric));
tester.clock.advance(Duration.ofMinutes(30));
tester.runMaintainers();
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is not indicated by the metric, as no throttled attempt is made", 0, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("No throttled node failures", 0, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric));
tester.clock.advance(Duration.ofHours(25));
tester.allNodesMakeAConfigRequestExcept(deadNodes);
tester.runMaintainers();
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is not indicated by the metric", 0, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("No throttled node failures", 0, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric));
}
{
NodeFailTester tester = NodeFailTester.withTwoApplications(300, 100, 100);
NodeList allNodes = tester.nodeRepository.nodes().list();
assertEquals(500, allNodes.size());
tester.runMaintainers();
allNodes.state(Node.State.active)
.nodeType(NodeType.tenant)
.stream()
.limit(15)
.forEach(host -> tester.serviceMonitor.setHostDown(host.hostname()));
tester.runMaintainers();
tester.clock.advance(Duration.ofHours(2));
tester.runMaintainers();
assertEquals(10, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is indicated by the metric.", 1, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("Throttled node failures", 5, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric));
tester.clock.advance(Duration.ofHours(6));
tester.runMaintainers();
assertEquals(10, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is indicated by the metric.", 1, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("Throttled node failures", 5, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric));
tester.clock.advance(Duration.ofHours(18));
tester.runMaintainers();
assertEquals(15, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is not indicated by the metric, as no throttled attempt is made.", 0, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("No throttled node failures", 0, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric));
}
} | public void node_failing_throttle() {
{
NodeFailTester tester = NodeFailTester.withTwoApplications(10);
NodeList hosts = tester.nodeRepository.nodes().list().nodeType(NodeType.host);
List<Node> failedHosts = hosts.asList().subList(0, 3);
failedHosts.forEach(host -> {
tester.serviceMonitor.setHostDown(host.hostname());
});
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(61));
tester.runMaintainers();
assertEquals(2 + /* hosts */
(2 * 3) /* containers per host */,
tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is indicated by the metric", 1, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("Throttled host failures", 1, tester.metric.values.get(NodeFailer.throttledHostFailuresMetric));
for (int minutes = 0, interval = 30; minutes <= 23 * 60; minutes += interval) {
tester.clock.advance(Duration.ofMinutes(interval));
}
tester.runMaintainers();
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is indicated by the metric", 1, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("Throttled host failures", 1, tester.metric.values.get(NodeFailer.throttledHostFailuresMetric));
tester.clock.advance(Duration.ofMinutes(30));
tester.runMaintainers();
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is not indicated by the metric, as no throttled attempt is made", 0, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("No throttled node failures", 0, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric));
tester.clock.advance(Duration.ofHours(25));
tester.runMaintainers();
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is not indicated by the metric", 0, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("No throttled node failures", 0, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric));
}
{
NodeFailTester tester = NodeFailTester.withTwoApplications(300, 100, 100);
NodeList allNodes = tester.nodeRepository.nodes().list();
assertEquals(500, allNodes.size());
tester.runMaintainers();
allNodes.state(Node.State.active)
.nodeType(NodeType.tenant)
.stream()
.limit(15)
.forEach(host -> tester.serviceMonitor.setHostDown(host.hostname()));
tester.runMaintainers();
tester.clock.advance(Duration.ofHours(2));
tester.runMaintainers();
assertEquals(10, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is indicated by the metric.", 1, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("Throttled node failures", 5, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric));
tester.clock.advance(Duration.ofHours(6));
tester.runMaintainers();
assertEquals(10, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is indicated by the metric.", 1, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("Throttled node failures", 5, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric));
tester.clock.advance(Duration.ofHours(18));
tester.runMaintainers();
assertEquals(15, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is not indicated by the metric, as no throttled attempt is made.", 0, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("No throttled node failures", 0, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric));
}
} | class NodeFailerTest {
private static final Report badTotalMemorySizeReport = Report.basicReport(
"badTotalMemorySize", HARD_FAIL, Instant.now(), "too low");
@Test
public void fail_nodes_with_severe_reports_if_allowed_to_be_down() {
NodeFailTester tester = NodeFailTester.withTwoApplications(6);
String hostWithFailureReports = selectFirstParentHostWithNActiveNodesExcept(tester.nodeRepository, 2);
tester.nodeRepository.nodes().list().stream()
.filter(node -> node.hostname().equals(hostWithFailureReports))
.forEach(node -> {
Node updatedNode = node.with(node.reports().withReport(badTotalMemorySizeReport));
tester.nodeRepository.nodes().write(updatedNode, () -> {});
});
testNodeFailingWith(tester, hostWithFailureReports);
}
private void testNodeFailingWith(NodeFailTester tester, String hostWithHwFailure) {
Map<Node.State, List<String>> hostnamesByState = tester.nodeRepository.nodes().list().childrenOf(hostWithHwFailure).asList().stream()
.collect(Collectors.groupingBy(Node::state, Collectors.mapping(Node::hostname, Collectors.toList())));
assertEquals(2, hostnamesByState.get(Node.State.active).size());
assertEquals(1, hostnamesByState.get(Node.State.ready).size());
tester.suspend(hostnamesByState.get(Node.State.active).get(0));
tester.runMaintainers();
tester.clock.advance(Duration.ofHours(25));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
Map<Node.State, List<String>> expectedHostnamesByState1Iter = Map.of(
Node.State.failed, List.of(hostnamesByState.get(Node.State.ready).get(0), hostnamesByState.get(Node.State.active).get(0)),
Node.State.active, hostnamesByState.get(Node.State.active).subList(1, 2));
Map<Node.State, List<String>> hostnamesByState1Iter = tester.nodeRepository.nodes().list().childrenOf(hostWithHwFailure).asList().stream()
.collect(Collectors.groupingBy(Node::state, Collectors.mapping(Node::hostname, Collectors.toList())));
assertEquals(expectedHostnamesByState1Iter, hostnamesByState1Iter);
tester.suspend(hostnamesByState.get(Node.State.active).get(1));
tester.clock.advance(Duration.ofHours(25));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
Set<Node.State> childStates2Iter = tester.nodeRepository.nodes().list().childrenOf(hostWithHwFailure).asList().stream()
.map(Node::state).collect(Collectors.toSet());
assertEquals(Set.of(Node.State.failed), childStates2Iter);
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithHwFailure).get().state());
tester.suspend(hostWithHwFailure);
tester.runMaintainers();
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(hostWithHwFailure).get().state());
assertEquals(4, tester.nodeRepository.nodes().list(Node.State.failed).size());
}
@Test
public void hw_fail_only_if_whole_host_is_suspended() {
NodeFailTester tester = NodeFailTester.withTwoApplications(6);
String hostWithFailureReports = selectFirstParentHostWithNActiveNodesExcept(tester.nodeRepository, 2);
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state());
Map<Node.State, List<String>> hostnamesByState = tester.nodeRepository.nodes().list().childrenOf(hostWithFailureReports).asList().stream()
.collect(Collectors.groupingBy(Node::state, Collectors.mapping(Node::hostname, Collectors.toList())));
assertEquals(2, hostnamesByState.get(Node.State.active).size());
String activeChild1 = hostnamesByState.get(Node.State.active).get(0);
String activeChild2 = hostnamesByState.get(Node.State.active).get(1);
assertEquals(1, hostnamesByState.get(Node.State.ready).size());
String readyChild = hostnamesByState.get(Node.State.ready).get(0);
Report badTotalMemorySizeReport = Report.basicReport("badTotalMemorySize", HARD_FAIL, Instant.now(), "too low");
tester.nodeRepository.nodes().list().stream()
.filter(node -> node.hostname().equals(hostWithFailureReports))
.forEach(node -> {
Node updatedNode = node.with(node.reports().withReport(badTotalMemorySizeReport));
tester.nodeRepository.nodes().write(updatedNode, () -> {});
});
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyChild).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild1).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild2).get().state());
tester.suspend(hostWithFailureReports);
tester.clock.advance(Duration.ofHours(25));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyChild).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild1).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild2).get().state());
tester.suspend(activeChild1);
tester.clock.advance(Duration.ofHours(25));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyChild).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(activeChild1).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild2).get().state());
tester.suspend(activeChild2);
tester.clock.advance(Duration.ofHours(25));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyChild).get().state());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(activeChild1).get().state());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(activeChild2).get().state());
}
@Test
public void nodes_for_suspended_applications_are_not_failed() {
NodeFailTester tester = NodeFailTester.withTwoApplications();
tester.suspend(NodeFailTester.app1);
String host_from_suspended_app = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(1).hostname();
String host_from_normal_app = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app2).asList().get(3).hostname();
tester.serviceMonitor.setHostDown(host_from_suspended_app);
tester.serviceMonitor.setHostDown(host_from_normal_app);
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(65));
tester.runMaintainers();
assertTrue(tester.nodeRepository.nodes().node(host_from_normal_app).get().isDown());
assertTrue(tester.nodeRepository.nodes().node(host_from_suspended_app).get().isDown());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(host_from_normal_app).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(host_from_suspended_app).get().state());
}
@Test
public void zone_is_not_working_if_too_many_nodes_down() {
NodeFailTester tester = NodeFailTester.withTwoApplications();
tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(0).hostname());
tester.runMaintainers();
assertTrue(tester.nodeRepository.nodes().isWorking());
tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(1).hostname());
tester.runMaintainers();
assertTrue(tester.nodeRepository.nodes().isWorking());
tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(2).hostname());
tester.runMaintainers();
assertFalse(tester.nodeRepository.nodes().isWorking());
tester.clock.advance(Duration.ofMinutes(65));
tester.runMaintainers();
assertTrue("Node failing is deactivated", tester.nodeRepository.nodes().list(Node.State.failed).isEmpty());
}
@Test
public void node_failing() {
NodeFailTester tester = NodeFailTester.withTwoApplications();
for (int minutes = 0; minutes < 24 * 60; minutes +=5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals( 0, tester.deployer.redeployments);
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals( 4, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
}
Node readyFail1 = tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).asList().get(2);
Node readyFail2 = tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).asList().get(3);
tester.nodeRepository.nodes().write(readyFail1.with(new Reports().withReport(badTotalMemorySizeReport)), () -> {});
tester.nodeRepository.nodes().write(readyFail2.with(new Reports().withReport(badTotalMemorySizeReport)), () -> {});
assertEquals(4, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
tester.runMaintainers();
assertEquals(2, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyFail1.hostname()).get().state());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyFail2.hostname()).get().state());
String downHost1 = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(1).hostname();
String downHost2 = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app2).asList().get(3).hostname();
tester.serviceMonitor.setHostDown(downHost1);
tester.serviceMonitor.setHostDown(downHost2);
for (int minutes = 0; minutes < 45; minutes +=5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals( 0, tester.deployer.redeployments);
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals( 2, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals( 2, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
}
tester.serviceMonitor.setHostUp(downHost1);
tester.clock.advance(Duration.ofDays(1));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals( 1, tester.deployer.redeployments);
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals( 3, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals( 1, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(downHost2, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).asList().get(0).hostname());
tester.serviceMonitor.setHostDown(downHost1);
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
tester.clock.advance(Duration.ofMinutes(120));
tester.failer = tester.createFailer();
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals( 2, tester.deployer.redeployments);
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals( 4, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
Node lastNode = tester.highestIndex(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1));
tester.serviceMonitor.setHostDown(lastNode.hostname());
for (int minutes = 0; minutes < 75; minutes +=5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals( 2, tester.deployer.redeployments);
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals( 4, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
}
tester.createReadyNodes(1, 16, NodeFailTester.nodeResources);
tester.clock.advance(Duration.ofDays(1));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals( 3, tester.deployer.redeployments);
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals( 5, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertTrue("The index of the last failed node is not reused",
tester.highestIndex(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1)).allocation().get().membership().index()
>
lastNode.allocation().get().membership().index());
assertEquals("Node failing does not cause recording of scaling events",
1,
tester.nodeRepository.applications().get(NodeFailTester.app1).get().cluster(NodeFailTester.testCluster).get().scalingEvents().size());
}
@Test
public void re_activate_grace_period_test() {
NodeFailTester tester = NodeFailTester.withTwoApplications();
String downNode = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(1).hostname();
tester.serviceMonitor.setHostDown(downNode);
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
tester.clock.advance(Duration.ofMinutes(75));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(downNode).get().state());
tester.nodeRepository.nodes().reactivate(downNode, Agent.system, getClass().getSimpleName());
tester.clock.advance(Duration.ofMinutes(30));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
tester.clock.advance(Duration.ofMinutes(45));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(downNode).get().state());
}
@Test
public void node_failing_can_allocate_spare() {
var resources = new NodeResources(1, 20, 15, 1);
Capacity capacity = Capacity.from(new ClusterResources(3, 1, resources), false, true);
NodeFailTester tester = NodeFailTester.withOneUndeployedApplication(capacity);
assertEquals("Test depends on this setting in NodeFailTester", 1, tester.nodeRepository.spareCount());
tester.createAndActivateHosts(4, resources);
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test")).vespaVersion("6.42").build();
tester.activate(NodeFailTester.app1, cluster, capacity);
String downHost = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).first().get().hostname();
tester.serviceMonitor.setHostDown(downHost);
for (int minutes = 0; minutes < 45; minutes +=5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals(0, tester.deployer.redeployments);
assertEquals(3, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
}
tester.clock.advance(Duration.ofDays(1));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(1, tester.deployer.redeployments);
assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(3, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(downHost, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).asList().get(0).hostname());
}
@Test
public void node_failing_can_allocate_spare_to_replace_failed_node_in_group() {
NodeResources resources = new NodeResources(1, 20, 15, 1);
Capacity capacity = Capacity.from(new ClusterResources(4, 2, resources), false, true);
ClusterSpec spec = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test")).vespaVersion("6.42").build();
NodeFailTester tester = NodeFailTester.withOneUndeployedApplication(capacity, spec);
assertEquals("Test depends on this setting in NodeFailTester", 1, tester.nodeRepository.spareCount());
tester.createAndActivateHosts(5, resources);
tester.activate(NodeFailTester.app1, spec, capacity);
NodeList activeNodes = tester.nodeRepository.nodes().list(Node.State.active);
Node downNode = activeNodes.owner(NodeFailTester.app1).first().get();
Node downHost = activeNodes.parentOf(downNode).get();
tester.tester.patchNode(downHost, (node) -> node.with(node.reports().withReport(badTotalMemorySizeReport)));
tester.suspend(downHost.hostname());
tester.suspend(downNode.hostname());
tester.runMaintainers();
assertEquals(1, tester.deployer.redeployments);
NodeList failedOrActive = tester.nodeRepository.nodes().list(Node.State.active, Node.State.failed);
assertEquals(4, failedOrActive.state(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(Set.of(downNode.hostname()), failedOrActive.state(Node.State.failed).nodeType(NodeType.tenant).hostnames());
}
@Test
public void host_not_failed_without_config_requests() {
NodeFailTester tester = NodeFailTester.withTwoApplications();
for (int minutes = 0, interval = 30; minutes < 24 * 60; minutes += interval) {
tester.clock.advance(Duration.ofMinutes(interval));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals( 3, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.host).size());
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.host).size());
}
tester.clock.advance(Duration.ofMinutes(180));
Node host = tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.host).first().get();
tester.allNodesMakeAConfigRequestExcept(host);
tester.runMaintainers();
assertEquals( 3, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.host).size());
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.host).size());
}
@Test
public void failing_hosts() {
NodeFailTester tester = NodeFailTester.withTwoApplications(7);
for (int minutes = 0, interval = 30; minutes < 24 * 60; minutes += interval) {
tester.clock.advance(Duration.ofMinutes(interval));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(13, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(7, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
}
String downHost1 = selectFirstParentHostWithNActiveNodesExcept(tester.nodeRepository, 2);
tester.serviceMonitor.setHostDown(downHost1);
for (int minutes = 0; minutes < 45; minutes += 5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals(0, tester.deployer.redeployments);
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(13, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(7, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
}
tester.clock.advance(Duration.ofMinutes(30));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(2 + 1, tester.deployer.redeployments);
assertEquals(3, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(10, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(6, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.host).size());
Node downTenant1 = tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).first().get();
tester.serviceMonitor.setHostDown(downTenant1.hostname());
for (int minutes = 0, interval = 30; minutes < 24 * 60; minutes += interval) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(interval));
tester.allNodesMakeAConfigRequestExcept();
assertEquals(3 + 1, tester.nodeRepository.nodes().list(Node.State.failed).size());
}
tester.clock.advance(Duration.ofMinutes(30));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(3 + 1, tester.deployer.redeployments);
assertEquals(4, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(9, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(6, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
String downHost2 = selectFirstParentHostWithNActiveNodesExcept(tester.nodeRepository, 2, downTenant1.parentHostname().get());
tester.serviceMonitor.setHostDown(downHost2);
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(90));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(5 + 2, tester.deployer.redeployments);
assertEquals(7, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(6, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(5, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
String downHost3 = selectFirstParentHostWithNActiveNodesExcept(tester.nodeRepository, 2, downTenant1.parentHostname().get());
tester.serviceMonitor.setHostDown(downHost3);
tester.runMaintainers();
tester.clock.advance(Duration.ofDays(1));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(6 + 2, tester.deployer.redeployments);
assertEquals(9, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(4, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(5, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
}
@Test
public void failing_proxy_nodes() {
test_infra_application_fail(NodeType.proxy, 10, 1);
}
@Test
public void failing_config_hosts() {
test_infra_application_fail(NodeType.confighost, 3, 0);
}
private void test_infra_application_fail(NodeType nodeType, int count, int expectedFailCount) {
NodeFailTester tester = NodeFailTester.withInfraApplication(nodeType, count);
for (int minutes = 0; minutes < 24 * 60; minutes +=5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals(count, tester.nodeRepository.nodes().list(Node.State.active).nodeType(nodeType).size());
}
Set<String> downHosts = Set.of("host2", "host3");
for (String downHost : downHosts)
tester.serviceMonitor.setHostDown(downHost);
for (int minutes = 0; minutes < 45; minutes +=5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals( 0, tester.deployer.redeployments);
assertEquals(count, tester.nodeRepository.nodes().list(Node.State.active).nodeType(nodeType).size());
}
tester.clock.advance(Duration.ofMinutes(60));
tester.runMaintainers();
assertEquals(expectedFailCount, tester.deployer.redeployments);
assertEquals(count - expectedFailCount, tester.nodeRepository.nodes().list(Node.State.active).nodeType(nodeType).size());
assertEquals(expectedFailCount, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(nodeType).size());
tester.nodeRepository.nodes().list(Node.State.failed).nodeType(nodeType)
.forEach(node -> assertTrue(downHosts.contains(node.hostname())));
tester.clock.advance(Duration.ofMinutes(60));
tester.runMaintainers();
assertEquals(count - expectedFailCount, tester.nodeRepository.nodes().list(Node.State.active).nodeType(nodeType).size());
}
@Test
public void failing_divergent_ready_nodes() {
NodeFailTester tester = NodeFailTester.withNoApplications();
Node readyNode = tester.createReadyNodes(1).get(0);
tester.runMaintainers();
assertEquals(Node.State.ready, readyNode.state());
tester.nodeRepository.nodes().write(readyNode.with(new Reports().withReport(badTotalMemorySizeReport)), () -> {});
tester.runMaintainers();
assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).size());
}
@Test
@Test
public void testUpness() {
assertFalse(badNode(0, 0, 0));
assertFalse(badNode(0, 0, 2));
assertFalse(badNode(0, 3, 0));
assertFalse(badNode(0, 3, 2));
assertTrue(badNode(1, 0, 0));
assertTrue(badNode(1, 0, 2));
assertFalse(badNode(1, 3, 0));
assertFalse(badNode(1, 3, 2));
}
private void addServiceInstances(List<ServiceInstance> list, ServiceStatus status, int num) {
for (int i = 0; i < num; ++i) {
ServiceInstance service = mock(ServiceInstance.class);
when(service.serviceStatus()).thenReturn(status);
list.add(service);
}
}
private boolean badNode(int numDown, int numUp, int numNotChecked) {
List<ServiceInstance> services = new ArrayList<>();
addServiceInstances(services, ServiceStatus.DOWN, numDown);
addServiceInstances(services, ServiceStatus.UP, numUp);
addServiceInstances(services, ServiceStatus.NOT_CHECKED, numNotChecked);
Collections.shuffle(services);
return NodeHealthTracker.allDown(services);
}
/**
* Selects the first parent host that:
* - has exactly n nodes in state 'active'
* - is not present in the 'except' array
*/
private static String selectFirstParentHostWithNActiveNodesExcept(NodeRepository nodeRepository, int n, String... except) {
Set<String> exceptSet = Arrays.stream(except).collect(Collectors.toSet());
return nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).stream()
.collect(Collectors.groupingBy(Node::parentHostname))
.entrySet().stream()
.filter(entry -> entry.getValue().size() == n)
.map(Map.Entry::getKey)
.flatMap(parentHost -> Stream.of(parentHost.get()))
.filter(node -> ! exceptSet.contains(node))
.findFirst().get();
}
} | class NodeFailerTest {
private static final Report badTotalMemorySizeReport = Report.basicReport(
"badTotalMemorySize", HARD_FAIL, Instant.now(), "too low");
@Test
public void fail_nodes_with_severe_reports_if_allowed_to_be_down() {
NodeFailTester tester = NodeFailTester.withTwoApplications(6);
String hostWithFailureReports = selectFirstParentHostWithNActiveNodesExcept(tester.nodeRepository, 2);
tester.nodeRepository.nodes().list().stream()
.filter(node -> node.hostname().equals(hostWithFailureReports))
.forEach(node -> {
Node updatedNode = node.with(node.reports().withReport(badTotalMemorySizeReport));
tester.nodeRepository.nodes().write(updatedNode, () -> {});
});
testNodeFailingWith(tester, hostWithFailureReports);
}
private void testNodeFailingWith(NodeFailTester tester, String hostWithHwFailure) {
Map<Node.State, List<String>> hostnamesByState = tester.nodeRepository.nodes().list().childrenOf(hostWithHwFailure).asList().stream()
.collect(Collectors.groupingBy(Node::state, Collectors.mapping(Node::hostname, Collectors.toList())));
assertEquals(2, hostnamesByState.get(Node.State.active).size());
assertEquals(1, hostnamesByState.get(Node.State.ready).size());
tester.suspend(hostnamesByState.get(Node.State.active).get(0));
tester.runMaintainers();
tester.clock.advance(Duration.ofHours(25));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
Map<Node.State, List<String>> expectedHostnamesByState1Iter = Map.of(
Node.State.failed, List.of(hostnamesByState.get(Node.State.ready).get(0), hostnamesByState.get(Node.State.active).get(0)),
Node.State.active, hostnamesByState.get(Node.State.active).subList(1, 2));
Map<Node.State, List<String>> hostnamesByState1Iter = tester.nodeRepository.nodes().list().childrenOf(hostWithHwFailure).asList().stream()
.collect(Collectors.groupingBy(Node::state, Collectors.mapping(Node::hostname, Collectors.toList())));
assertEquals(expectedHostnamesByState1Iter, hostnamesByState1Iter);
tester.suspend(hostnamesByState.get(Node.State.active).get(1));
tester.clock.advance(Duration.ofHours(25));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
Set<Node.State> childStates2Iter = tester.nodeRepository.nodes().list().childrenOf(hostWithHwFailure).asList().stream()
.map(Node::state).collect(Collectors.toSet());
assertEquals(Set.of(Node.State.failed), childStates2Iter);
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithHwFailure).get().state());
tester.suspend(hostWithHwFailure);
tester.runMaintainers();
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(hostWithHwFailure).get().state());
assertEquals(4, tester.nodeRepository.nodes().list(Node.State.failed).size());
}
@Test
public void hw_fail_only_if_whole_host_is_suspended() {
NodeFailTester tester = NodeFailTester.withTwoApplications(6);
String hostWithFailureReports = selectFirstParentHostWithNActiveNodesExcept(tester.nodeRepository, 2);
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state());
Map<Node.State, List<String>> hostnamesByState = tester.nodeRepository.nodes().list().childrenOf(hostWithFailureReports).asList().stream()
.collect(Collectors.groupingBy(Node::state, Collectors.mapping(Node::hostname, Collectors.toList())));
assertEquals(2, hostnamesByState.get(Node.State.active).size());
String activeChild1 = hostnamesByState.get(Node.State.active).get(0);
String activeChild2 = hostnamesByState.get(Node.State.active).get(1);
assertEquals(1, hostnamesByState.get(Node.State.ready).size());
String readyChild = hostnamesByState.get(Node.State.ready).get(0);
Report badTotalMemorySizeReport = Report.basicReport("badTotalMemorySize", HARD_FAIL, Instant.now(), "too low");
tester.nodeRepository.nodes().list().stream()
.filter(node -> node.hostname().equals(hostWithFailureReports))
.forEach(node -> {
Node updatedNode = node.with(node.reports().withReport(badTotalMemorySizeReport));
tester.nodeRepository.nodes().write(updatedNode, () -> {});
});
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyChild).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild1).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild2).get().state());
tester.suspend(hostWithFailureReports);
tester.clock.advance(Duration.ofHours(25));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyChild).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild1).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild2).get().state());
tester.suspend(activeChild1);
tester.clock.advance(Duration.ofHours(25));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyChild).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(activeChild1).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild2).get().state());
tester.suspend(activeChild2);
tester.clock.advance(Duration.ofHours(25));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyChild).get().state());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(activeChild1).get().state());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(activeChild2).get().state());
}
@Test
public void nodes_for_suspended_applications_are_not_failed() {
NodeFailTester tester = NodeFailTester.withTwoApplications();
tester.suspend(NodeFailTester.app1);
String host_from_suspended_app = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(1).hostname();
String host_from_normal_app = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app2).asList().get(3).hostname();
tester.serviceMonitor.setHostDown(host_from_suspended_app);
tester.serviceMonitor.setHostDown(host_from_normal_app);
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(65));
tester.runMaintainers();
assertTrue(tester.nodeRepository.nodes().node(host_from_normal_app).get().isDown());
assertTrue(tester.nodeRepository.nodes().node(host_from_suspended_app).get().isDown());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(host_from_normal_app).get().state());
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(host_from_suspended_app).get().state());
}
@Test
public void zone_is_not_working_if_too_many_nodes_down() {
NodeFailTester tester = NodeFailTester.withTwoApplications();
tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(0).hostname());
tester.runMaintainers();
assertTrue(tester.nodeRepository.nodes().isWorking());
tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(1).hostname());
tester.runMaintainers();
assertTrue(tester.nodeRepository.nodes().isWorking());
tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(2).hostname());
tester.runMaintainers();
assertFalse(tester.nodeRepository.nodes().isWorking());
tester.clock.advance(Duration.ofMinutes(65));
tester.runMaintainers();
assertTrue("Node failing is deactivated", tester.nodeRepository.nodes().list(Node.State.failed).isEmpty());
}
@Test
public void node_failing() {
NodeFailTester tester = NodeFailTester.withTwoApplications();
for (int minutes = 0; minutes < 24 * 60; minutes +=5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals( 0, tester.deployer.redeployments);
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals( 4, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
}
Node readyFail1 = tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).asList().get(2);
Node readyFail2 = tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).asList().get(3);
tester.nodeRepository.nodes().write(readyFail1.with(new Reports().withReport(badTotalMemorySizeReport)), () -> {});
tester.nodeRepository.nodes().write(readyFail2.with(new Reports().withReport(badTotalMemorySizeReport)), () -> {});
assertEquals(4, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
tester.runMaintainers();
assertEquals(2, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyFail1.hostname()).get().state());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyFail2.hostname()).get().state());
String downHost1 = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(1).hostname();
String downHost2 = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app2).asList().get(3).hostname();
tester.serviceMonitor.setHostDown(downHost1);
tester.serviceMonitor.setHostDown(downHost2);
for (int minutes = 0; minutes < 45; minutes +=5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals( 0, tester.deployer.redeployments);
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals( 2, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals( 2, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
}
tester.serviceMonitor.setHostUp(downHost1);
tester.clock.advance(Duration.ofDays(1));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals( 1, tester.deployer.redeployments);
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals( 3, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals( 1, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(downHost2, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).asList().get(0).hostname());
tester.serviceMonitor.setHostDown(downHost1);
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
tester.clock.advance(Duration.ofMinutes(120));
tester.failer = tester.createFailer();
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals( 2, tester.deployer.redeployments);
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals( 4, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
Node lastNode = tester.highestIndex(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1));
tester.serviceMonitor.setHostDown(lastNode.hostname());
for (int minutes = 0; minutes < 75; minutes +=5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals( 2, tester.deployer.redeployments);
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals( 4, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
}
tester.createReadyNodes(1, 16, NodeFailTester.nodeResources);
tester.clock.advance(Duration.ofDays(1));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals( 3, tester.deployer.redeployments);
assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals( 5, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertTrue("The index of the last failed node is not reused",
tester.highestIndex(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1)).allocation().get().membership().index()
>
lastNode.allocation().get().membership().index());
assertEquals("Node failing does not cause recording of scaling events",
1,
tester.nodeRepository.applications().get(NodeFailTester.app1).get().cluster(NodeFailTester.testCluster).get().scalingEvents().size());
}
@Test
public void re_activate_grace_period_test() {
NodeFailTester tester = NodeFailTester.withTwoApplications();
String downNode = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(1).hostname();
tester.serviceMonitor.setHostDown(downNode);
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
tester.clock.advance(Duration.ofMinutes(75));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(downNode).get().state());
tester.nodeRepository.nodes().reactivate(downNode, Agent.system, getClass().getSimpleName());
tester.clock.advance(Duration.ofMinutes(30));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
tester.clock.advance(Duration.ofMinutes(45));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(downNode).get().state());
}
@Test
public void node_failing_can_allocate_spare() {
var resources = new NodeResources(1, 20, 15, 1);
Capacity capacity = Capacity.from(new ClusterResources(3, 1, resources), false, true);
NodeFailTester tester = NodeFailTester.withOneUndeployedApplication(capacity);
assertEquals("Test depends on this setting in NodeFailTester", 1, tester.nodeRepository.spareCount());
tester.createAndActivateHosts(4, resources);
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test")).vespaVersion("6.42").build();
tester.activate(NodeFailTester.app1, cluster, capacity);
String downHost = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).first().get().hostname();
tester.serviceMonitor.setHostDown(downHost);
for (int minutes = 0; minutes < 45; minutes +=5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals(0, tester.deployer.redeployments);
assertEquals(3, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
}
tester.clock.advance(Duration.ofDays(1));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(1, tester.deployer.redeployments);
assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(3, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(downHost, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).asList().get(0).hostname());
}
@Test
public void node_failing_can_allocate_spare_to_replace_failed_node_in_group() {
NodeResources resources = new NodeResources(1, 20, 15, 1);
Capacity capacity = Capacity.from(new ClusterResources(4, 2, resources), false, true);
ClusterSpec spec = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test")).vespaVersion("6.42").build();
NodeFailTester tester = NodeFailTester.withOneUndeployedApplication(capacity, spec);
assertEquals("Test depends on this setting in NodeFailTester", 1, tester.nodeRepository.spareCount());
tester.createAndActivateHosts(5, resources);
tester.activate(NodeFailTester.app1, spec, capacity);
NodeList activeNodes = tester.nodeRepository.nodes().list(Node.State.active);
Node downNode = activeNodes.owner(NodeFailTester.app1).first().get();
Node downHost = activeNodes.parentOf(downNode).get();
tester.tester.patchNode(downHost, (node) -> node.with(node.reports().withReport(badTotalMemorySizeReport)));
tester.suspend(downHost.hostname());
tester.suspend(downNode.hostname());
tester.runMaintainers();
assertEquals(1, tester.deployer.redeployments);
NodeList failedOrActive = tester.nodeRepository.nodes().list(Node.State.active, Node.State.failed);
assertEquals(4, failedOrActive.state(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(Set.of(downNode.hostname()), failedOrActive.state(Node.State.failed).nodeType(NodeType.tenant).hostnames());
}
@Test
public void host_not_failed_without_config_requests() {
NodeFailTester tester = NodeFailTester.withTwoApplications();
for (int minutes = 0, interval = 30; minutes < 24 * 60; minutes += interval) {
tester.clock.advance(Duration.ofMinutes(interval));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals( 3, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.host).size());
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.host).size());
}
tester.clock.advance(Duration.ofMinutes(180));
Node host = tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.host).first().get();
tester.allNodesMakeAConfigRequestExcept(host);
tester.runMaintainers();
assertEquals( 3, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.host).size());
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.host).size());
}
@Test
public void failing_hosts() {
NodeFailTester tester = NodeFailTester.withTwoApplications(7);
for (int minutes = 0, interval = 30; minutes < 24 * 60; minutes += interval) {
tester.clock.advance(Duration.ofMinutes(interval));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(13, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(7, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
}
String downHost1 = selectFirstParentHostWithNActiveNodesExcept(tester.nodeRepository, 2);
tester.serviceMonitor.setHostDown(downHost1);
for (int minutes = 0; minutes < 45; minutes += 5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals(0, tester.deployer.redeployments);
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(13, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(7, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
}
tester.clock.advance(Duration.ofMinutes(30));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(2 + 1, tester.deployer.redeployments);
assertEquals(3, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(10, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(6, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.host).size());
Node downTenant1 = tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).first().get();
tester.serviceMonitor.setHostDown(downTenant1.hostname());
for (int minutes = 0, interval = 30; minutes < 24 * 60; minutes += interval) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(interval));
tester.allNodesMakeAConfigRequestExcept();
assertEquals(3 + 1, tester.nodeRepository.nodes().list(Node.State.failed).size());
}
tester.clock.advance(Duration.ofMinutes(30));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(3 + 1, tester.deployer.redeployments);
assertEquals(4, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(9, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(6, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
String downHost2 = selectFirstParentHostWithNActiveNodesExcept(tester.nodeRepository, 2, downTenant1.parentHostname().get());
tester.serviceMonitor.setHostDown(downHost2);
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(90));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(5 + 2, tester.deployer.redeployments);
assertEquals(7, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(6, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(5, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
String downHost3 = selectFirstParentHostWithNActiveNodesExcept(tester.nodeRepository, 2, downTenant1.parentHostname().get());
tester.serviceMonitor.setHostDown(downHost3);
tester.runMaintainers();
tester.clock.advance(Duration.ofDays(1));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(6 + 2, tester.deployer.redeployments);
assertEquals(9, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(4, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(5, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
}
@Test
public void failing_proxy_nodes() {
test_infra_application_fail(NodeType.proxy, 10, 1);
}
@Test
public void failing_config_hosts() {
test_infra_application_fail(NodeType.confighost, 3, 0);
}
private void test_infra_application_fail(NodeType nodeType, int count, int expectedFailCount) {
NodeFailTester tester = NodeFailTester.withInfraApplication(nodeType, count);
for (int minutes = 0; minutes < 24 * 60; minutes +=5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals(count, tester.nodeRepository.nodes().list(Node.State.active).nodeType(nodeType).size());
}
Set<String> downHosts = Set.of("host2", "host3");
for (String downHost : downHosts)
tester.serviceMonitor.setHostDown(downHost);
for (int minutes = 0; minutes < 45; minutes +=5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals( 0, tester.deployer.redeployments);
assertEquals(count, tester.nodeRepository.nodes().list(Node.State.active).nodeType(nodeType).size());
}
tester.clock.advance(Duration.ofMinutes(60));
tester.runMaintainers();
assertEquals(expectedFailCount, tester.deployer.redeployments);
assertEquals(count - expectedFailCount, tester.nodeRepository.nodes().list(Node.State.active).nodeType(nodeType).size());
assertEquals(expectedFailCount, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(nodeType).size());
tester.nodeRepository.nodes().list(Node.State.failed).nodeType(nodeType)
.forEach(node -> assertTrue(downHosts.contains(node.hostname())));
tester.clock.advance(Duration.ofMinutes(60));
tester.runMaintainers();
assertEquals(count - expectedFailCount, tester.nodeRepository.nodes().list(Node.State.active).nodeType(nodeType).size());
}
@Test
public void failing_divergent_ready_nodes() {
NodeFailTester tester = NodeFailTester.withNoApplications();
Node readyNode = tester.createReadyNodes(1).get(0);
tester.runMaintainers();
assertEquals(Node.State.ready, readyNode.state());
tester.nodeRepository.nodes().write(readyNode.with(new Reports().withReport(badTotalMemorySizeReport)), () -> {});
tester.runMaintainers();
assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).size());
}
@Test
@Test
public void testUpness() {
assertFalse(badNode(0, 0, 0));
assertFalse(badNode(0, 0, 2));
assertFalse(badNode(0, 3, 0));
assertFalse(badNode(0, 3, 2));
assertTrue(badNode(1, 0, 0));
assertTrue(badNode(1, 0, 2));
assertFalse(badNode(1, 3, 0));
assertFalse(badNode(1, 3, 2));
}
private void addServiceInstances(List<ServiceInstance> list, ServiceStatus status, int num) {
for (int i = 0; i < num; ++i) {
ServiceInstance service = mock(ServiceInstance.class);
when(service.serviceStatus()).thenReturn(status);
list.add(service);
}
}
private boolean badNode(int numDown, int numUp, int numNotChecked) {
List<ServiceInstance> services = new ArrayList<>();
addServiceInstances(services, ServiceStatus.DOWN, numDown);
addServiceInstances(services, ServiceStatus.UP, numUp);
addServiceInstances(services, ServiceStatus.NOT_CHECKED, numNotChecked);
Collections.shuffle(services);
return NodeHealthTracker.allDown(services);
}
/**
* Selects the first parent host that:
* - has exactly n nodes in state 'active'
* - is not present in the 'except' array
*/
private static String selectFirstParentHostWithNActiveNodesExcept(NodeRepository nodeRepository, int n, String... except) {
Set<String> exceptSet = Arrays.stream(except).collect(Collectors.toSet());
return nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).stream()
.collect(Collectors.groupingBy(Node::parentHostname))
.entrySet().stream()
.filter(entry -> entry.getValue().size() == n)
.map(Map.Entry::getKey)
.flatMap(parentHost -> Stream.of(parentHost.get()))
.filter(node -> ! exceptSet.contains(node))
.findFirst().get();
}
} | |
It should always be normalized and absolute here. The only place where this path comes from outside (`ContainerPath::fromPathOnHost`), it is normalized before being passed here. | private ContainerPath(ContainerFileSystem containerFs, Path pathOnHost, String[] parts) {
this.containerFs = Objects.requireNonNull(containerFs);
this.pathOnHost = Objects.requireNonNull(pathOnHost);
this.parts = Objects.requireNonNull(parts);
if (!pathOnHost.isAbsolute())
throw new IllegalArgumentException("Path host must be absolute: " + pathOnHost);
Path containerRootOnHost = containerFs.provider().containerRootOnHost();
if (!pathOnHost.startsWith(containerRootOnHost))
throw new IllegalArgumentException("Path on host (" + pathOnHost + ") must start with container root on host (" + containerRootOnHost + ")");
} | if (!pathOnHost.startsWith(containerRootOnHost)) | private ContainerPath(ContainerFileSystem containerFs, Path pathOnHost, String[] parts) {
this.containerFs = Objects.requireNonNull(containerFs);
this.pathOnHost = Objects.requireNonNull(pathOnHost);
this.parts = Objects.requireNonNull(parts);
if (!pathOnHost.isAbsolute())
throw new IllegalArgumentException("Path host must be absolute: " + pathOnHost);
Path containerRootOnHost = containerFs.provider().containerRootOnHost();
if (!pathOnHost.startsWith(containerRootOnHost))
throw new IllegalArgumentException("Path on host (" + pathOnHost + ") must start with container root on host (" + containerRootOnHost + ")");
} | class ContainerPath implements Path {
private final ContainerFileSystem containerFs;
private final Path pathOnHost;
private final String[] parts;
public Path pathOnHost() {
return pathOnHost;
}
@Override
public FileSystem getFileSystem() {
return containerFs;
}
@Override
public ContainerPath getRoot() {
return resolve(containerFs, new String[0], Path.of("/"));
}
@Override
public Path getFileName() {
if (parts.length == 0) return null;
return Path.of(parts[parts.length - 1]);
}
@Override
public ContainerPath getParent() {
if (parts.length == 0) return null;
return new ContainerPath(containerFs, pathOnHost.getParent(), Arrays.copyOf(parts, parts.length-1));
}
@Override
public int getNameCount() {
return parts.length;
}
@Override
public Path getName(int index) {
return Path.of(parts[index]);
}
@Override
public Path subpath(int beginIndex, int endIndex) {
if (beginIndex < 0 || beginIndex >= endIndex || endIndex > parts.length)
throw new IllegalArgumentException();
if (endIndex - beginIndex == 1) return getName(beginIndex);
String[] rest = new String[endIndex - beginIndex - 1];
System.arraycopy(parts, beginIndex + 1, rest, 0, rest.length);
return Path.of(parts[beginIndex], rest);
}
@Override
public ContainerPath resolve(Path other) {
return resolve(containerFs, parts, other);
}
@Override
public ContainerPath resolveSibling(String other) {
return resolve(Path.of("..", other));
}
@Override
public boolean startsWith(Path other) {
if (other.getFileSystem() != containerFs) return false;
String[] otherParts = toContainerPath(other).parts;
if (parts.length < otherParts.length) return false;
for (int i = 0; i < otherParts.length; i++) {
if ( ! parts[i].equals(otherParts[i])) return false;
}
return true;
}
@Override
public boolean endsWith(Path other) {
int offset = parts.length - other.getNameCount();
if (offset < 0 || (other.isAbsolute() && offset > 0)) return false;
for (int i = 0; i < other.getNameCount(); i++) {
if ( ! parts[offset + i].equals(other.getName(i).toString())) return false;
}
return true;
}
@Override
public boolean isAbsolute() {
return true;
}
@Override
public ContainerPath normalize() {
return this;
}
@Override
public ContainerPath toAbsolutePath() {
return this;
}
@Override
public ContainerPath toRealPath(LinkOption... options) throws IOException {
Path realPathOnHost = pathOnHost.toRealPath(options);
if (realPathOnHost.equals(pathOnHost)) return this;
return fromPathOnHost(containerFs, realPathOnHost);
}
@Override
public Path relativize(Path other) {
return pathOnHost.relativize(toContainerPath(other).pathOnHost);
}
@Override
public URI toUri() {
throw new UnsupportedOperationException();
}
@Override
public WatchKey register(WatchService watcher, WatchEvent.Kind<?>[] events, WatchEvent.Modifier... modifiers) throws IOException {
return pathOnHost.register(watcher, events, modifiers);
}
@Override
public int compareTo(Path other) {
return pathOnHost.compareTo(toContainerPath(other));
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ContainerPath paths = (ContainerPath) o;
return containerFs.equals(paths.containerFs) && pathOnHost.equals(paths.pathOnHost) && Arrays.equals(parts, paths.parts);
}
@Override
public int hashCode() {
int result = Objects.hash(containerFs, pathOnHost);
result = 31 * result + Arrays.hashCode(parts);
return result;
}
@Override
public String toString() {
return '/' + String.join("/", parts);
}
private static ContainerPath resolve(ContainerFileSystem containerFs, String[] currentParts, Path other) {
List<String> parts = other.isAbsolute() ? new ArrayList<>() : new ArrayList<>(Arrays.asList(currentParts));
for (int i = 0; i < other.getNameCount(); i++) {
String part = other.getName(i).toString();
if (part.isEmpty() || part.equals(".")) continue;
if (part.equals("..")) {
if (!parts.isEmpty()) parts.remove(parts.size() - 1);
continue;
}
parts.add(part);
}
return new ContainerPath(containerFs,
containerFs.provider().containerRootOnHost().resolve(String.join("/", parts)),
parts.toArray(String[]::new));
}
static ContainerPath fromPathInContainer(ContainerFileSystem containerFs, Path pathInContainer) {
if (!pathInContainer.isAbsolute())
throw new IllegalArgumentException("Path in container must be absolute: " + pathInContainer);
return resolve(containerFs, new String[0], pathInContainer);
}
static ContainerPath fromPathOnHost(ContainerFileSystem containerFs, Path pathOnHost) {
pathOnHost = pathOnHost.normalize();
Path containerRootOnHost = containerFs.provider().containerRootOnHost();
Path pathUnderContainerStore = containerRootOnHost.relativize(pathOnHost);
List<String> parts = new ArrayList<>();
for (int i = 0; i < pathUnderContainerStore.getNameCount(); i++) {
String part = pathUnderContainerStore.getName(i).toString();
if (part.isEmpty() || part.equals(".")) continue;
if (part.equals("..")) throw new IllegalArgumentException("Path " + pathOnHost + " is not under container root " + containerRootOnHost);
parts.add(part);
}
return new ContainerPath(containerFs, pathOnHost, parts.toArray(String[]::new));
}
} | class ContainerPath implements Path {
private final ContainerFileSystem containerFs;
private final Path pathOnHost;
private final String[] parts;
public Path pathOnHost() {
return pathOnHost;
}
@Override
public FileSystem getFileSystem() {
return containerFs;
}
@Override
public ContainerPath getRoot() {
return resolve(containerFs, new String[0], Path.of("/"));
}
@Override
public Path getFileName() {
if (parts.length == 0) return null;
return Path.of(parts[parts.length - 1]);
}
@Override
public ContainerPath getParent() {
if (parts.length == 0) return null;
return new ContainerPath(containerFs, pathOnHost.getParent(), Arrays.copyOf(parts, parts.length-1));
}
@Override
public int getNameCount() {
return parts.length;
}
@Override
public Path getName(int index) {
return Path.of(parts[index]);
}
@Override
public Path subpath(int beginIndex, int endIndex) {
if (beginIndex < 0 || beginIndex >= endIndex || endIndex > parts.length)
throw new IllegalArgumentException();
if (endIndex - beginIndex == 1) return getName(beginIndex);
String[] rest = new String[endIndex - beginIndex - 1];
System.arraycopy(parts, beginIndex + 1, rest, 0, rest.length);
return Path.of(parts[beginIndex], rest);
}
@Override
public ContainerPath resolve(Path other) {
return resolve(containerFs, parts, other);
}
@Override
public ContainerPath resolveSibling(String other) {
return resolve(Path.of("..", other));
}
@Override
public boolean startsWith(Path other) {
if (other.getFileSystem() != containerFs) return false;
String[] otherParts = toContainerPath(other).parts;
if (parts.length < otherParts.length) return false;
for (int i = 0; i < otherParts.length; i++) {
if ( ! parts[i].equals(otherParts[i])) return false;
}
return true;
}
@Override
public boolean endsWith(Path other) {
int offset = parts.length - other.getNameCount();
if (offset < 0 || (other.isAbsolute() && offset > 0)) return false;
for (int i = 0; i < other.getNameCount(); i++) {
if ( ! parts[offset + i].equals(other.getName(i).toString())) return false;
}
return true;
}
@Override
public boolean isAbsolute() {
return true;
}
@Override
public ContainerPath normalize() {
return this;
}
@Override
public ContainerPath toAbsolutePath() {
return this;
}
@Override
public ContainerPath toRealPath(LinkOption... options) throws IOException {
Path realPathOnHost = pathOnHost.toRealPath(options);
if (realPathOnHost.equals(pathOnHost)) return this;
return fromPathOnHost(containerFs, realPathOnHost);
}
@Override
public Path relativize(Path other) {
return pathOnHost.relativize(toContainerPath(other).pathOnHost);
}
@Override
public URI toUri() {
throw new UnsupportedOperationException();
}
@Override
public WatchKey register(WatchService watcher, WatchEvent.Kind<?>[] events, WatchEvent.Modifier... modifiers) throws IOException {
return pathOnHost.register(watcher, events, modifiers);
}
@Override
public int compareTo(Path other) {
return pathOnHost.compareTo(toContainerPath(other));
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ContainerPath paths = (ContainerPath) o;
return containerFs.equals(paths.containerFs) && pathOnHost.equals(paths.pathOnHost) && Arrays.equals(parts, paths.parts);
}
@Override
public int hashCode() {
int result = Objects.hash(containerFs, pathOnHost);
result = 31 * result + Arrays.hashCode(parts);
return result;
}
@Override
public String toString() {
return '/' + String.join("/", parts);
}
private static ContainerPath resolve(ContainerFileSystem containerFs, String[] currentParts, Path other) {
List<String> parts = other.isAbsolute() ? new ArrayList<>() : new ArrayList<>(Arrays.asList(currentParts));
for (int i = 0; i < other.getNameCount(); i++) {
String part = other.getName(i).toString();
if (part.isEmpty() || part.equals(".")) continue;
if (part.equals("..")) {
if (!parts.isEmpty()) parts.remove(parts.size() - 1);
continue;
}
parts.add(part);
}
return new ContainerPath(containerFs,
containerFs.provider().containerRootOnHost().resolve(String.join("/", parts)),
parts.toArray(String[]::new));
}
static ContainerPath fromPathInContainer(ContainerFileSystem containerFs, Path pathInContainer) {
if (!pathInContainer.isAbsolute())
throw new IllegalArgumentException("Path in container must be absolute: " + pathInContainer);
return resolve(containerFs, new String[0], pathInContainer);
}
static ContainerPath fromPathOnHost(ContainerFileSystem containerFs, Path pathOnHost) {
pathOnHost = pathOnHost.normalize();
Path containerRootOnHost = containerFs.provider().containerRootOnHost();
Path pathUnderContainerStorage = containerRootOnHost.relativize(pathOnHost);
if (pathUnderContainerStorage.getNameCount() == 0 || pathUnderContainerStorage.getName(0).toString().isEmpty())
return new ContainerPath(containerFs, pathOnHost, new String[0]);
if (pathUnderContainerStorage.getName(0).toString().equals(".."))
throw new IllegalArgumentException("Path " + pathOnHost + " is not under container root " + containerRootOnHost);
List<String> parts = new ArrayList<>();
for (int i = 0; i < pathUnderContainerStorage.getNameCount(); i++)
parts.add(pathUnderContainerStorage.getName(i).toString());
return new ContainerPath(containerFs, pathOnHost, parts.toArray(String[]::new));
}
} |
```suggestion ``` | public EncodedClusterStateBundle encode(ClusterStateBundle stateBundle) {
Slime slime = new Slime();
Cursor root = slime.setObject();
if (stateBundle.deferredActivation()) {
root.setBool("deferred-activation", stateBundle.deferredActivation());
}
Cursor states = root.setObject("states");
states.setString("baseline", stateBundle.getBaselineClusterState().toString());
Cursor spaces = states.setObject("spaces");
stateBundle.getDerivedBucketSpaceStates().entrySet()
.forEach(entry -> spaces.setString(entry.getKey(), entry.getValue().toString()));
if (stateBundle.getFeedBlock().map(fb -> fb.blockFeedInCluster()).orElse(false)) {
Cursor feedBlock = root.setObject("feed-block");
feedBlock.setBool("block-feed-in-cluster", true);
feedBlock.setString("description", stateBundle.getFeedBlock().get().getDescription());
}
byte[] serialized = BinaryFormat.encode(slime);
Compressor.Compression compression = BinaryFormat.encode_and_compress(slime, compressor);
return EncodedClusterStateBundle.fromCompressionBuffer(compression);
} | byte[] serialized = BinaryFormat.encode(slime); | public EncodedClusterStateBundle encode(ClusterStateBundle stateBundle) {
Slime slime = new Slime();
Cursor root = slime.setObject();
if (stateBundle.deferredActivation()) {
root.setBool("deferred-activation", stateBundle.deferredActivation());
}
Cursor states = root.setObject("states");
states.setString("baseline", stateBundle.getBaselineClusterState().toString());
Cursor spaces = states.setObject("spaces");
stateBundle.getDerivedBucketSpaceStates().entrySet()
.forEach(entry -> spaces.setString(entry.getKey(), entry.getValue().toString()));
if (stateBundle.getFeedBlock().map(fb -> fb.blockFeedInCluster()).orElse(false)) {
Cursor feedBlock = root.setObject("feed-block");
feedBlock.setBool("block-feed-in-cluster", true);
feedBlock.setString("description", stateBundle.getFeedBlock().get().getDescription());
}
Compressor.Compression compression = BinaryFormat.encode_and_compress(slime, compressor);
return EncodedClusterStateBundle.fromCompressionBuffer(compression);
} | class SlimeClusterStateBundleCodec implements ClusterStateBundleCodec, EnvelopedClusterStateBundleCodec {
private static final Compressor compressor = new Compressor(CompressionType.LZ4, 3, 0.90, 1024);
@Override
@Override
public ClusterStateBundle decode(EncodedClusterStateBundle encodedClusterStateBundle) {
byte[] uncompressed = compressor.decompress(encodedClusterStateBundle.getCompression());
Slime slime = BinaryFormat.decode(uncompressed);
Inspector root = slime.get();
Inspector states = root.field("states");
ClusterState baseline = ClusterState.stateFromString(states.field("baseline").asString());
Inspector spaces = states.field("spaces");
Map<String, AnnotatedClusterState> derivedStates = new HashMap<>();
spaces.traverse(((ObjectTraverser)(key, value) -> {
derivedStates.put(key, AnnotatedClusterState.withoutAnnotations(ClusterState.stateFromString(value.asString())));
}));
boolean deferredActivation = root.field("deferred-activation").asBool();
ClusterStateBundle.FeedBlock feedBlock = null;
Inspector fb = root.field("feed-block");
if (fb.valid() && fb.field("block-feed-in-cluster").asBool()) {
feedBlock = ClusterStateBundle.FeedBlock.blockedWithDescription(fb.field("description").asString());
}
return ClusterStateBundle.of(AnnotatedClusterState.withoutAnnotations(baseline), derivedStates,
feedBlock, deferredActivation);
}
@Override
public byte[] encodeWithEnvelope(ClusterStateBundle stateBundle) {
EncodedClusterStateBundle toEnvelope = encode(stateBundle);
Slime slime = new Slime();
Cursor root = slime.setObject();
root.setLong("compression-type", toEnvelope.getCompression().type().getCode());
root.setLong("uncompressed-size", toEnvelope.getCompression().uncompressedSize());
root.setData("data", toEnvelope.getCompression().data());
return BinaryFormat.encode(slime);
}
@Override
public ClusterStateBundle decodeWithEnvelope(byte[] encodedClusterStateBundle) {
Slime slime = BinaryFormat.decode(encodedClusterStateBundle);
Inspector root = slime.get();
CompressionType compressionType = CompressionType.valueOf((byte)root.field("compression-type").asLong());
int uncompressedSize = (int)root.field("uncompressed-size").asLong();
byte[] data = root.field("data").asData();
return decode(EncodedClusterStateBundle.fromCompressionBuffer(
new Compressor.Compression(compressionType, uncompressedSize, data)));
}
} | class SlimeClusterStateBundleCodec implements ClusterStateBundleCodec, EnvelopedClusterStateBundleCodec {
private static final Compressor compressor = new Compressor(CompressionType.LZ4, 3, 0.90, 1024);
@Override
@Override
public ClusterStateBundle decode(EncodedClusterStateBundle encodedClusterStateBundle) {
byte[] uncompressed = compressor.decompress(encodedClusterStateBundle.getCompression());
Slime slime = BinaryFormat.decode(uncompressed);
Inspector root = slime.get();
Inspector states = root.field("states");
ClusterState baseline = ClusterState.stateFromString(states.field("baseline").asString());
Inspector spaces = states.field("spaces");
Map<String, AnnotatedClusterState> derivedStates = new HashMap<>();
spaces.traverse(((ObjectTraverser)(key, value) -> {
derivedStates.put(key, AnnotatedClusterState.withoutAnnotations(ClusterState.stateFromString(value.asString())));
}));
boolean deferredActivation = root.field("deferred-activation").asBool();
ClusterStateBundle.FeedBlock feedBlock = null;
Inspector fb = root.field("feed-block");
if (fb.valid() && fb.field("block-feed-in-cluster").asBool()) {
feedBlock = ClusterStateBundle.FeedBlock.blockedWithDescription(fb.field("description").asString());
}
return ClusterStateBundle.of(AnnotatedClusterState.withoutAnnotations(baseline), derivedStates,
feedBlock, deferredActivation);
}
@Override
public byte[] encodeWithEnvelope(ClusterStateBundle stateBundle) {
EncodedClusterStateBundle toEnvelope = encode(stateBundle);
Slime slime = new Slime();
Cursor root = slime.setObject();
root.setLong("compression-type", toEnvelope.getCompression().type().getCode());
root.setLong("uncompressed-size", toEnvelope.getCompression().uncompressedSize());
root.setData("data", toEnvelope.getCompression().data());
return BinaryFormat.encode(slime);
}
@Override
public ClusterStateBundle decodeWithEnvelope(byte[] encodedClusterStateBundle) {
Slime slime = BinaryFormat.decode(encodedClusterStateBundle);
Inspector root = slime.get();
CompressionType compressionType = CompressionType.valueOf((byte)root.field("compression-type").asLong());
int uncompressedSize = (int)root.field("uncompressed-size").asLong();
byte[] data = root.field("data").asData();
return decode(EncodedClusterStateBundle.fromCompressionBuffer(
new Compressor.Compression(compressionType, uncompressedSize, data)));
}
} |
Unused, can be removed | public EncodedClusterStateBundle encode(ClusterStateBundle stateBundle) {
Slime slime = new Slime();
Cursor root = slime.setObject();
if (stateBundle.deferredActivation()) {
root.setBool("deferred-activation", stateBundle.deferredActivation());
}
Cursor states = root.setObject("states");
states.setString("baseline", stateBundle.getBaselineClusterState().toString());
Cursor spaces = states.setObject("spaces");
stateBundle.getDerivedBucketSpaceStates().entrySet()
.forEach(entry -> spaces.setString(entry.getKey(), entry.getValue().toString()));
if (stateBundle.getFeedBlock().map(fb -> fb.blockFeedInCluster()).orElse(false)) {
Cursor feedBlock = root.setObject("feed-block");
feedBlock.setBool("block-feed-in-cluster", true);
feedBlock.setString("description", stateBundle.getFeedBlock().get().getDescription());
}
byte[] serialized = BinaryFormat.encode(slime);
Compressor.Compression compression = BinaryFormat.encode_and_compress(slime, compressor);
return EncodedClusterStateBundle.fromCompressionBuffer(compression);
} | byte[] serialized = BinaryFormat.encode(slime); | public EncodedClusterStateBundle encode(ClusterStateBundle stateBundle) {
Slime slime = new Slime();
Cursor root = slime.setObject();
if (stateBundle.deferredActivation()) {
root.setBool("deferred-activation", stateBundle.deferredActivation());
}
Cursor states = root.setObject("states");
states.setString("baseline", stateBundle.getBaselineClusterState().toString());
Cursor spaces = states.setObject("spaces");
stateBundle.getDerivedBucketSpaceStates().entrySet()
.forEach(entry -> spaces.setString(entry.getKey(), entry.getValue().toString()));
if (stateBundle.getFeedBlock().map(fb -> fb.blockFeedInCluster()).orElse(false)) {
Cursor feedBlock = root.setObject("feed-block");
feedBlock.setBool("block-feed-in-cluster", true);
feedBlock.setString("description", stateBundle.getFeedBlock().get().getDescription());
}
Compressor.Compression compression = BinaryFormat.encode_and_compress(slime, compressor);
return EncodedClusterStateBundle.fromCompressionBuffer(compression);
} | class SlimeClusterStateBundleCodec implements ClusterStateBundleCodec, EnvelopedClusterStateBundleCodec {
private static final Compressor compressor = new Compressor(CompressionType.LZ4, 3, 0.90, 1024);
@Override
@Override
public ClusterStateBundle decode(EncodedClusterStateBundle encodedClusterStateBundle) {
byte[] uncompressed = compressor.decompress(encodedClusterStateBundle.getCompression());
Slime slime = BinaryFormat.decode(uncompressed);
Inspector root = slime.get();
Inspector states = root.field("states");
ClusterState baseline = ClusterState.stateFromString(states.field("baseline").asString());
Inspector spaces = states.field("spaces");
Map<String, AnnotatedClusterState> derivedStates = new HashMap<>();
spaces.traverse(((ObjectTraverser)(key, value) -> {
derivedStates.put(key, AnnotatedClusterState.withoutAnnotations(ClusterState.stateFromString(value.asString())));
}));
boolean deferredActivation = root.field("deferred-activation").asBool();
ClusterStateBundle.FeedBlock feedBlock = null;
Inspector fb = root.field("feed-block");
if (fb.valid() && fb.field("block-feed-in-cluster").asBool()) {
feedBlock = ClusterStateBundle.FeedBlock.blockedWithDescription(fb.field("description").asString());
}
return ClusterStateBundle.of(AnnotatedClusterState.withoutAnnotations(baseline), derivedStates,
feedBlock, deferredActivation);
}
@Override
public byte[] encodeWithEnvelope(ClusterStateBundle stateBundle) {
EncodedClusterStateBundle toEnvelope = encode(stateBundle);
Slime slime = new Slime();
Cursor root = slime.setObject();
root.setLong("compression-type", toEnvelope.getCompression().type().getCode());
root.setLong("uncompressed-size", toEnvelope.getCompression().uncompressedSize());
root.setData("data", toEnvelope.getCompression().data());
return BinaryFormat.encode(slime);
}
@Override
public ClusterStateBundle decodeWithEnvelope(byte[] encodedClusterStateBundle) {
Slime slime = BinaryFormat.decode(encodedClusterStateBundle);
Inspector root = slime.get();
CompressionType compressionType = CompressionType.valueOf((byte)root.field("compression-type").asLong());
int uncompressedSize = (int)root.field("uncompressed-size").asLong();
byte[] data = root.field("data").asData();
return decode(EncodedClusterStateBundle.fromCompressionBuffer(
new Compressor.Compression(compressionType, uncompressedSize, data)));
}
} | class SlimeClusterStateBundleCodec implements ClusterStateBundleCodec, EnvelopedClusterStateBundleCodec {
private static final Compressor compressor = new Compressor(CompressionType.LZ4, 3, 0.90, 1024);
@Override
@Override
public ClusterStateBundle decode(EncodedClusterStateBundle encodedClusterStateBundle) {
byte[] uncompressed = compressor.decompress(encodedClusterStateBundle.getCompression());
Slime slime = BinaryFormat.decode(uncompressed);
Inspector root = slime.get();
Inspector states = root.field("states");
ClusterState baseline = ClusterState.stateFromString(states.field("baseline").asString());
Inspector spaces = states.field("spaces");
Map<String, AnnotatedClusterState> derivedStates = new HashMap<>();
spaces.traverse(((ObjectTraverser)(key, value) -> {
derivedStates.put(key, AnnotatedClusterState.withoutAnnotations(ClusterState.stateFromString(value.asString())));
}));
boolean deferredActivation = root.field("deferred-activation").asBool();
ClusterStateBundle.FeedBlock feedBlock = null;
Inspector fb = root.field("feed-block");
if (fb.valid() && fb.field("block-feed-in-cluster").asBool()) {
feedBlock = ClusterStateBundle.FeedBlock.blockedWithDescription(fb.field("description").asString());
}
return ClusterStateBundle.of(AnnotatedClusterState.withoutAnnotations(baseline), derivedStates,
feedBlock, deferredActivation);
}
@Override
public byte[] encodeWithEnvelope(ClusterStateBundle stateBundle) {
EncodedClusterStateBundle toEnvelope = encode(stateBundle);
Slime slime = new Slime();
Cursor root = slime.setObject();
root.setLong("compression-type", toEnvelope.getCompression().type().getCode());
root.setLong("uncompressed-size", toEnvelope.getCompression().uncompressedSize());
root.setData("data", toEnvelope.getCompression().data());
return BinaryFormat.encode(slime);
}
@Override
public ClusterStateBundle decodeWithEnvelope(byte[] encodedClusterStateBundle) {
Slime slime = BinaryFormat.decode(encodedClusterStateBundle);
Inspector root = slime.get();
CompressionType compressionType = CompressionType.valueOf((byte)root.field("compression-type").asLong());
int uncompressedSize = (int)root.field("uncompressed-size").asLong();
byte[] data = root.field("data").asData();
return decode(EncodedClusterStateBundle.fromCompressionBuffer(
new Compressor.Compression(compressionType, uncompressedSize, data)));
}
} |
Join with above log statement? | public void processServiceDumpRequest(NodeAgentContext context) {
Instant startedAt = Instant.now();
NodeSpec nodeSpec = context.node();
ServiceDumpReport request = nodeSpec.reports().getReport(ServiceDumpReport.REPORT_ID, ServiceDumpReport.class)
.orElse(null);
if (request == null || request.isCompletedOrFailed()) {
context.log(log, Level.FINE, "No service dump requested or dump already completed/failed");
return;
}
if (isNullTimestamp(request.getCreatedMillisOrNull())) {
handleFailure(context, request, startedAt, null, "'createdMillis' is missing or null");
return;
}
String configId = request.configId();
if (configId == null) {
handleFailure(context, request, startedAt, null, "Service config id is missing from request");
return;
}
Instant expiry = expireAt(startedAt, request);
if (expiry.isBefore(startedAt)) {
handleFailure(context, request, startedAt, null, "Request already expired");
return;
}
try {
context.log(log, Level.FINE,
"Creating dump for " + configId + " requested at " + Instant.ofEpochMilli(request.getCreatedMillisOrNull()));
storeReport(context, createStartedReport(request, startedAt));
Path directoryInNode = context.pathInNodeUnderVespaHome("tmp/vespa-service-dump");
Path directoryOnHost = context.pathOnHostFromPathInNode(directoryInNode);
Files.deleteIfExists(directoryOnHost);
Files.createDirectory(directoryOnHost);
Path vespaJvmDumper = context.pathInNodeUnderVespaHome("bin/vespa-jvm-dumper");
CommandResult result = container.executeCommandInContainerAsRoot(
context, vespaJvmDumper.toString(), configId, directoryInNode.toString());
context.log(log, Level.FINE, "vespa-jvm-dumper exit code: " + result.getExitCode());
context.log(log, Level.FINE, "vespa-jvm-dumper output: " + result.getOutput());
if (result.getExitCode() > 0) {
handleFailure(context, request, startedAt, null, "Failed to create dump: " + result.getOutput());
return;
}
URI destination = serviceDumpDestination(nodeSpec, createDumpId(request));
context.log(log, Level.FINE, "Uploading files with destination " + destination + " and expiry " + expiry);
List<SyncFileInfo> files = dumpFiles(directoryOnHost, destination, expiry);
logFilesToUpload(context, files);
if (!syncClient.sync(context, files, Integer.MAX_VALUE)) {
handleFailure(context, request, startedAt, null, "Unable to upload all files");
return;
}
context.log(log, Level.FINE, "Upload complete");
storeReport(context, createSuccessReport(request, startedAt, destination));
} catch (Exception e) {
handleFailure(context, request, startedAt, e, e.getMessage());
}
} | context.log(log, Level.FINE, "vespa-jvm-dumper output: " + result.getOutput()); | public void processServiceDumpRequest(NodeAgentContext context) {
Instant startedAt = clock.instant();
NodeSpec nodeSpec = context.node();
ServiceDumpReport request = nodeSpec.reports().getReport(ServiceDumpReport.REPORT_ID, ServiceDumpReport.class)
.orElse(null);
if (request == null || request.isCompletedOrFailed()) {
context.log(log, Level.FINE, "No service dump requested or dump already completed/failed");
return;
}
if (isNullTimestamp(request.getCreatedMillisOrNull())) {
handleFailure(context, request, startedAt, "'createdMillis' is missing or null");
return;
}
String configId = request.configId();
if (configId == null) {
handleFailure(context, request, startedAt, "Service config id is missing from request");
return;
}
Instant expiry = expireAt(startedAt, request);
if (expiry.isBefore(startedAt)) {
handleFailure(context, request, startedAt, "Request already expired");
return;
}
UnixPath directoryInNode = new UnixPath(context.pathInNodeUnderVespaHome("tmp/vespa-service-dump"));
UnixPath directoryOnHost = new UnixPath(context.pathOnHostFromPathInNode(directoryInNode.toPath()));
try {
context.log(log, Level.INFO,
"Creating dump for " + configId + " requested at " + Instant.ofEpochMilli(request.getCreatedMillisOrNull()));
storeReport(context, createStartedReport(request, startedAt));
if (directoryOnHost.exists()) {
context.log(log, Level.INFO, "Removing existing directory '" + directoryOnHost +"'.");
directoryOnHost.deleteRecursively();
}
context.log(log, Level.INFO, "Creating '" + directoryOnHost +"'.");
directoryOnHost.createDirectory();
directoryOnHost.setPermissions("rwxrwxrwx");
UnixPath vespaJvmDumper = new UnixPath(context.pathInNodeUnderVespaHome("bin/vespa-jvm-dumper"));
context.log(log, Level.INFO, "Executing '" + vespaJvmDumper + "' with arguments '" + configId + "' and '" + directoryInNode + "'");
CommandResult result = container.executeCommandInContainerAsRoot(
context, vespaJvmDumper.toString(), configId, directoryInNode.toString());
context.log(log, Level.INFO, "vespa-jvm-dumper exited with code '" + result.getExitCode() + "' and output:\n" + result.getOutput());
if (result.getExitCode() > 0) {
handleFailure(context, request, startedAt, "Failed to create dump: " + result.getOutput());
return;
}
URI destination = serviceDumpDestination(nodeSpec, createDumpId(request));
context.log(log, Level.INFO, "Uploading files with destination " + destination + " and expiry " + expiry);
List<SyncFileInfo> files = dumpFiles(directoryOnHost.toPath(), destination, expiry);
if (!syncClient.sync(context, files, Integer.MAX_VALUE)) {
handleFailure(context, request, startedAt, "Unable to upload all files");
return;
}
context.log(log, Level.INFO, "Upload complete");
storeReport(context, createSuccessReport(clock, request, startedAt, destination));
} catch (Exception e) {
handleFailure(context, request, startedAt, e);
} finally {
if (directoryOnHost.exists()) {
context.log(log, Level.INFO, "Deleting directory '" + directoryOnHost +"'.");
directoryOnHost.deleteRecursively();
}
}
} | class VespaServiceDumperImpl implements VespaServiceDumper {
private static final Logger log = Logger.getLogger(VespaServiceDumperImpl.class.getName());
private final ContainerOperations container;
private final SyncClient syncClient;
private final NodeRepository nodeRepository;
public VespaServiceDumperImpl(ContainerOperations container, SyncClient syncClient, NodeRepository nodeRepository) {
this.container = container;
this.syncClient = syncClient;
this.nodeRepository = nodeRepository;
}
@Override
private List<SyncFileInfo> dumpFiles(Path directoryOnHost, URI destination, Instant expiry) {
return FileFinder.files(directoryOnHost).stream()
.flatMap(file -> SyncFileInfo.forServiceDump(destination, file.path(), expiry).stream())
.collect(Collectors.toList());
}
private void logFilesToUpload(NodeAgentContext context, List<SyncFileInfo> files) {
if (log.isLoggable(Level.FINE)) {
String message = files.stream()
.map(file -> file.source().toString())
.collect(Collectors.joining());
context.log(log, Level.FINE, message);
}
}
private static Instant expireAt(Instant startedAt, ServiceDumpReport request) {
return isNullTimestamp(request.expireAt())
? startedAt.plus(7, ChronoUnit.DAYS)
: Instant.ofEpochMilli(request.expireAt());
}
private void handleFailure(NodeAgentContext context, ServiceDumpReport request, Instant startedAt,
Exception failure, String message) {
if (failure != null) {
context.log(log, Level.WARNING, message, failure);
} else {
context.log(log, Level.WARNING, message);
}
ServiceDumpReport report = createErrorReport(request, startedAt, message);
storeReport(context, report);
}
private void storeReport(NodeAgentContext context, ServiceDumpReport report) {
NodeAttributes nodeAttributes = new NodeAttributes();
nodeAttributes.withReport(ServiceDumpReport.REPORT_ID, report.toJsonNode());
nodeRepository.updateNodeAttributes(context.hostname().value(), nodeAttributes);
}
private static ServiceDumpReport createStartedReport(ServiceDumpReport request, Instant startedAt) {
return new ServiceDumpReport(
request.getCreatedMillisOrNull(), startedAt.toEpochMilli(), null, null, null, request.configId(),
request.expireAt(), null);
}
private static ServiceDumpReport createSuccessReport(ServiceDumpReport request, Instant startedAt, URI location) {
return new ServiceDumpReport(
request.getCreatedMillisOrNull(), startedAt.toEpochMilli(), Instant.now().toEpochMilli(), null,
location.toString(), request.configId(), request.expireAt(), null);
}
private static ServiceDumpReport createErrorReport(ServiceDumpReport request, Instant startedAt, String message) {
return new ServiceDumpReport(
request.getCreatedMillisOrNull(), startedAt.toEpochMilli(), null, Instant.now().toEpochMilli(), null,
request.configId(), request.expireAt(), message);
}
static String createDumpId(ServiceDumpReport request) {
String sanitizedConfigId = Lowercase.toLowerCase(request.configId()).replaceAll("[^a-z_0-9]", "-");
return sanitizedConfigId + "-" + request.getCreatedMillisOrNull().toString();
}
private static URI serviceDumpDestination(NodeSpec spec, String dumpId) {
URI archiveUri = spec.archiveUri().get();
String targetDirectory = "service-dump/" + dumpId;
return archiveUri.resolve(targetDirectory);
}
} | class VespaServiceDumperImpl implements VespaServiceDumper {
private static final Logger log = Logger.getLogger(VespaServiceDumperImpl.class.getName());
private final ContainerOperations container;
private final SyncClient syncClient;
private final NodeRepository nodeRepository;
private final Clock clock;
public VespaServiceDumperImpl(ContainerOperations container, SyncClient syncClient, NodeRepository nodeRepository) {
this(container, syncClient, nodeRepository, Clock.systemUTC());
}
VespaServiceDumperImpl(ContainerOperations container, SyncClient syncClient, NodeRepository nodeRepository,
Clock clock) {
this.container = container;
this.syncClient = syncClient;
this.nodeRepository = nodeRepository;
this.clock = clock;
}
@Override
private List<SyncFileInfo> dumpFiles(Path directoryOnHost, URI destination, Instant expiry) {
return FileFinder.files(directoryOnHost).stream()
.flatMap(file -> SyncFileInfo.forServiceDump(destination, file.path(), expiry).stream())
.collect(Collectors.toList());
}
private static Instant expireAt(Instant startedAt, ServiceDumpReport request) {
return isNullTimestamp(request.expireAt())
? startedAt.plus(7, ChronoUnit.DAYS)
: Instant.ofEpochMilli(request.expireAt());
}
private void handleFailure(NodeAgentContext context, ServiceDumpReport request, Instant startedAt, Exception failure) {
context.log(log, Level.WARNING, failure.toString(), failure);
ServiceDumpReport report = createErrorReport(clock, request, startedAt, failure.toString());
storeReport(context, report);
}
private void handleFailure(NodeAgentContext context, ServiceDumpReport request, Instant startedAt, String message) {
context.log(log, Level.WARNING, message);
ServiceDumpReport report = createErrorReport(clock, request, startedAt, message);
storeReport(context, report);
}
private void storeReport(NodeAgentContext context, ServiceDumpReport report) {
NodeAttributes nodeAttributes = new NodeAttributes();
nodeAttributes.withReport(ServiceDumpReport.REPORT_ID, report.toJsonNode());
nodeRepository.updateNodeAttributes(context.hostname().value(), nodeAttributes);
}
private static ServiceDumpReport createStartedReport(ServiceDumpReport request, Instant startedAt) {
return new ServiceDumpReport(
request.getCreatedMillisOrNull(), startedAt.toEpochMilli(), null, null, null, request.configId(),
request.expireAt(), null);
}
private static ServiceDumpReport createSuccessReport(
Clock clock, ServiceDumpReport request, Instant startedAt, URI location) {
return new ServiceDumpReport(
request.getCreatedMillisOrNull(), startedAt.toEpochMilli(), clock.instant().toEpochMilli(), null,
location.toString(), request.configId(), request.expireAt(), null);
}
private static ServiceDumpReport createErrorReport(
Clock clock, ServiceDumpReport request, Instant startedAt, String message) {
return new ServiceDumpReport(
request.getCreatedMillisOrNull(), startedAt.toEpochMilli(), null, clock.instant().toEpochMilli(), null,
request.configId(), request.expireAt(), message);
}
static String createDumpId(ServiceDumpReport request) {
String sanitizedConfigId = Lowercase.toLowerCase(request.configId()).replaceAll("[^a-z_0-9]", "-");
return sanitizedConfigId + "-" + request.getCreatedMillisOrNull().toString();
}
private static URI serviceDumpDestination(NodeSpec spec, String dumpId) {
URI archiveUri = spec.archiveUri().get();
String targetDirectory = "service-dump/" + dumpId + "/";
return archiveUri.resolve(targetDirectory);
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.