comment stringlengths 1 45k | method_body stringlengths 23 281k | target_code stringlengths 0 5.16k | method_body_after stringlengths 12 281k | context_before stringlengths 8 543k | context_after stringlengths 8 543k |
|---|---|---|---|---|---|
Oops. 🙈 | private void compare(HttpResponse response, String expected) throws JSONException, IOException {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
response.render(baos);
System.err.println(baos.toString());
JSONObject actualJSON = new JSONObject(new String(baos.toByteArray()));
JSONObject expectedJSON = new JSONObject(expected);
assertEquals(expectedJSON.toString(), actualJSON.toString());
} | System.err.println(baos.toString()); | private void compare(HttpResponse response, String expected) throws JSONException, IOException {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
response.render(baos);
System.err.println(baos.toString());
JSONObject actualJSON = new JSONObject(new String(baos.toByteArray()));
JSONObject expectedJSON = new JSONObject(expected);
assertEquals(expectedJSON.toString(), actualJSON.toString());
} | class path. (java.lang.NullPointerException)\n");
assertResponse(JobControllerApiHandlerHelper.runDetailsResponse(tester.jobs(), tester.jobs().last(app.instanceId(), devUsEast1).get().id(), null), "dev-us-east-1-log-first-part.json");
tester.configServer().setLogStream("Nope, this won't be logged");
tester.configServer().convergeServices(app.instanceId(), zone);
tester.setEndpoints(app.instanceId(), zone);
tester.runner().run();
assertResponse(JobControllerApiHandlerHelper.jobTypeResponse(tester.controller(), app.instanceId(), URI.create("https:
assertResponse(JobControllerApiHandlerHelper.runDetailsResponse(tester.jobs(), tester.jobs().last(app.instanceId(), devUsEast1).get().id(), "9"), "dev-us-east-1-log-second-part.json");
}
@Test
public void testResponsesWithDirectDeployment() {
var tester = new DeploymentTester();
var app = tester.newDeploymentContext();
tester.clock().setInstant(Instant.EPOCH);
var region = "us-west-1";
var applicationPackage = new ApplicationPackageBuilder().region(region).build();
tester.controller().applications().deploy(tester.instance().id(), ZoneId.from("prod", region),
Optional.of(applicationPackage),
new DeployOptions(true, Optional.empty(),
false, false));
assertResponse(JobControllerApiHandlerHelper.jobTypeResponse(tester.controller(), app.instanceId(), URI.create("https:
"jobs-direct-deployment.json");
} | class path. (java.lang.NullPointerException)\n");
assertResponse(JobControllerApiHandlerHelper.runDetailsResponse(tester.jobs(), tester.jobs().last(app.instanceId(), devUsEast1).get().id(), null), "dev-us-east-1-log-first-part.json");
tester.configServer().setLogStream("Nope, this won't be logged");
tester.configServer().convergeServices(app.instanceId(), zone);
tester.setEndpoints(app.instanceId(), zone);
tester.runner().run();
assertResponse(JobControllerApiHandlerHelper.jobTypeResponse(tester.controller(), app.instanceId(), URI.create("https:
assertResponse(JobControllerApiHandlerHelper.runDetailsResponse(tester.jobs(), tester.jobs().last(app.instanceId(), devUsEast1).get().id(), "9"), "dev-us-east-1-log-second-part.json");
}
@Test
public void testResponsesWithDirectDeployment() {
var tester = new DeploymentTester();
var app = tester.newDeploymentContext();
tester.clock().setInstant(Instant.EPOCH);
var region = "us-west-1";
var applicationPackage = new ApplicationPackageBuilder().region(region).build();
tester.controller().applications().deploy(tester.instance().id(), ZoneId.from("prod", region),
Optional.of(applicationPackage),
new DeployOptions(true, Optional.empty(),
false, false));
assertResponse(JobControllerApiHandlerHelper.jobTypeResponse(tester.controller(), app.instanceId(), URI.create("https:
"jobs-direct-deployment.json");
} |
Instead of "Some nodes" - write the list or a sample node? (btw, I think you should write a util to convert a list of hosts to String, and write the full list if the number of elements is below some reasonable threshold, while otherwise write a prefix of the list and then something like ... (and X more hosts). You could use this util here.) | private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
logger.log(INFO, "Deployment expired before installation was successful.");
return Optional.of(installationFailed);
}
Versions versions = controller.jobController().run(id).get().versions();
Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform();
Run run = controller.jobController().run(id).get();
Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(id.application(), id.type().zone(controller.system())),
Optional.of(platform));
if (services.isEmpty()) {
logger.log("Config status not currently available -- will retry.");
Step step = setTheStage ? installInitialReal : installReal;
return run.stepInfo(step).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
? Optional.of(error)
: Optional.empty();
}
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
id.application(),
ImmutableSet.of(active, reserved));
List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
NodeList nodeList = NodeList.of(nodes, parents, services.get());
boolean firstTick = run.convergenceSummary().isEmpty();
if (firstTick) {
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
}
ConvergenceSummary summary = nodeList.summary();
if (summary.converged()) {
controller.jobController().locked(id, lockedRun -> lockedRun.withSummary(null));
if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) {
if (containersAreUp(id.application(), id.type().zone(controller.system()), logger)) {
logger.log("Installation succeeded!");
return Optional.of(running);
}
}
else if (timedOut(id, deployment.get(), endpointTimeout)) {
logger.log(WARNING, "Endpoints failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
return Optional.of(error);
}
}
boolean failed = false;
NodeList suspendedTooLong = nodeList.suspendedSince(controller.clock().instant().minus(installationTimeout));
if ( ! suspendedTooLong.isEmpty()) {
logger.log(INFO, "Some nodes have been suspended for more than " + installationTimeout.toMinutes() + " minutes.");
failed = true;
}
if (run.noNodesDownSince()
.map(since -> since.isBefore(controller.clock().instant().minus(installationTimeout)))
.orElse(false)) {
if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0)
logger.log(INFO, "No nodes allowed to suspend to progress installation for " + installationTimeout.toMinutes() + " minutes.");
else
logger.log(INFO, "Nodes not able to start with new application package.");
failed = true;
}
Duration timeout = JobRunner.jobTimeout.minusHours(1);
if (timedOut(id, deployment.get(), timeout)) {
logger.log(INFO, "Installation failed to complete within " + timeout.toHours() + "hours!");
failed = true;
}
if (failed) {
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
return Optional.of(installationFailed);
}
if ( ! firstTick)
logger.log(nodeList.allowedDown().asList().stream()
.flatMap(node -> nodeDetails(node, false))
.collect(toList()));
controller.jobController().locked(id, lockedRun -> {
Instant noNodesDownSince = summary.down() == 0 ? lockedRun.noNodesDownSince().orElse(controller.clock().instant()) : null;
return lockedRun.noNodesDownSince(noNodesDownSince).withSummary(summary);
});
return Optional.empty();
} | logger.log(INFO, "Some nodes have been suspended for more than " + installationTimeout.toMinutes() + " minutes."); | private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
logger.log(INFO, "Deployment expired before installation was successful.");
return Optional.of(installationFailed);
}
Versions versions = controller.jobController().run(id).get().versions();
Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform();
Run run = controller.jobController().run(id).get();
Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(id.application(), id.type().zone(controller.system())),
Optional.of(platform));
if (services.isEmpty()) {
logger.log("Config status not currently available -- will retry.");
Step step = setTheStage ? installInitialReal : installReal;
return run.stepInfo(step).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
? Optional.of(error)
: Optional.empty();
}
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
id.application(),
ImmutableSet.of(active, reserved));
List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
NodeList nodeList = NodeList.of(nodes, parents, services.get());
boolean firstTick = run.convergenceSummary().isEmpty();
if (firstTick) {
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
}
ConvergenceSummary summary = nodeList.summary();
if (summary.converged()) {
controller.jobController().locked(id, lockedRun -> lockedRun.withSummary(null));
if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) {
if (containersAreUp(id.application(), id.type().zone(controller.system()), logger)) {
logger.log("Installation succeeded!");
return Optional.of(running);
}
}
else if (timedOut(id, deployment.get(), endpointTimeout)) {
logger.log(WARNING, "Endpoints failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
return Optional.of(error);
}
}
boolean failed = false;
NodeList suspendedTooLong = nodeList.suspendedSince(controller.clock().instant().minus(installationTimeout));
if ( ! suspendedTooLong.isEmpty()) {
logger.log(INFO, "Some nodes have been suspended for more than " + installationTimeout.toMinutes() + " minutes.");
failed = true;
}
if (run.noNodesDownSince()
.map(since -> since.isBefore(controller.clock().instant().minus(installationTimeout)))
.orElse(false)) {
if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0)
logger.log(INFO, "No nodes allowed to suspend to progress installation for " + installationTimeout.toMinutes() + " minutes.");
else
logger.log(INFO, "Nodes not able to start with new application package.");
failed = true;
}
Duration timeout = JobRunner.jobTimeout.minusHours(1);
if (timedOut(id, deployment.get(), timeout)) {
logger.log(INFO, "Installation failed to complete within " + timeout.toHours() + "hours!");
failed = true;
}
if (failed) {
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
return Optional.of(installationFailed);
}
if ( ! firstTick)
logger.log(nodeList.allowedDown().asList().stream()
.flatMap(node -> nodeDetails(node, false))
.collect(toList()));
controller.jobController().locked(id, lockedRun -> {
Instant noNodesDownSince = summary.down() == 0 ? lockedRun.noNodesDownSince().orElse(controller.clock().instant()) : null;
return lockedRun.noNodesDownSince(noNodesDownSince).withSummary(summary);
});
return Optional.empty();
} | class InternalStepRunner implements StepRunner {
private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName());
private static final NodeResources DEFAULT_TESTER_RESOURCES =
new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any);
private static final NodeResources DEFAULT_TESTER_RESOURCES_AWS =
new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any);
static final Duration endpointTimeout = Duration.ofMinutes(15);
static final Duration testerTimeout = Duration.ofMinutes(30);
static final Duration installationTimeout = Duration.ofMinutes(60);
static final Duration certificateTimeout = Duration.ofMinutes(300);
private final Controller controller;
private final TestConfigSerializer testConfigSerializer;
private final DeploymentFailureMails mails;
public InternalStepRunner(Controller controller) {
this.controller = controller;
this.testConfigSerializer = new TestConfigSerializer(controller.system());
this.mails = new DeploymentFailureMails(controller.zoneRegistry());
}
@Override
public Optional<RunStatus> run(LockedStep step, RunId id) {
DualLogger logger = new DualLogger(id, step.get());
try {
switch (step.get()) {
case deployTester: return deployTester(id, logger);
case deployInitialReal: return deployInitialReal(id, logger);
case installInitialReal: return installInitialReal(id, logger);
case deployReal: return deployReal(id, logger);
case installTester: return installTester(id, logger);
case installReal: return installReal(id, logger);
case startStagingSetup: return startTests(id, true, logger);
case endStagingSetup: return endTests(id, logger);
case startTests: return startTests(id, false, logger);
case endTests: return endTests(id, logger);
case copyVespaLogs: return copyVespaLogs(id, logger);
case deactivateReal: return deactivateReal(id, logger);
case deactivateTester: return deactivateTester(id, logger);
case report: return report(id, logger);
default: throw new AssertionError("Unknown step '" + step + "'!");
}
}
catch (UncheckedIOException e) {
logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e);
return Optional.empty();
}
catch (RuntimeException e) {
logger.log(WARNING, "Unexpected exception running " + id, e);
if (JobProfile.of(id.type()).alwaysRun().contains(step.get())) {
logger.log("Will keep trying, as this is a cleanup step.");
return Optional.empty();
}
return Optional.of(error);
}
}
private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) {
Versions versions = controller.jobController().run(id).get().versions();
logger.log("Deploying platform version " +
versions.sourcePlatform().orElse(versions.targetPlatform()) +
" and application version " +
versions.sourceApplication().orElse(versions.targetApplication()).id() + " ...");
return deployReal(id, true, versions, logger);
}
private Optional<RunStatus> deployReal(RunId id, DualLogger logger) {
Versions versions = controller.jobController().run(id).get().versions();
logger.log("Deploying platform version " + versions.targetPlatform() +
" and application version " + versions.targetApplication().id() + " ...");
return deployReal(id, false, versions, logger);
}
private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, Versions versions, DualLogger logger) {
Optional<ApplicationPackage> applicationPackage = id.type().environment().isManuallyDeployed()
? Optional.of(new ApplicationPackage(controller.applications().applicationStore()
.getDev(id.application(), id.type().zone(controller.system()))))
: Optional.empty();
Optional<Version> vespaVersion = id.type().environment().isManuallyDeployed()
? Optional.of(versions.targetPlatform())
: Optional.empty();
return deploy(id.application(),
id.type(),
() -> controller.applications().deploy(id.application(),
id.type().zone(controller.system()),
applicationPackage,
new DeployOptions(false,
vespaVersion,
false,
setTheStage)),
controller.jobController().run(id).get()
.stepInfo(setTheStage ? deployInitialReal : deployReal).get()
.startTime().get(),
logger);
}
private Optional<RunStatus> deployTester(RunId id, DualLogger logger) {
Version platform = controller.systemVersion();
logger.log("Deploying the tester container on platform " + platform + " ...");
return deploy(id.tester().id(),
id.type(),
() -> controller.applications().deployTester(id.tester(),
testerPackage(id),
id.type().zone(controller.system()),
new DeployOptions(true,
Optional.of(platform),
false,
false)),
controller.jobController().run(id).get()
.stepInfo(deployTester).get()
.startTime().get(),
logger);
}
private Optional<RunStatus> deploy(ApplicationId id, JobType type, Supplier<ActivateResult> deployment,
Instant startTime, DualLogger logger) {
try {
PrepareResponse prepareResponse = deployment.get().prepareResponse();
if ( ! prepareResponse.configChangeActions.refeedActions.stream().allMatch(action -> action.allowed)) {
List<String> messages = new ArrayList<>();
messages.add("Deploy failed due to non-compatible changes that require re-feed.");
messages.add("Your options are:");
messages.add("1. Revert the incompatible changes.");
messages.add("2. If you think it is safe in your case, you can override this validation, see");
messages.add(" http:
messages.add("3. Deploy as a new application under a different name.");
messages.add("Illegal actions:");
prepareResponse.configChangeActions.refeedActions.stream()
.filter(action -> ! action.allowed)
.flatMap(action -> action.messages.stream())
.forEach(messages::add);
messages.add("Details:");
prepareResponse.log.stream()
.map(entry -> entry.message)
.forEach(messages::add);
logger.log(messages);
return Optional.of(deploymentFailed);
}
if (prepareResponse.configChangeActions.restartActions.isEmpty())
logger.log("No services requiring restart.");
else
prepareResponse.configChangeActions.restartActions.stream()
.flatMap(action -> action.services.stream())
.map(service -> service.hostName)
.sorted().distinct()
.map(Hostname::new)
.forEach(hostname -> {
controller.applications().restart(new DeploymentId(id, type.zone(controller.system())), Optional.of(hostname));
logger.log("Restarting services on host " + hostname.id() + ".");
});
logger.log("Deployment successful.");
if (prepareResponse.message != null)
logger.log(prepareResponse.message);
return Optional.of(running);
}
catch (ConfigServerException e) {
Optional<RunStatus> result = startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(deploymentFailed) : Optional.empty();
switch (e.getErrorCode()) {
case ACTIVATION_CONFLICT:
case APPLICATION_LOCK_FAILURE:
case CERTIFICATE_NOT_READY:
logger.log("Deployment failed with possibly transient error " + e.getErrorCode() +
", will retry: " + e.getMessage());
return result;
case LOAD_BALANCER_NOT_READY:
case PARENT_HOST_NOT_READY:
logger.log(e.getServerMessage());
return result;
case OUT_OF_CAPACITY:
logger.log(e.getServerMessage());
return Optional.of(outOfCapacity);
case INVALID_APPLICATION_PACKAGE:
case BAD_REQUEST:
logger.log(e.getMessage());
return Optional.of(deploymentFailed);
}
throw e;
}
}
private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) {
return installReal(id, true, logger);
}
private Optional<RunStatus> installReal(RunId id, DualLogger logger) {
return installReal(id, false, logger);
}
private Optional<RunStatus> installTester(RunId id, DualLogger logger) {
Run run = controller.jobController().run(id).get();
Version platform = controller.systemVersion();
ZoneId zone = id.type().zone(controller.system());
ApplicationId testerId = id.tester().id();
Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(testerId, zone),
Optional.of(platform));
if (services.isEmpty()) {
logger.log("Config status not currently available -- will retry.");
return run.stepInfo(installTester).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
? Optional.of(error)
: Optional.empty();
}
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone,
testerId,
ImmutableSet.of(active, reserved));
List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(zone,
nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
NodeList nodeList = NodeList.of(nodes, parents, services.get());
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, false))
.collect(toList()));
if (nodeList.summary().converged()) {
if (endpointsAvailable(testerId, zone, logger)) {
if (containersAreUp(testerId, zone, logger)) {
logger.log("Tester container successfully installed!");
return Optional.of(running);
}
}
else if (run.stepInfo(installTester).get().startTime().get().plus(endpointTimeout).isBefore(controller.clock().instant())) {
logger.log(WARNING, "Tester failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
return Optional.of(error);
}
}
if (run.stepInfo(installTester).get().startTime().get().plus(testerTimeout).isBefore(controller.clock().instant())) {
logger.log(WARNING, "Installation of tester failed to complete within " + testerTimeout.toMinutes() + " minutes!");
return Optional.of(error);
}
return Optional.empty();
}
/** Returns true iff all containers in the deployment give 100 consecutive 200 OK responses on /status.html. */
private boolean containersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
var endpoints = controller.applications().clusterEndpoints(Set.of(new DeploymentId(id, zoneId)));
if ( ! endpoints.containsKey(zoneId))
return false;
for (URI endpoint : endpoints.get(zoneId).values()) {
boolean ready = id.instance().isTester() ? controller.jobController().cloud().testerReady(endpoint)
: controller.jobController().cloud().ready(endpoint);
if (!ready) {
logger.log("Failed to get 100 consecutive OKs from " + endpoint);
return false;
}
}
return true;
}
private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) {
var endpoints = controller.applications().clusterEndpoints(Set.of(new DeploymentId(id, zone)));
if ( ! endpoints.containsKey(zone)) {
logger.log("Endpoints not yet ready.");
return false;
}
for (var endpoint : endpoints.get(zone).values())
if ( ! controller.jobController().cloud().exists(endpoint)) {
logger.log(INFO, "DNS lookup yielded no IP address for '" + endpoint + "'.");
return false;
}
logEndpoints(endpoints, logger);
return true;
}
private void logEndpoints(Map<ZoneId, Map<ClusterSpec.Id, URI>> endpoints, DualLogger logger) {
List<String> messages = new ArrayList<>();
messages.add("Found endpoints:");
endpoints.forEach((zone, uris) -> {
messages.add("- " + zone);
uris.forEach((cluster, uri) -> messages.add(" |-- " + uri + " (" + cluster + ")"));
});
logger.log(messages);
}
private Stream<String> nodeDetails(NodeWithServices node, boolean printAllServices) {
return Stream.concat(Stream.of(node.node().hostname() + ": " + humanize(node.node().serviceState()),
"--- platform " + node.node().wantedVersion() + (node.needsPlatformUpgrade()
? " <-- " + (node.node().currentVersion().isEmpty() ? "not booted" : node.node().currentVersion())
: "") +
(node.needsOsUpgrade() && node.isAllowedDown()
? ", upgrading OS (" + node.node().wantedOsVersion() + " <-- " + node.node().currentOsVersion() + ")"
: "") +
(node.needsFirmwareUpgrade() && node.isAllowedDown()
? ", upgrading firmware"
: "") +
(node.needsRestart()
? ", restart pending (" + node.node().wantedRestartGeneration() + " <-- " + node.node().restartGeneration() + ")"
: "") +
(node.needsReboot()
? ", reboot pending (" + node.node().wantedRebootGeneration() + " <-- " + node.node().rebootGeneration() + ")"
: "")),
node.services().stream()
.filter(service -> printAllServices || node.needsNewConfig())
.map(service -> "--- " + service.type() + " on port " + service.port() + (service.currentGeneration() == -1
? " has not started "
: " has config generation " + service.currentGeneration() + ", wanted is " + node.wantedConfigGeneration())));
}
private String humanize(Node.ServiceState state) {
switch (state) {
case allowedDown: return "allowed to be DOWN";
case expectedUp: return "expected to be UP";
case unorchestrated: return "unorchestrated";
default: return state.name();
}
}
private Optional<RunStatus> startTests(RunId id, boolean isSetup, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
logger.log(INFO, "Deployment expired before tests could start.");
return Optional.of(error);
}
var deployments = controller.applications().requireInstance(id.application())
.productionDeployments().keySet().stream()
.map(zone -> new DeploymentId(id.application(), zone))
.collect(Collectors.toSet());
deployments.add(new DeploymentId(id.application(), id.type().zone(controller.system())));
logger.log("Attempting to find endpoints ...");
var endpoints = controller.applications().clusterEndpoints(deployments);
if ( ! endpoints.containsKey(id.type().zone(controller.system()))) {
logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!");
return Optional.of(error);
}
logEndpoints(endpoints, logger);
Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id);
if (testerEndpoint.isEmpty()) {
logger.log(WARNING, "Endpoints for the tester container vanished again, while it was still active!");
return Optional.of(error);
}
if ( ! controller.jobController().cloud().testerReady(testerEndpoint.get())) {
logger.log(WARNING, "Tester container went bad!");
return Optional.of(error);
}
logger.log("Starting tests ...");
controller.jobController().cloud().startTests(testerEndpoint.get(),
TesterCloud.Suite.of(id.type(), isSetup),
testConfigSerializer.configJson(id.application(),
id.type(),
true,
endpoints,
controller.applications().contentClustersByZone(deployments)));
return Optional.of(running);
}
private Optional<RunStatus> endTests(RunId id, DualLogger logger) {
if (deployment(id.application(), id.type()).isEmpty()) {
logger.log(INFO, "Deployment expired before tests could complete.");
return Optional.of(aborted);
}
Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate();
if (testerCertificate.isPresent()) {
try {
testerCertificate.get().checkValidity(Date.from(controller.clock().instant()));
}
catch (CertificateExpiredException | CertificateNotYetValidException e) {
logger.log(INFO, "Tester certificate expired before tests could complete.");
return Optional.of(aborted);
}
}
controller.jobController().updateTestLog(id);
BooleanFlag useConfigServerForTesterAPI = Flags.USE_CONFIG_SERVER_FOR_TESTER_API_CALLS.bindTo(controller.flagSource());
ZoneId zoneId = id.type().zone(controller.system());
TesterCloud.Status testStatus;
boolean useConfigServer = useConfigServerForTesterAPI.with(FetchVector.Dimension.ZONE_ID, zoneId.value()).value();
InternalStepRunner.logger.log(LogLevel.INFO, Flags.USE_CONFIG_SERVER_FOR_TESTER_API_CALLS.id().toString() +
" has value " + useConfigServer + " in zone " + zoneId.value());
if (useConfigServer) {
testStatus = controller.serviceRegistry().configServer().getTesterStatus(getTesterDeploymentId(id, zoneId));
} else {
Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id);
if (testerEndpoint.isEmpty()) {
logger.log("Endpoints for tester not found -- trying again later.");
return Optional.empty();
}
testStatus = controller.jobController().cloud().getStatus(testerEndpoint.get());
}
switch (testStatus) {
case NOT_STARTED:
throw new IllegalStateException("Tester reports tests not started, even though they should have!");
case RUNNING:
return Optional.empty();
case FAILURE:
logger.log("Tests failed.");
return Optional.of(testFailure);
case ERROR:
logger.log(INFO, "Tester failed running its tests!");
return Optional.of(error);
case SUCCESS:
logger.log("Tests completed successfully.");
return Optional.of(running);
default:
throw new IllegalStateException("Unknown status '" + testStatus + "'!");
}
}
private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) {
if (deployment(id.application(), id.type()).isPresent())
try {
controller.jobController().updateVespaLog(id);
}
catch (Exception e) {
logger.log(INFO, "Failure getting vespa logs for " + id, e);
return Optional.of(error);
}
return Optional.of(running);
}
private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) {
try {
logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
controller.applications().deactivate(id.application(), id.type().zone(controller.system()));
return Optional.of(running);
}
catch (RuntimeException e) {
logger.log(WARNING, "Failed deleting application " + id.application(), e);
Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateReal).get().startTime().get();
return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(error)
: Optional.empty();
}
}
private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) {
try {
logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
controller.jobController().deactivateTester(id.tester(), id.type());
return Optional.of(running);
}
catch (RuntimeException e) {
logger.log(WARNING, "Failed deleting tester of " + id.application(), e);
Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateTester).get().startTime().get();
return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(error)
: Optional.empty();
}
}
private Optional<RunStatus> report(RunId id, DualLogger logger) {
try {
controller.jobController().active(id).ifPresent(run -> {
if (run.hasFailed())
sendNotification(run, logger);
});
}
catch (IllegalStateException e) {
logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e);
return Optional.of(error);
}
return Optional.of(running);
}
/** Sends a mail with a notification of a failed run, if one should be sent. */
private void sendNotification(Run run, DualLogger logger) {
Application application = controller.applications().requireApplication(TenantAndApplicationId.from(run.id().application()));
Notifications notifications = application.deploymentSpec().requireInstance(run.id().application().instance()).notifications();
boolean newCommit = application.require(run.id().application().instance()).change().application()
.map(run.versions().targetApplication()::equals)
.orElse(false);
When when = newCommit ? failingCommit : failing;
List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when));
if (notifications.emailRolesFor(when).contains(author))
run.versions().targetApplication().authorEmail().ifPresent(recipients::add);
if (recipients.isEmpty())
return;
try {
if (run.status() == outOfCapacity && run.id().type().isProduction())
controller.serviceRegistry().mailer().send(mails.outOfCapacity(run.id(), recipients));
if (run.status() == deploymentFailed)
controller.serviceRegistry().mailer().send(mails.deploymentFailure(run.id(), recipients));
if (run.status() == installationFailed)
controller.serviceRegistry().mailer().send(mails.installationFailure(run.id(), recipients));
if (run.status() == testFailure)
controller.serviceRegistry().mailer().send(mails.testFailure(run.id(), recipients));
if (run.status() == error)
controller.serviceRegistry().mailer().send(mails.systemError(run.id(), recipients));
}
catch (RuntimeException e) {
logger.log(INFO, "Exception trying to send mail for " + run.id(), e);
}
}
/** Returns the deployment of the real application in the zone of the given job, if it exists. */
private Optional<Deployment> deployment(ApplicationId id, JobType type) {
return Optional.ofNullable(application(id).deployments().get(type.zone(controller.system())));
}
/** Returns the real application with the given id. */
private Instance application(ApplicationId id) {
// Takes and immediately releases the application lock with a no-op action —
// presumably to serialise against any in-flight write before the read below; TODO confirm.
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { });
return controller.applications().requireInstance(id);
}
/**
 * Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout.
 *
 * We time out the job before the deployment expires, for zones where deployments are not persistent,
 * to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry,
 * and the given default installation timeout, minus one minute, is used as a timeout threshold.
 */
private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) {
Run run = controller.jobController().run(id).get();
// Outside CD, a run which started after the deployment was made never times out here.
if ( ! controller.system().isCd() && run.start().isAfter(deployment.at()))
return false;
// Use the zone's deployment time-to-live when it is shorter than the default timeout.
Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone())
.filter(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0)
.orElse(defaultTimeout);
// One minute of slack, as described above, so logs can be collected before expiry.
return deployment.at().isBefore(controller.clock().instant().minus(timeout.minus(Duration.ofMinutes(1))));
}
/** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */
private ApplicationPackage testerPackage(RunId id) {
ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication();
DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec();
ZoneId zone = id.type().zone(controller.system());
// Tester certificates are used only in public systems, and only for test environments.
boolean useTesterCertificate = controller.system().isPublic() && id.type().environment().isTest();
// Tester resources: spec-declared flavor when present, else an AWS or default preset by region name.
byte[] servicesXml = servicesXml(controller.zoneRegistry().accessControlDomain(),
! controller.system().isPublic(),
useTesterCertificate,
testerFlavorFor(id, spec)
.map(NodeResources::fromLegacyName)
.orElse(zone.region().value().contains("aws-") ?
DEFAULT_TESTER_RESOURCES_AWS : DEFAULT_TESTER_RESOURCES));
byte[] testPackage = controller.applications().applicationStore().getTester(id.application().tenant(), id.application().application(), version);
byte[] deploymentXml = deploymentXml(id.tester(),
spec.athenzDomain(),
spec.requireInstance(id.application().instance()).athenzService(zone.environment(), zone.region()));
// Zip the stored test package together with the generated services.xml and deployment.xml.
try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) {
zipBuilder.add(testPackage);
zipBuilder.add("services.xml", servicesXml);
zipBuilder.add("deployment.xml", deploymentXml);
if (useTesterCertificate)
appendAndStoreCertificate(zipBuilder, id);
zipBuilder.close();
return new ApplicationPackage(zipBuilder.toByteArray());
}
}
/**
 * Generates an RSA-2048 key pair and a certificate built from it, stores the certificate
 * on the run, and adds the PEM key and certificate to the package under artifacts/.
 */
private void appendAndStoreCertificate(ZipBuilder zipBuilder, RunId id) {
KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048);
// Subject encodes the tester id, job type and run number, for traceability.
X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "." + id.number());
X509Certificate certificate = X509CertificateBuilder.fromKeypair(keyPair,
subject,
controller.clock().instant(),
controller.clock().instant().plus(certificateTimeout),
SignatureAlgorithm.SHA512_WITH_RSA,
BigInteger.valueOf(1))
.build();
// Stored so endTests can later check the certificate's validity window.
controller.jobController().storeTesterCertificate(id, certificate);
zipBuilder.add("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8));
zipBuilder.add("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8));
}
/** The deployment id of this run's tester application in the given zone. */
private DeploymentId getTesterDeploymentId(RunId runId, ZoneId zoneId) {
    ApplicationId testerApplication = runId.tester().id();
    return new DeploymentId(testerApplication, zoneId);
}
/**
 * The tester flavor declared for the first zone of the first deployment step
 * which concerns this job's environment, if any.
 */
private static Optional<String> testerFlavorFor(RunId id, DeploymentSpec spec) {
    return spec.steps().stream()
               .filter(step -> step.concerns(id.type().environment()))
               .findFirst()
               .flatMap(step -> step.zones().get(0).testerFlavor());
}
/** Returns the generated services.xml content for the tester application. */
static byte[] servicesXml(AthenzDomain domain, boolean systemUsesAthenz, boolean useTesterCertificate,
NodeResources resources) {
// 2 Gb is reserved for the jdisc container; half the remainder goes to the surefire test JVM.
int jdiscMemoryGb = 2;
int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb());
int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2);
String resourceString = String.format(Locale.ENGLISH,
"<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>",
resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name());
// The CD system's domain is mapped to the production identity domain for the SIA key/cert paths below.
AthenzDomain idDomain = ("vespa.vespa.cd".equals(domain.value()) ? AthenzDomain.from("vespa.vespa") : domain);
String servicesXml =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<services xmlns:deploy='vespa' version='1.0'>\n" +
" <container version='1.0' id='tester'>\n" +
"\n" +
" <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" +
" <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" +
" <artifactsPath>artifacts</artifactsPath>\n" +
" <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" +
" <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
" <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" +
" </config>\n" +
" </component>\n" +
"\n" +
" <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" +
" <binding>http:
" </handler>\n" +
"\n" +
" <http>\n" +
" <!-- Make sure 4080 is the first port. This will be used by the config server. -->\n" +
" <server id='default' port='4080'/>\n" +
" <server id='testertls4443' port='4443'>\n" +
" <config name=\"jdisc.http.connector\">\n" +
" <tlsClientAuthEnforcer>\n" +
" <enable>true</enable>\n" +
" <pathWhitelist>\n" +
" <item>/status.html</item>\n" +
" <item>/state/v1/config</item>\n" +
" </pathWhitelist>\n" +
" </tlsClientAuthEnforcer>\n" +
" </config>\n" +
" <ssl>\n" +
" <private-key-file>/var/lib/sia/keys/" + idDomain.value() + ".tenant.key.pem</private-key-file>\n" +
" <certificate-file>/var/lib/sia/certs/" + idDomain.value() + ".tenant.cert.pem</certificate-file>\n" +
" <ca-certificates-file>/opt/yahoo/share/ssl/certs/athenz_certificate_bundle.pem</ca-certificates-file>\n" +
" <client-authentication>want</client-authentication>\n" +
" </ssl>\n" +
" </server>\n" +
" <filtering>\n" +
(systemUsesAthenz ?
" <access-control domain='" + domain.value() + "'>\n" +
" <exclude>\n" +
" <binding>http:
" </exclude>\n" +
" </access-control>\n"
: "") +
" <request-chain id=\"testrunner-api\">\n" +
" <filter id='authz-filter' class='com.yahoo.jdisc.http.filter.security.athenz.AthenzAuthorizationFilter' bundle=\"jdisc-security-filters\">\n" +
" <config name=\"jdisc.http.filter.security.athenz.athenz-authorization-filter\">\n" +
" <credentialsToVerify>TOKEN_ONLY</credentialsToVerify>\n" +
" <roleTokenHeaderName>Yahoo-Role-Auth</roleTokenHeaderName>\n" +
" </config>\n" +
" <component id=\"com.yahoo.jdisc.http.filter.security.athenz.StaticRequestResourceMapper\" bundle=\"jdisc-security-filters\">\n" +
" <config name=\"jdisc.http.filter.security.athenz.static-request-resource-mapper\">\n" +
" <resourceName>" + domain.value() + ":tester-application</resourceName>\n" +
" <action>deploy</action>\n" +
" </config>\n" +
" </component>\n" +
" </filter>\n" +
" </request-chain>\n" +
" </filtering>\n" +
" </http>\n" +
"\n" +
" <nodes count=\"1\" allocated-memory=\"" + jdiscMemoryPct + "%\">\n" +
" " + resourceString + "\n" +
" </nodes>\n" +
" </container>\n" +
"</services>\n";
return servicesXml.getBytes(UTF_8);
}
/** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */
private static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) {
    StringBuilder xml = new StringBuilder();
    xml.append("<?xml version='1.0' encoding='UTF-8'?>\n")
       .append("<deployment version=\"1.0\" ");
    athenzDomain.ifPresent(domain -> xml.append("athenz-domain=\"").append(domain.value()).append("\" "));
    athenzService.ifPresent(service -> xml.append("athenz-service=\"").append(service.value()).append("\" "));
    xml.append(">")
       .append(" <instance id=\"").append(id.id().instance().value()).append("\" />")
       .append("</deployment>");
    return xml.toString().getBytes(UTF_8);
}
/** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */
private class DualLogger {
private final RunId id;
private final Step step;
private DualLogger(RunId id, Step step) {
this.id = id;
this.step = step;
}
// Logs the given messages to the job record only, at INFO.
private void log(String... messages) {
log(List.of(messages));
}
private void log(List<String> messages) {
controller.jobController().log(id, step, INFO, messages);
}
private void log(Level level, String message) {
log(level, message, null);
}
// Logs the throwable to the server log only; the job record gets just the message.
private void logWithInternalException(Level level, String message, Throwable thrown) {
logger.log(level, id + " at " + step + ": " + message, thrown);
controller.jobController().log(id, step, level, message);
}
// Logs to both the server log and the job record; the job record also gets the stack trace, when present.
private void log(Level level, String message, Throwable thrown) {
logger.log(level, id + " at " + step + ": " + message, thrown);
if (thrown != null) {
ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream();
thrown.printStackTrace(new PrintStream(traceBuffer));
message += "\n" + traceBuffer;
}
controller.jobController().log(id, step, level, message);
}
}
} | class InternalStepRunner implements StepRunner {
private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName());
// Default tester node resources; a larger preset is used in AWS regions.
private static final NodeResources DEFAULT_TESTER_RESOURCES =
new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any);
private static final NodeResources DEFAULT_TESTER_RESOURCES_AWS =
new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any);
// Timeouts for the various wait phases of a run.
static final Duration endpointTimeout = Duration.ofMinutes(15);
static final Duration testerTimeout = Duration.ofMinutes(30);
static final Duration installationTimeout = Duration.ofMinutes(60);
// Validity period of the generated tester certificate.
static final Duration certificateTimeout = Duration.ofMinutes(300);
private final Controller controller;
private final TestConfigSerializer testConfigSerializer;
private final DeploymentFailureMails mails;
/** Creates a step runner backed by the given controller. */
public InternalStepRunner(Controller controller) {
this.controller = controller;
this.testConfigSerializer = new TestConfigSerializer(controller.system());
this.mails = new DeploymentFailureMails(controller.zoneRegistry());
}
// Dispatches the given step to its handler; empty means "retry later",
// a present status is the step's outcome.
@Override
public Optional<RunStatus> run(LockedStep step, RunId id) {
DualLogger logger = new DualLogger(id, step.get());
try {
switch (step.get()) {
case deployTester: return deployTester(id, logger);
case deployInitialReal: return deployInitialReal(id, logger);
case installInitialReal: return installInitialReal(id, logger);
case deployReal: return deployReal(id, logger);
case installTester: return installTester(id, logger);
case installReal: return installReal(id, logger);
case startStagingSetup: return startTests(id, true, logger);
case endStagingSetup: return endTests(id, logger);
case startTests: return startTests(id, false, logger);
case endTests: return endTests(id, logger);
case copyVespaLogs: return copyVespaLogs(id, logger);
case deactivateReal: return deactivateReal(id, logger);
case deactivateTester: return deactivateTester(id, logger);
case report: return report(id, logger);
default: throw new AssertionError("Unknown step '" + step + "'!");
}
}
catch (UncheckedIOException e) {
// IO trouble is assumed transient: log and retry.
logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e);
return Optional.empty();
}
catch (RuntimeException e) {
logger.log(WARNING, "Unexpected exception running " + id, e);
// Cleanup steps are retried indefinitely; other steps fail the run with error.
if (JobProfile.of(id.type()).alwaysRun().contains(step.get())) {
logger.log("Will keep trying, as this is a cleanup step.");
return Optional.empty();
}
return Optional.of(error);
}
}
/** Deploys the source (or, failing that, target) platform and application versions, setting the stage. */
private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) {
    Versions versions = controller.jobController().run(id).get().versions();
    Version platform = versions.sourcePlatform().orElse(versions.targetPlatform());
    ApplicationVersion application = versions.sourceApplication().orElse(versions.targetApplication());
    logger.log("Deploying platform version " +
               platform +
               " and application version " +
               application.id() + " ...");
    return deployReal(id, true, versions, logger);
}
/** Deploys the target platform and application versions. */
private Optional<RunStatus> deployReal(RunId id, DualLogger logger) {
    Versions versions = controller.jobController().run(id).get().versions();
    String message = "Deploying platform version " + versions.targetPlatform() +
                     " and application version " + versions.targetApplication().id() + " ...";
    logger.log(message);
    return deployReal(id, false, versions, logger);
}
/**
 * Deploys the real application. Manual (dev/perf) deployments use the stored dev package
 * and pin the platform version; pipeline deployments let the config server pick both.
 */
private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, Versions versions, DualLogger logger) {
Optional<ApplicationPackage> applicationPackage = id.type().environment().isManuallyDeployed()
? Optional.of(new ApplicationPackage(controller.applications().applicationStore()
.getDev(id.application(), id.type().zone(controller.system()))))
: Optional.empty();
Optional<Version> vespaVersion = id.type().environment().isManuallyDeployed()
? Optional.of(versions.targetPlatform())
: Optional.empty();
// The step's start time bounds how long transient deploy failures are retried.
return deploy(id.application(),
id.type(),
() -> controller.applications().deploy(id.application(),
id.type().zone(controller.system()),
applicationPackage,
new DeployOptions(false,
vespaVersion,
false,
setTheStage)),
controller.jobController().run(id).get()
.stepInfo(setTheStage ? deployInitialReal : deployReal).get()
.startTime().get(),
logger);
}
/** Deploys the tester container package on the current system platform version. */
private Optional<RunStatus> deployTester(RunId id, DualLogger logger) {
Version platform = controller.systemVersion();
logger.log("Deploying the tester container on platform " + platform + " ...");
return deploy(id.tester().id(),
id.type(),
() -> controller.applications().deployTester(id.tester(),
testerPackage(id),
id.type().zone(controller.system()),
new DeployOptions(true,
Optional.of(platform),
false,
false)),
controller.jobController().run(id).get()
.stepInfo(deployTester).get()
.startTime().get(),
logger);
}
/**
 * Performs the given deployment and logs the outcome. Returns running on success,
 * a failure status on definite failure, and empty to retry transient errors —
 * retried for up to one hour from the given step start time.
 */
private Optional<RunStatus> deploy(ApplicationId id, JobType type, Supplier<ActivateResult> deployment,
Instant startTime, DualLogger logger) {
try {
PrepareResponse prepareResponse = deployment.get().prepareResponse();
// Disallowed re-feed actions make this deployment unusable: fail with guidance.
if ( ! prepareResponse.configChangeActions.refeedActions.stream().allMatch(action -> action.allowed)) {
List<String> messages = new ArrayList<>();
messages.add("Deploy failed due to non-compatible changes that require re-feed.");
messages.add("Your options are:");
messages.add("1. Revert the incompatible changes.");
messages.add("2. If you think it is safe in your case, you can override this validation, see");
messages.add(" http:
messages.add("3. Deploy as a new application under a different name.");
messages.add("Illegal actions:");
prepareResponse.configChangeActions.refeedActions.stream()
.filter(action -> ! action.allowed)
.flatMap(action -> action.messages.stream())
.forEach(messages::add);
messages.add("Details:");
prepareResponse.log.stream()
.map(entry -> entry.message)
.forEach(messages::add);
logger.log(messages);
return Optional.of(deploymentFailed);
}
// Restart each affected host once, even when several services on it need a restart.
if (prepareResponse.configChangeActions.restartActions.isEmpty())
logger.log("No services requiring restart.");
else
prepareResponse.configChangeActions.restartActions.stream()
.flatMap(action -> action.services.stream())
.map(service -> service.hostName)
.sorted().distinct()
.map(Hostname::new)
.forEach(hostname -> {
controller.applications().restart(new DeploymentId(id, type.zone(controller.system())), Optional.of(hostname));
logger.log("Restarting services on host " + hostname.id() + ".");
});
logger.log("Deployment successful.");
if (prepareResponse.message != null)
logger.log(prepareResponse.message);
return Optional.of(running);
}
catch (ConfigServerException e) {
// Transient errors fail the run only after an hour of retries.
Optional<RunStatus> result = startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(deploymentFailed) : Optional.empty();
switch (e.getErrorCode()) {
case ACTIVATION_CONFLICT:
case APPLICATION_LOCK_FAILURE:
case CERTIFICATE_NOT_READY:
logger.log("Deployment failed with possibly transient error " + e.getErrorCode() +
", will retry: " + e.getMessage());
return result;
case LOAD_BALANCER_NOT_READY:
case PARENT_HOST_NOT_READY:
logger.log(e.getServerMessage());
return result;
case OUT_OF_CAPACITY:
logger.log(e.getServerMessage());
return Optional.of(outOfCapacity);
case INVALID_APPLICATION_PACKAGE:
case BAD_REQUEST:
logger.log(e.getMessage());
return Optional.of(deploymentFailed);
}
// Unrecognised error codes are unexpected; let the caller's handler deal with them.
throw e;
}
}
/** Checks installation progress of the initial real deployment, i.e., the staging setup. */
private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) {
return installReal(id, true, logger);
}
/** Checks installation progress of the real deployment under test. */
private Optional<RunStatus> installReal(RunId id, DualLogger logger) {
return installReal(id, false, logger);
}
/**
 * Checks whether the tester deployment has converged, its endpoints are visible,
 * and its containers answer; returns running when done, error on timeout, and
 * empty to check again later.
 */
private Optional<RunStatus> installTester(RunId id, DualLogger logger) {
    Run run = controller.jobController().run(id).get();
    Version platform = controller.systemVersion();
    ZoneId zone = id.type().zone(controller.system());
    ApplicationId testerId = id.tester().id();
    // Hoisted once: the step's start time is the reference point for every timeout below.
    Instant startTime = run.stepInfo(installTester).get().startTime().get();
    Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(testerId, zone),
                                                                                                           Optional.of(platform));
    if (services.isEmpty()) {
        logger.log("Config status not currently available -- will retry.");
        // Give up when convergence status has been unavailable for five minutes.
        return startTime.isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
               ? Optional.of(error)
               : Optional.empty();
    }
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone,
                                                                                        testerId,
                                                                                        ImmutableSet.of(active, reserved));
    List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(zone,
                                                                                          nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
    NodeList nodeList = NodeList.of(nodes, parents, services.get());
    logger.log(nodeList.asList().stream()
                       .flatMap(node -> nodeDetails(node, false))
                       .collect(toList()));
    if (nodeList.summary().converged()) {
        if (endpointsAvailable(testerId, zone, logger)) {
            if (containersAreUp(testerId, zone, logger)) {
                logger.log("Tester container successfully installed!");
                return Optional.of(running);
            }
        }
        else if (startTime.plus(endpointTimeout).isBefore(controller.clock().instant())) {
            logger.log(WARNING, "Tester failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
            return Optional.of(error);
        }
    }
    if (startTime.plus(testerTimeout).isBefore(controller.clock().instant())) {
        logger.log(WARNING, "Installation of tester failed to complete within " + testerTimeout.toMinutes() + " minutes!");
        return Optional.of(error);
    }
    return Optional.empty();
}
/** Returns true iff all containers in the deployment give 100 consecutive 200 OK responses on /status.html. */
private boolean containersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
var endpoints = controller.applications().clusterEndpoints(Set.of(new DeploymentId(id, zoneId)));
if ( ! endpoints.containsKey(zoneId))
return false;
for (URI endpoint : endpoints.get(zoneId).values()) {
// Tester instances use the tester-specific readiness probe; real ones the plain one.
boolean ready = id.instance().isTester() ? controller.jobController().cloud().testerReady(endpoint)
: controller.jobController().cloud().ready(endpoint);
if (!ready) {
logger.log("Failed to get 100 consecutive OKs from " + endpoint);
return false;
}
}
return true;
}
/** Returns true iff all cluster endpoints for the deployment exist and resolve in DNS; logs them when they do. */
private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) {
var endpoints = controller.applications().clusterEndpoints(Set.of(new DeploymentId(id, zone)));
if ( ! endpoints.containsKey(zone)) {
logger.log("Endpoints not yet ready.");
return false;
}
// Every endpoint must resolve before the deployment is considered reachable.
for (var endpoint : endpoints.get(zone).values())
if ( ! controller.jobController().cloud().exists(endpoint)) {
logger.log(INFO, "DNS lookup yielded no IP address for '" + endpoint + "'.");
return false;
}
logEndpoints(endpoints, logger);
return true;
}
/** Logs a listing of all known endpoints, grouped per zone, to the job record. */
private void logEndpoints(Map<ZoneId, Map<ClusterSpec.Id, URI>> endpoints, DualLogger logger) {
    List<String> lines = new ArrayList<>();
    lines.add("Found endpoints:");
    for (var zoneEntry : endpoints.entrySet()) {
        lines.add("- " + zoneEntry.getKey());
        for (var clusterEntry : zoneEntry.getValue().entrySet())
            lines.add(" |-- " + clusterEntry.getValue() + " (" + clusterEntry.getKey() + ")");
    }
    logger.log(lines);
}
/**
 * Renders one node's convergence state as log lines: a header with its orchestration
 * state, a "--- platform" line annotated with any pending upgrade/restart/reboot,
 * and one line per service whose config generation lags (or all services, when asked).
 */
private Stream<String> nodeDetails(NodeWithServices node, boolean printAllServices) {
return Stream.concat(Stream.of(node.node().hostname() + ": " + humanize(node.node().serviceState()),
"--- platform " + node.node().wantedVersion() + (node.needsPlatformUpgrade()
? " <-- " + (node.node().currentVersion().isEmpty() ? "not booted" : node.node().currentVersion())
: "") +
(node.needsOsUpgrade() && node.isAllowedDown()
? ", upgrading OS (" + node.node().wantedOsVersion() + " <-- " + node.node().currentOsVersion() + ")"
: "") +
(node.needsFirmwareUpgrade() && node.isAllowedDown()
? ", upgrading firmware"
: "") +
(node.needsRestart()
? ", restart pending (" + node.node().wantedRestartGeneration() + " <-- " + node.node().restartGeneration() + ")"
: "") +
(node.needsReboot()
? ", reboot pending (" + node.node().wantedRebootGeneration() + " <-- " + node.node().rebootGeneration() + ")"
: "")),
node.services().stream()
.filter(service -> printAllServices || node.needsNewConfig())
.map(service -> "--- " + service.type() + " on port " + service.port() + (service.currentGeneration() == -1
? " has not started "
: " has config generation " + service.currentGeneration() + ", wanted is " + node.wantedConfigGeneration())));
}
/** Human-readable rendering of a node's orchestration state. */
private String humanize(Node.ServiceState state) {
    if (state == Node.ServiceState.allowedDown) return "allowed to be DOWN";
    if (state == Node.ServiceState.expectedUp) return "expected to be UP";
    if (state == Node.ServiceState.unorchestrated) return "unorchestrated";
    return state.name();
}
/**
 * Starts the test suite (staging setup or real tests) in the tester container,
 * handing it the endpoints of the deployment under test and all production deployments.
 */
private Optional<RunStatus> startTests(RunId id, boolean isSetup, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
logger.log(INFO, "Deployment expired before tests could start.");
return Optional.of(error);
}
// All production deployments, plus the deployment under test, are exposed to the tests.
var deployments = controller.applications().requireInstance(id.application())
.productionDeployments().keySet().stream()
.map(zone -> new DeploymentId(id.application(), zone))
.collect(Collectors.toSet());
deployments.add(new DeploymentId(id.application(), id.type().zone(controller.system())));
logger.log("Attempting to find endpoints ...");
var endpoints = controller.applications().clusterEndpoints(deployments);
if ( ! endpoints.containsKey(id.type().zone(controller.system()))) {
logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!");
return Optional.of(error);
}
logEndpoints(endpoints, logger);
Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id);
if (testerEndpoint.isEmpty()) {
logger.log(WARNING, "Endpoints for the tester container vanished again, while it was still active!");
return Optional.of(error);
}
if ( ! controller.jobController().cloud().testerReady(testerEndpoint.get())) {
logger.log(WARNING, "Tester container went bad!");
return Optional.of(error);
}
logger.log("Starting tests ...");
controller.jobController().cloud().startTests(testerEndpoint.get(),
TesterCloud.Suite.of(id.type(), isSetup),
testConfigSerializer.configJson(id.application(),
id.type(),
true,
endpoints,
controller.applications().contentClustersByZone(deployments)));
return Optional.of(running);
}
/**
 * Polls the tester for test progress, translating its status into a run status;
 * aborts when the deployment or the tester certificate has expired mid-run.
 */
private Optional<RunStatus> endTests(RunId id, DualLogger logger) {
if (deployment(id.application(), id.type()).isEmpty()) {
logger.log(INFO, "Deployment expired before tests could complete.");
return Optional.of(aborted);
}
Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate();
if (testerCertificate.isPresent()) {
try {
testerCertificate.get().checkValidity(Date.from(controller.clock().instant()));
}
catch (CertificateExpiredException | CertificateNotYetValidException e) {
logger.log(INFO, "Tester certificate expired before tests could complete.");
return Optional.of(aborted);
}
}
controller.jobController().updateTestLog(id);
// Feature flag: read tester status via the config server, or directly from the tester endpoint.
BooleanFlag useConfigServerForTesterAPI = Flags.USE_CONFIG_SERVER_FOR_TESTER_API_CALLS.bindTo(controller.flagSource());
ZoneId zoneId = id.type().zone(controller.system());
TesterCloud.Status testStatus;
boolean useConfigServer = useConfigServerForTesterAPI.with(FetchVector.Dimension.ZONE_ID, zoneId.value()).value();
// NOTE(review): logs via the static class logger (and deprecated LogLevel) rather than the
// job's DualLogger — looks intentional for flag debugging; confirm before changing.
InternalStepRunner.logger.log(LogLevel.INFO, Flags.USE_CONFIG_SERVER_FOR_TESTER_API_CALLS.id().toString() +
" has value " + useConfigServer + " in zone " + zoneId.value());
if (useConfigServer) {
testStatus = controller.serviceRegistry().configServer().getTesterStatus(getTesterDeploymentId(id, zoneId));
} else {
Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id);
if (testerEndpoint.isEmpty()) {
logger.log("Endpoints for tester not found -- trying again later.");
return Optional.empty();
}
testStatus = controller.jobController().cloud().getStatus(testerEndpoint.get());
}
switch (testStatus) {
case NOT_STARTED:
throw new IllegalStateException("Tester reports tests not started, even though they should have!");
case RUNNING:
return Optional.empty();
case FAILURE:
logger.log("Tests failed.");
return Optional.of(testFailure);
case ERROR:
logger.log(INFO, "Tester failed running its tests!");
return Optional.of(error);
case SUCCESS:
logger.log("Tests completed successfully.");
return Optional.of(running);
default:
throw new IllegalStateException("Unknown status '" + testStatus + "'!");
}
}
/** Copies the Vespa log of the real deployment into the job record, if the deployment still exists. */
private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) {
    boolean deploymentExists = deployment(id.application(), id.type()).isPresent();
    if (deploymentExists) {
        try {
            controller.jobController().updateVespaLog(id);
        }
        catch (Exception e) {
            logger.log(INFO, "Failure getting vespa logs for " + id, e);
            return Optional.of(error);
        }
    }
    return Optional.of(running);
}
/** Deactivates the real deployment; failures are retried for up to an hour before giving up. */
private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) {
try {
logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
controller.applications().deactivate(id.application(), id.type().zone(controller.system()));
return Optional.of(running);
}
catch (RuntimeException e) {
logger.log(WARNING, "Failed deleting application " + id.application(), e);
// Retry until an hour has passed since the step started; then fail with error.
Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateReal).get().startTime().get();
return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(error)
: Optional.empty();
}
}
/** Deactivates the tester deployment; failures are retried for up to an hour before giving up. */
private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) {
try {
logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
controller.jobController().deactivateTester(id.tester(), id.type());
return Optional.of(running);
}
catch (RuntimeException e) {
logger.log(WARNING, "Failed deleting tester of " + id.application(), e);
// Retry until an hour has passed since the step started; then fail with error.
Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateTester).get().startTime().get();
return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(error)
: Optional.empty();
}
}
/** Reports the run's outcome, sending a failure notification mail when the run has failed. */
private Optional<RunStatus> report(RunId id, DualLogger logger) {
try {
controller.jobController().active(id).ifPresent(run -> {
if (run.hasFailed())
sendNotification(run, logger);
});
}
catch (IllegalStateException e) {
// The run may have been finished or deleted concurrently.
logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e);
return Optional.of(error);
}
return Optional.of(running);
}
/** Sends a mail with a notification of a failed run, if one should be sent. */
private void sendNotification(Run run, DualLogger logger) {
Application application = controller.applications().requireApplication(TenantAndApplicationId.from(run.id().application()));
Notifications notifications = application.deploymentSpec().requireInstance(run.id().application().instance()).notifications();
// "New commit" means the application change currently rolling out is exactly the version
// this run targeted; that selects the failingCommit notification setting over plain failing.
boolean newCommit = application.require(run.id().application().instance()).change().application()
.map(run.versions().targetApplication()::equals)
.orElse(false);
When when = newCommit ? failingCommit : failing;
List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when));
// Also notify the commit author, when the deployment spec asks for author notification.
if (notifications.emailRolesFor(when).contains(author))
run.versions().targetApplication().authorEmail().ifPresent(recipients::add);
if (recipients.isEmpty())
return;
try {
// run.status() is a single value, so at most one of the branches below fires;
// out-of-capacity mails are only sent for production jobs.
if (run.status() == outOfCapacity && run.id().type().isProduction())
controller.serviceRegistry().mailer().send(mails.outOfCapacity(run.id(), recipients));
if (run.status() == deploymentFailed)
controller.serviceRegistry().mailer().send(mails.deploymentFailure(run.id(), recipients));
if (run.status() == installationFailed)
controller.serviceRegistry().mailer().send(mails.installationFailure(run.id(), recipients));
if (run.status() == testFailure)
controller.serviceRegistry().mailer().send(mails.testFailure(run.id(), recipients));
if (run.status() == error)
controller.serviceRegistry().mailer().send(mails.systemError(run.id(), recipients));
}
catch (RuntimeException e) {
// Mail delivery is best-effort; a mailer failure must not fail the job.
logger.log(INFO, "Exception trying to send mail for " + run.id(), e);
}
}
/** Returns the deployment of the real application in the zone of the given job, if it exists. */
private Optional<Deployment> deployment(ApplicationId id, JobType type) {
    ZoneId zone = type.zone(controller.system());
    Deployment deployment = application(id).deployments().get(zone);
    return Optional.ofNullable(deployment);
}
/** Returns the real application with the given id. */
private Instance application(ApplicationId id) {
// Takes and immediately releases the application lock with a no-op action —
// presumably to serialise against any in-flight write before the read below; TODO confirm.
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { });
return controller.applications().requireInstance(id);
}
/**
 * Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout.
 *
 * We time out the job before the deployment expires, for zones where deployments are not persistent,
 * to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry,
 * and the given default installation timeout, minus one minute, is used as a timeout threshold.
 */
private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) {
Run run = controller.jobController().run(id).get();
// Outside CD, a run which started after the deployment was made never times out here.
if ( ! controller.system().isCd() && run.start().isAfter(deployment.at()))
return false;
// Use the zone's deployment time-to-live when it is shorter than the default timeout.
Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone())
.filter(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0)
.orElse(defaultTimeout);
// One minute of slack, as described above, so logs can be collected before expiry.
return deployment.at().isBefore(controller.clock().instant().minus(timeout.minus(Duration.ofMinutes(1))));
}
/** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */
private ApplicationPackage testerPackage(RunId id) {
ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication();
DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec();
ZoneId zone = id.type().zone(controller.system());
// Tester certificates are used only in public systems, and only for test environments.
boolean useTesterCertificate = controller.system().isPublic() && id.type().environment().isTest();
// Tester resources: spec-declared flavor when present, else an AWS or default preset by region name.
byte[] servicesXml = servicesXml(controller.zoneRegistry().accessControlDomain(),
! controller.system().isPublic(),
useTesterCertificate,
testerFlavorFor(id, spec)
.map(NodeResources::fromLegacyName)
.orElse(zone.region().value().contains("aws-") ?
DEFAULT_TESTER_RESOURCES_AWS : DEFAULT_TESTER_RESOURCES));
byte[] testPackage = controller.applications().applicationStore().getTester(id.application().tenant(), id.application().application(), version);
byte[] deploymentXml = deploymentXml(id.tester(),
spec.athenzDomain(),
spec.requireInstance(id.application().instance()).athenzService(zone.environment(), zone.region()));
// Zip the stored test package together with the generated services.xml and deployment.xml.
try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) {
zipBuilder.add(testPackage);
zipBuilder.add("services.xml", servicesXml);
zipBuilder.add("deployment.xml", deploymentXml);
if (useTesterCertificate)
appendAndStoreCertificate(zipBuilder, id);
zipBuilder.close();
return new ApplicationPackage(zipBuilder.toByteArray());
}
}
/**
 * Generates an RSA key pair and a self-signed certificate for the tester, records the
 * certificate with the job run, and adds both key and certificate to the package being built.
 */
private void appendAndStoreCertificate(ZipBuilder zipBuilder, RunId id) {
    KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048);
    // Subject identifies the exact tester instance, job type and run number.
    String commonName = id.tester().id().toFullString() + "." + id.type() + "." + id.number();
    X509Certificate certificate = X509CertificateBuilder.fromKeypair(keyPair,
                                                                     new X500Principal("CN=" + commonName),
                                                                     controller.clock().instant(),
                                                                     controller.clock().instant().plus(certificateTimeout),
                                                                     SignatureAlgorithm.SHA512_WITH_RSA,
                                                                     BigInteger.valueOf(1))
                                                        .build();
    // Stored with the run so the certificate's validity can be re-checked when tests end.
    controller.jobController().storeTesterCertificate(id, certificate);
    zipBuilder.add("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8));
    zipBuilder.add("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8));
}
/** Returns the deployment id of this run's tester application in the given zone. */
private DeploymentId getTesterDeploymentId(RunId runId, ZoneId zoneId) {
    ApplicationId tester = runId.tester().id();
    return new DeploymentId(tester, zoneId);
}
/** Returns the tester flavor configured for the first deployment spec step concerning this job's environment, if any. */
private static Optional<String> testerFlavorFor(RunId id, DeploymentSpec spec) {
    return spec.steps().stream()
               .filter(step -> step.concerns(id.type().environment()))
               .findFirst()
               .flatMap(step -> step.zones().get(0).testerFlavor());
}
/** Returns the generated services.xml content for the tester application. */
static byte[] servicesXml(AthenzDomain domain, boolean systemUsesAthenz, boolean useTesterCertificate,
NodeResources resources) {
// The jdisc container gets a fixed 2Gb; the remainder is split, with half given to the surefire test JVM.
int jdiscMemoryGb = 2;
int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb());
int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2);
String resourceString = String.format(Locale.ENGLISH,
"<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>",
resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name());
// Special case: the CD system's domain maps to the main domain's service identity files on disk.
AthenzDomain idDomain = ("vespa.vespa.cd".equals(domain.value()) ? AthenzDomain.from("vespa.vespa") : domain);
// NOTE(review): the two "<binding>http:" literals below appear truncated (unterminated) in this copy
// of the file — the full binding URI patterns must be restored from the original source.
String servicesXml =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<services xmlns:deploy='vespa' version='1.0'>\n" +
" <container version='1.0' id='tester'>\n" +
"\n" +
" <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" +
" <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" +
" <artifactsPath>artifacts</artifactsPath>\n" +
" <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" +
" <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
" <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" +
" </config>\n" +
" </component>\n" +
"\n" +
" <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" +
" <binding>http:
" </handler>\n" +
"\n" +
" <http>\n" +
" <!-- Make sure 4080 is the first port. This will be used by the config server. -->\n" +
" <server id='default' port='4080'/>\n" +
" <server id='testertls4443' port='4443'>\n" +
" <config name=\"jdisc.http.connector\">\n" +
" <tlsClientAuthEnforcer>\n" +
" <enable>true</enable>\n" +
" <pathWhitelist>\n" +
" <item>/status.html</item>\n" +
" <item>/state/v1/config</item>\n" +
" </pathWhitelist>\n" +
" </tlsClientAuthEnforcer>\n" +
" </config>\n" +
" <ssl>\n" +
" <private-key-file>/var/lib/sia/keys/" + idDomain.value() + ".tenant.key.pem</private-key-file>\n" +
" <certificate-file>/var/lib/sia/certs/" + idDomain.value() + ".tenant.cert.pem</certificate-file>\n" +
" <ca-certificates-file>/opt/yahoo/share/ssl/certs/athenz_certificate_bundle.pem</ca-certificates-file>\n" +
" <client-authentication>want</client-authentication>\n" +
" </ssl>\n" +
" </server>\n" +
" <filtering>\n" +
(systemUsesAthenz ?
" <access-control domain='" + domain.value() + "'>\n" +
" <exclude>\n" +
" <binding>http:
" </exclude>\n" +
" </access-control>\n"
: "") +
" <request-chain id=\"testrunner-api\">\n" +
" <filter id='authz-filter' class='com.yahoo.jdisc.http.filter.security.athenz.AthenzAuthorizationFilter' bundle=\"jdisc-security-filters\">\n" +
" <config name=\"jdisc.http.filter.security.athenz.athenz-authorization-filter\">\n" +
" <credentialsToVerify>TOKEN_ONLY</credentialsToVerify>\n" +
" <roleTokenHeaderName>Yahoo-Role-Auth</roleTokenHeaderName>\n" +
" </config>\n" +
" <component id=\"com.yahoo.jdisc.http.filter.security.athenz.StaticRequestResourceMapper\" bundle=\"jdisc-security-filters\">\n" +
" <config name=\"jdisc.http.filter.security.athenz.static-request-resource-mapper\">\n" +
" <resourceName>" + domain.value() + ":tester-application</resourceName>\n" +
" <action>deploy</action>\n" +
" </config>\n" +
" </component>\n" +
" </filter>\n" +
" </request-chain>\n" +
" </filtering>\n" +
" </http>\n" +
"\n" +
" <nodes count=\"1\" allocated-memory=\"" + jdiscMemoryPct + "%\">\n" +
" " + resourceString + "\n" +
" </nodes>\n" +
" </container>\n" +
"</services>\n";
return servicesXml.getBytes(UTF_8);
}
/** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */
private static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) {
    StringBuilder xml = new StringBuilder();
    xml.append("<?xml version='1.0' encoding='UTF-8'?>\n");
    xml.append("<deployment version=\"1.0\" ");
    // Athenz attributes are emitted only when the real application's spec declares them.
    athenzDomain.ifPresent(domain -> xml.append("athenz-domain=\"").append(domain.value()).append("\" "));
    athenzService.ifPresent(service -> xml.append("athenz-service=\"").append(service.value()).append("\" "));
    xml.append(">");
    xml.append(" <instance id=\"").append(id.id().instance().value()).append("\" />");
    xml.append("</deployment>");
    return xml.toString().getBytes(UTF_8);
}
/** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */
private class DualLogger {

    private final RunId id;
    private final Step step;

    private DualLogger(RunId id, Step step) {
        this.id = id;
        this.step = step;
    }

    /** Logs the given messages to the job log only, at INFO. */
    private void log(String... messages) {
        log(List.of(messages));
    }

    private void log(List<String> messages) {
        controller.jobController().log(id, step, INFO, messages);
    }

    private void log(Level level, String message) {
        log(level, message, null);
    }

    /** Like {@link #log(Level, String, Throwable)}, but the stack trace goes only to the outer logger, not the job log. */
    private void logWithInternalException(Level level, String message, Throwable thrown) {
        logger.log(level, id + " at " + step + ": " + message, thrown);
        controller.jobController().log(id, step, level, message);
    }

    private void log(Level level, String message, Throwable thrown) {
        logger.log(level, id + " at " + step + ": " + message, thrown);
        // The job log gets the stack trace appended to the message, since it stores plain strings.
        String jobMessage = message;
        if (thrown != null) {
            ByteArrayOutputStream trace = new ByteArrayOutputStream();
            thrown.printStackTrace(new PrintStream(trace));
            jobMessage = message + "\n" + trace;
        }
        controller.jobController().log(id, step, level, jobMessage);
    }

}
} |
Should probably add some advice on what the customer should do next when the deployment fails? | private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) {
// NOTE(review): this span appears to be two columns of a dataset row — the same
// installReal(RunId, boolean, DualLogger) body occurs twice, separated by '|' cell
// markers fused into the closing-brace lines. Kept byte-identical; comments only.
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
logger.log(INFO, "Deployment expired before installation was successful.");
return Optional.of(installationFailed);
}
// During staging setup we install the source platform; otherwise the target.
Versions versions = controller.jobController().run(id).get().versions();
Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform();
Run run = controller.jobController().run(id).get();
Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(id.application(), id.type().zone(controller.system())),
Optional.of(platform));
if (services.isEmpty()) {
logger.log("Config status not currently available -- will retry.");
Step step = setTheStage ? installInitialReal : installReal;
// Give convergence status five minutes to become available before failing the step.
return run.stepInfo(step).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
? Optional.of(error)
: Optional.empty();
}
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
id.application(),
ImmutableSet.of(active, reserved));
List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
NodeList nodeList = NodeList.of(nodes, parents, services.get());
// On the first tick, log full details for every node; later ticks only log changes.
boolean firstTick = run.convergenceSummary().isEmpty();
if (firstTick) {
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
}
ConvergenceSummary summary = nodeList.summary();
if (summary.converged()) {
controller.jobController().locked(id, lockedRun -> lockedRun.withSummary(null));
if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) {
if (containersAreUp(id.application(), id.type().zone(controller.system()), logger)) {
logger.log("Installation succeeded!");
return Optional.of(running);
}
}
else if (timedOut(id, deployment.get(), endpointTimeout)) {
logger.log(WARNING, "Endpoints failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
return Optional.of(error);
}
}
// Failure detection: suspended-too-long nodes, stalled suspension, or overall timeout.
boolean failed = false;
NodeList suspendedTooLong = nodeList.suspendedSince(controller.clock().instant().minus(installationTimeout));
if ( ! suspendedTooLong.isEmpty()) {
logger.log(INFO, "Some nodes have been suspended for more than " + installationTimeout.toMinutes() + " minutes.");
failed = true;
}
if (run.noNodesDownSince()
.map(since -> since.isBefore(controller.clock().instant().minus(installationTimeout)))
.orElse(false)) {
if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0)
logger.log(INFO, "No nodes allowed to suspend to progress installation for " + installationTimeout.toMinutes() + " minutes.");
else
logger.log(INFO, "Nodes not able to start with new application package.");
failed = true;
}
Duration timeout = JobRunner.jobTimeout.minusHours(1);
if (timedOut(id, deployment.get(), timeout)) {
// NOTE(review): message is missing a space before "hours" — renders as e.g. "within 11hours!".
logger.log(INFO, "Installation failed to complete within " + timeout.toHours() + "hours!");
failed = true;
}
if (failed) {
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
return Optional.of(installationFailed);
}
if ( ! firstTick)
logger.log(nodeList.allowedDown().asList().stream()
.flatMap(node -> nodeDetails(node, false))
.collect(toList()));
// Track the instant since which no nodes have been down, to detect stalled suspension above.
controller.jobController().locked(id, lockedRun -> {
Instant noNodesDownSince = summary.down() == 0 ? lockedRun.noNodesDownSince().orElse(controller.clock().instant()) : null;
return lockedRun.noNodesDownSince(noNodesDownSince).withSummary(summary);
});
return Optional.empty();
} | logger.log(INFO, "Installation failed to complete within " + timeout.toHours() + "hours!"); | private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) {
// NOTE(review): second copy of the same method body (another dataset column); kept verbatim.
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
logger.log(INFO, "Deployment expired before installation was successful.");
return Optional.of(installationFailed);
}
Versions versions = controller.jobController().run(id).get().versions();
Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform();
Run run = controller.jobController().run(id).get();
Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(id.application(), id.type().zone(controller.system())),
Optional.of(platform));
if (services.isEmpty()) {
logger.log("Config status not currently available -- will retry.");
Step step = setTheStage ? installInitialReal : installReal;
return run.stepInfo(step).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
? Optional.of(error)
: Optional.empty();
}
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
id.application(),
ImmutableSet.of(active, reserved));
List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
NodeList nodeList = NodeList.of(nodes, parents, services.get());
boolean firstTick = run.convergenceSummary().isEmpty();
if (firstTick) {
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
}
ConvergenceSummary summary = nodeList.summary();
if (summary.converged()) {
controller.jobController().locked(id, lockedRun -> lockedRun.withSummary(null));
if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) {
if (containersAreUp(id.application(), id.type().zone(controller.system()), logger)) {
logger.log("Installation succeeded!");
return Optional.of(running);
}
}
else if (timedOut(id, deployment.get(), endpointTimeout)) {
logger.log(WARNING, "Endpoints failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
return Optional.of(error);
}
}
boolean failed = false;
NodeList suspendedTooLong = nodeList.suspendedSince(controller.clock().instant().minus(installationTimeout));
if ( ! suspendedTooLong.isEmpty()) {
logger.log(INFO, "Some nodes have been suspended for more than " + installationTimeout.toMinutes() + " minutes.");
failed = true;
}
if (run.noNodesDownSince()
.map(since -> since.isBefore(controller.clock().instant().minus(installationTimeout)))
.orElse(false)) {
if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0)
logger.log(INFO, "No nodes allowed to suspend to progress installation for " + installationTimeout.toMinutes() + " minutes.");
else
logger.log(INFO, "Nodes not able to start with new application package.");
failed = true;
}
Duration timeout = JobRunner.jobTimeout.minusHours(1);
if (timedOut(id, deployment.get(), timeout)) {
logger.log(INFO, "Installation failed to complete within " + timeout.toHours() + "hours!");
failed = true;
}
if (failed) {
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
return Optional.of(installationFailed);
}
if ( ! firstTick)
logger.log(nodeList.allowedDown().asList().stream()
.flatMap(node -> nodeDetails(node, false))
.collect(toList()));
controller.jobController().locked(id, lockedRun -> {
Instant noNodesDownSince = summary.down() == 0 ? lockedRun.noNodesDownSince().orElse(controller.clock().instant()) : null;
return lockedRun.noNodesDownSince(noNodesDownSince).withSummary(summary);
});
return Optional.empty();
} | class InternalStepRunner implements StepRunner {
private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName());
// Default tester node resources; the AWS variant is larger.
private static final NodeResources DEFAULT_TESTER_RESOURCES =
new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any);
private static final NodeResources DEFAULT_TESTER_RESOURCES_AWS =
new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any);
// Step-level timeouts; package-private so tests and helpers in this file can reference them.
static final Duration endpointTimeout = Duration.ofMinutes(15);
static final Duration testerTimeout = Duration.ofMinutes(30);
static final Duration installationTimeout = Duration.ofMinutes(60);
static final Duration certificateTimeout = Duration.ofMinutes(300);
private final Controller controller;
private final TestConfigSerializer testConfigSerializer;
private final DeploymentFailureMails mails;
public InternalStepRunner(Controller controller) {
this.controller = controller;
this.testConfigSerializer = new TestConfigSerializer(controller.system());
this.mails = new DeploymentFailureMails(controller.zoneRegistry());
}
@Override
public Optional<RunStatus> run(LockedStep step, RunId id) {
// Dispatches to the handler for the given step. Empty result means "not done yet, retry".
DualLogger logger = new DualLogger(id, step.get());
try {
switch (step.get()) {
case deployTester: return deployTester(id, logger);
case deployInitialReal: return deployInitialReal(id, logger);
case installInitialReal: return installInitialReal(id, logger);
case deployReal: return deployReal(id, logger);
case installTester: return installTester(id, logger);
case installReal: return installReal(id, logger);
case startStagingSetup: return startTests(id, true, logger);
case endStagingSetup: return endTests(id, logger);
case startTests: return startTests(id, false, logger);
case endTests: return endTests(id, logger);
case copyVespaLogs: return copyVespaLogs(id, logger);
case deactivateReal: return deactivateReal(id, logger);
case deactivateTester: return deactivateTester(id, logger);
case report: return report(id, logger);
default: throw new AssertionError("Unknown step '" + step + "'!");
}
}
catch (UncheckedIOException e) {
// IO trouble is assumed transient: log (stack trace only internally) and retry.
logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e);
return Optional.empty();
}
catch (RuntimeException e) {
logger.log(WARNING, "Unexpected exception running " + id, e);
// Cleanup steps must eventually run, so they are retried rather than failed.
if (JobProfile.of(id.type()).alwaysRun().contains(step.get())) {
logger.log("Will keep trying, as this is a cleanup step.");
return Optional.empty();
}
return Optional.of(error);
}
}
/** Deploys the source (or, failing that, target) versions of the real application, to set the stage. */
private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) {
    Versions versions = controller.jobController().run(id).get().versions();
    Version platform = versions.sourcePlatform().orElse(versions.targetPlatform());
    ApplicationVersion application = versions.sourceApplication().orElse(versions.targetApplication());
    logger.log("Deploying platform version " + platform + " and application version " + application.id() + " ...");
    return deployReal(id, true, versions, logger);
}
/** Deploys the target versions of the real application. */
private Optional<RunStatus> deployReal(RunId id, DualLogger logger) {
    Versions versions = controller.jobController().run(id).get().versions();
    String message = "Deploying platform version " + versions.targetPlatform() +
                     " and application version " + versions.targetApplication().id() + " ...";
    logger.log(message);
    return deployReal(id, false, versions, logger);
}
/** Deploys the real application, with the package and platform version dictated by the environment. */
private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, Versions versions, DualLogger logger) {
    boolean manuallyDeployed = id.type().environment().isManuallyDeployed();
    ZoneId zone = id.type().zone(controller.system());

    // Manually deployed environments use the stored dev package and an explicit platform;
    // elsewhere, the config server already knows both.
    Optional<ApplicationPackage> applicationPackage =
            manuallyDeployed ? Optional.of(new ApplicationPackage(controller.applications().applicationStore()
                                                                            .getDev(id.application(), zone)))
                             : Optional.empty();
    Optional<Version> vespaVersion = manuallyDeployed ? Optional.of(versions.targetPlatform()) : Optional.empty();

    Instant stepStart = controller.jobController().run(id).get()
                                  .stepInfo(setTheStage ? deployInitialReal : deployReal).get()
                                  .startTime().get();
    return deploy(id.application(),
                  id.type(),
                  () -> controller.applications().deploy(id.application(),
                                                         zone,
                                                         applicationPackage,
                                                         new DeployOptions(false,
                                                                           vespaVersion,
                                                                           false,
                                                                           setTheStage)),
                  stepStart,
                  logger);
}
/** Deploys the tester container application, on the current system version. */
private Optional<RunStatus> deployTester(RunId id, DualLogger logger) {
    Version systemVersion = controller.systemVersion();
    logger.log("Deploying the tester container on platform " + systemVersion + " ...");
    Instant stepStart = controller.jobController().run(id).get()
                                  .stepInfo(deployTester).get()
                                  .startTime().get();
    // testerPackage(id) is assembled inside the supplier, i.e., only when deploy() invokes it.
    return deploy(id.tester().id(),
                  id.type(),
                  () -> controller.applications().deployTester(id.tester(),
                                                               testerPackage(id),
                                                               id.type().zone(controller.system()),
                                                               new DeployOptions(true,
                                                                                 Optional.of(systemVersion),
                                                                                 false,
                                                                                 false)),
                  stepStart,
                  logger);
}
/**
 * Performs the given deployment and interprets the outcome: reports refeed conflicts,
 * triggers any required service restarts, and maps config server errors to run statuses.
 */
private Optional<RunStatus> deploy(ApplicationId id, JobType type, Supplier<ActivateResult> deployment,
Instant startTime, DualLogger logger) {
try {
PrepareResponse prepareResponse = deployment.get().prepareResponse();
// Disallowed refeed actions are fatal: tell the user their options, then fail the deployment.
if ( ! prepareResponse.configChangeActions.refeedActions.stream().allMatch(action -> action.allowed)) {
List<String> messages = new ArrayList<>();
messages.add("Deploy failed due to non-compatible changes that require re-feed.");
messages.add("Your options are:");
messages.add("1. Revert the incompatible changes.");
messages.add("2. If you think it is safe in your case, you can override this validation, see");
// NOTE(review): the documentation-URL literal below appears truncated (unterminated) in this copy — restore the full link.
messages.add("  http:
messages.add("3. Deploy as a new application under a different name.");
messages.add("Illegal actions:");
prepareResponse.configChangeActions.refeedActions.stream()
.filter(action -> ! action.allowed)
.flatMap(action -> action.messages.stream())
.forEach(messages::add);
messages.add("Details:");
prepareResponse.log.stream()
.map(entry -> entry.message)
.forEach(messages::add);
logger.log(messages);
return Optional.of(deploymentFailed);
}
// Restart actions are acted upon immediately, once per affected host.
if (prepareResponse.configChangeActions.restartActions.isEmpty())
logger.log("No services requiring restart.");
else
prepareResponse.configChangeActions.restartActions.stream()
.flatMap(action -> action.services.stream())
.map(service -> service.hostName)
.sorted().distinct()
.map(Hostname::new)
.forEach(hostname -> {
controller.applications().restart(new DeploymentId(id, type.zone(controller.system())), Optional.of(hostname));
logger.log("Restarting services on host " + hostname.id() + ".");
});
logger.log("Deployment successful.");
if (prepareResponse.message != null)
logger.log(prepareResponse.message);
return Optional.of(running);
}
catch (ConfigServerException e) {
// Transient errors are retried for up to an hour from the step's start; then they fail the run.
Optional<RunStatus> result = startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(deploymentFailed) : Optional.empty();
switch (e.getErrorCode()) {
case ACTIVATION_CONFLICT:
case APPLICATION_LOCK_FAILURE:
case CERTIFICATE_NOT_READY:
logger.log("Deployment failed with possibly transient error " + e.getErrorCode() +
", will retry: " + e.getMessage());
return result;
case LOAD_BALANCER_NOT_READY:
case PARENT_HOST_NOT_READY:
logger.log(e.getServerMessage());
return result;
case OUT_OF_CAPACITY:
logger.log(e.getServerMessage());
return Optional.of(outOfCapacity);
case INVALID_APPLICATION_PACKAGE:
case BAD_REQUEST:
logger.log(e.getMessage());
return Optional.of(deploymentFailed);
}
// Unrecognised config server errors propagate to the generic handler in run().
throw e;
}
}
/** Awaits installation of the initial (staging-setup) real deployment; delegates to the shared implementation. */
private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) {
return installReal(id, true, logger);
}
/** Awaits installation of the target real deployment; delegates to the shared implementation. */
private Optional<RunStatus> installReal(RunId id, DualLogger logger) {
return installReal(id, false, logger);
}
/**
 * Awaits convergence and readiness of the tester container deployment.
 * Empty result means "not done yet"; error is returned on the various timeouts.
 */
private Optional<RunStatus> installTester(RunId id, DualLogger logger) {
Run run = controller.jobController().run(id).get();
Version platform = controller.systemVersion();
ZoneId zone = id.type().zone(controller.system());
ApplicationId testerId = id.tester().id();
Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(testerId, zone),
Optional.of(platform));
if (services.isEmpty()) {
logger.log("Config status not currently available -- will retry.");
// Convergence status gets five minutes to become available before the step errors out.
return run.stepInfo(installTester).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
? Optional.of(error)
: Optional.empty();
}
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone,
testerId,
ImmutableSet.of(active, reserved));
List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(zone,
nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
NodeList nodeList = NodeList.of(nodes, parents, services.get());
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, false))
.collect(toList()));
if (nodeList.summary().converged()) {
if (endpointsAvailable(testerId, zone, logger)) {
if (containersAreUp(testerId, zone, logger)) {
logger.log("Tester container successfully installed!");
return Optional.of(running);
}
}
// Converged but endpoints absent: only an error once the endpoint timeout has passed.
else if (run.stepInfo(installTester).get().startTime().get().plus(endpointTimeout).isBefore(controller.clock().instant())) {
logger.log(WARNING, "Tester failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
return Optional.of(error);
}
}
// Overall cap on how long tester installation may take.
if (run.stepInfo(installTester).get().startTime().get().plus(testerTimeout).isBefore(controller.clock().instant())) {
logger.log(WARNING, "Installation of tester failed to complete within " + testerTimeout.toMinutes() + " minutes!");
return Optional.of(error);
}
return Optional.empty();
}
/** Returns true iff all containers in the deployment give 100 consecutive 200 OK responses on /status.html. */
private boolean containersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
    var endpointsByZone = controller.applications().clusterEndpoints(Set.of(new DeploymentId(id, zoneId)));
    if ( ! endpointsByZone.containsKey(zoneId))
        return false;

    for (URI endpoint : endpointsByZone.get(zoneId).values()) {
        // The tester has its own readiness check; real applications use the regular one.
        boolean isUp = id.instance().isTester()
                       ? controller.jobController().cloud().testerReady(endpoint)
                       : controller.jobController().cloud().ready(endpoint);
        if ( ! isUp) {
            logger.log("Failed to get 100 consecutive OKs from " + endpoint);
            return false;
        }
    }
    return true;
}
/** Returns true iff all cluster endpoints for the deployment exist and resolve in DNS; logs them when they do. */
private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) {
    var endpointsByZone = controller.applications().clusterEndpoints(Set.of(new DeploymentId(id, zone)));
    if ( ! endpointsByZone.containsKey(zone)) {
        logger.log("Endpoints not yet ready.");
        return false;
    }

    for (var endpoint : endpointsByZone.get(zone).values()) {
        if ( ! controller.jobController().cloud().exists(endpoint)) {
            logger.log(INFO, "DNS lookup yielded no IP address for '" + endpoint + "'.");
            return false;
        }
    }
    logEndpoints(endpointsByZone, logger);
    return true;
}
/** Writes the discovered endpoints, grouped per zone, to the job log. */
private void logEndpoints(Map<ZoneId, Map<ClusterSpec.Id, URI>> endpoints, DualLogger logger) {
    List<String> lines = new ArrayList<>();
    lines.add("Found endpoints:");
    for (var zoneEntry : endpoints.entrySet()) {
        lines.add("- " + zoneEntry.getKey());
        for (var clusterEntry : zoneEntry.getValue().entrySet())
            lines.add(" |-- " + clusterEntry.getValue() + " (" + clusterEntry.getKey() + ")");
    }
    logger.log(lines);
}
/**
 * Renders a per-node status report: one header line with pending platform/OS/firmware/restart/reboot
 * work, followed by per-service config generation lines (all services, or only when new config is needed).
 */
private Stream<String> nodeDetails(NodeWithServices node, boolean printAllServices) {
return Stream.concat(Stream.of(node.node().hostname() + ": " + humanize(node.node().serviceState()),
"--- platform " + node.node().wantedVersion() + (node.needsPlatformUpgrade()
? " <-- " + (node.node().currentVersion().isEmpty() ? "not booted" : node.node().currentVersion())
: "") +
(node.needsOsUpgrade() && node.isAllowedDown()
? ", upgrading OS (" + node.node().wantedOsVersion() + " <-- " + node.node().currentOsVersion() + ")"
: "") +
(node.needsFirmwareUpgrade() && node.isAllowedDown()
? ", upgrading firmware"
: "") +
(node.needsRestart()
? ", restart pending (" + node.node().wantedRestartGeneration() + " <-- " + node.node().restartGeneration() + ")"
: "") +
(node.needsReboot()
? ", reboot pending (" + node.node().wantedRebootGeneration() + " <-- " + node.node().rebootGeneration() + ")"
: "")),
node.services().stream()
.filter(service -> printAllServices || node.needsNewConfig())
.map(service -> "--- " + service.type() + " on port " + service.port() + (service.currentGeneration() == -1
? " has not started "
: " has config generation " + service.currentGeneration() + ", wanted is " + node.wantedConfigGeneration())));
}
/** Returns a human-readable rendering of the given service state. */
private String humanize(Node.ServiceState state) {
    if (state == Node.ServiceState.allowedDown)
        return "allowed to be DOWN";
    if (state == Node.ServiceState.expectedUp)
        return "expected to be UP";
    if (state == Node.ServiceState.unorchestrated)
        return "unorchestrated";
    return state.name();
}
/**
 * Verifies that the deployment under test, all production deployments, and the tester are reachable,
 * then instructs the tester to start the appropriate test suite.
 */
private Optional<RunStatus> startTests(RunId id, boolean isSetup, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
logger.log(INFO, "Deployment expired before tests could start.");
return Optional.of(error);
}
// Tests may reach all production deployments, plus the zone under test.
var deployments = controller.applications().requireInstance(id.application())
.productionDeployments().keySet().stream()
.map(zone -> new DeploymentId(id.application(), zone))
.collect(Collectors.toSet());
deployments.add(new DeploymentId(id.application(), id.type().zone(controller.system())));
logger.log("Attempting to find endpoints ...");
var endpoints = controller.applications().clusterEndpoints(deployments);
if ( ! endpoints.containsKey(id.type().zone(controller.system()))) {
logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!");
return Optional.of(error);
}
logEndpoints(endpoints, logger);
Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id);
if (testerEndpoint.isEmpty()) {
logger.log(WARNING, "Endpoints for the tester container vanished again, while it was still active!");
return Optional.of(error);
}
if ( ! controller.jobController().cloud().testerReady(testerEndpoint.get())) {
logger.log(WARNING, "Tester container went bad!");
return Optional.of(error);
}
logger.log("Starting tests ...");
controller.jobController().cloud().startTests(testerEndpoint.get(),
TesterCloud.Suite.of(id.type(), isSetup),
testConfigSerializer.configJson(id.application(),
id.type(),
true,
endpoints,
controller.applications().contentClustersByZone(deployments)));
return Optional.of(running);
}
/**
 * Polls the tester for test progress, and maps its reported status to a run status.
 * Aborts if the deployment or the tester certificate expires before tests complete.
 */
private Optional<RunStatus> endTests(RunId id, DualLogger logger) {
if (deployment(id.application(), id.type()).isEmpty()) {
logger.log(INFO, "Deployment expired before tests could complete.");
return Optional.of(aborted);
}
// A tester certificate (public systems) which has expired makes further polling pointless.
Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate();
if (testerCertificate.isPresent()) {
try {
testerCertificate.get().checkValidity(Date.from(controller.clock().instant()));
}
catch (CertificateExpiredException | CertificateNotYetValidException e) {
logger.log(INFO, "Tester certificate expired before tests could complete.");
return Optional.of(aborted);
}
}
controller.jobController().updateTestLog(id);
// Feature flag: ask the config server for tester status, instead of the tester endpoint directly.
BooleanFlag useConfigServerForTesterAPI = Flags.USE_CONFIG_SERVER_FOR_TESTER_API_CALLS.bindTo(controller.flagSource());
ZoneId zoneId = id.type().zone(controller.system());
TesterCloud.Status testStatus;
boolean useConfigServer = useConfigServerForTesterAPI.with(FetchVector.Dimension.ZONE_ID, zoneId.value()).value();
InternalStepRunner.logger.log(LogLevel.INFO, Flags.USE_CONFIG_SERVER_FOR_TESTER_API_CALLS.id().toString() +
" has value " + useConfigServer + " in zone " + zoneId.value());
if (useConfigServer) {
testStatus = controller.serviceRegistry().configServer().getTesterStatus(getTesterDeploymentId(id, zoneId));
} else {
Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id);
if (testerEndpoint.isEmpty()) {
logger.log("Endpoints for tester not found -- trying again later.");
return Optional.empty();
}
testStatus = controller.jobController().cloud().getStatus(testerEndpoint.get());
}
switch (testStatus) {
case NOT_STARTED:
throw new IllegalStateException("Tester reports tests not started, even though they should have!");
case RUNNING:
return Optional.empty();
case FAILURE:
logger.log("Tests failed.");
return Optional.of(testFailure);
case ERROR:
logger.log(INFO, "Tester failed running its tests!");
return Optional.of(error);
case SUCCESS:
logger.log("Tests completed successfully.");
return Optional.of(running);
default:
throw new IllegalStateException("Unknown status '" + testStatus + "'!");
}
}
/** Copies Vespa logs from the still-active deployment, if any, into the job run's log. */
private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) {
    if (deployment(id.application(), id.type()).isPresent()) {
        try {
            controller.jobController().updateVespaLog(id);
        }
        catch (Exception e) {
            logger.log(INFO, "Failure getting vespa logs for " + id, e);
            return Optional.of(error);
        }
    }
    return Optional.of(running);
}
/** Deactivates the real application's deployment in this job's zone; retries failures for up to an hour. */
private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) {
    try {
        logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
        controller.applications().deactivate(id.application(), id.type().zone(controller.system()));
        return Optional.of(running);
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Failed deleting application " + id.application(), e);
        Instant stepStart = controller.jobController().run(id).get().stepInfo(deactivateReal).get().startTime().get();
        boolean retriesExhausted = stepStart.isBefore(controller.clock().instant().minus(Duration.ofHours(1)));
        return retriesExhausted ? Optional.of(error) : Optional.empty();
    }
}
/** Deactivates the tester deployment for this job; retries failures for up to an hour. */
private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) {
    try {
        logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
        controller.jobController().deactivateTester(id.tester(), id.type());
        return Optional.of(running);
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Failed deleting tester of " + id.application(), e);
        Instant stepStart = controller.jobController().run(id).get().stepInfo(deactivateTester).get().startTime().get();
        if (stepStart.isBefore(controller.clock().instant().minus(Duration.ofHours(1))))
            return Optional.of(error);
        return Optional.empty();
    }
}
/** Sends a failure notification if the run is still active and has failed. */
private Optional<RunStatus> report(RunId id, DualLogger logger) {
    try {
        controller.jobController().active(id)
                  .filter(Run::hasFailed)
                  .ifPresent(run -> sendNotification(run, logger));
        return Optional.of(running);
    }
    catch (IllegalStateException e) {
        logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e);
        return Optional.of(error);
    }
}
/** Sends a mail with a notification of a failed run, if one should be sent. */
private void sendNotification(Run run, DualLogger logger) {
    Application application = controller.applications().requireApplication(TenantAndApplicationId.from(run.id().application()));
    Notifications notifications = application.deploymentSpec().requireInstance(run.id().application().instance()).notifications();
    // The run failed on a new commit when the change under deployment is exactly the target application version.
    boolean newCommit = application.require(run.id().application().instance()).change().application()
                                   .map(run.versions().targetApplication()::equals)
                                   .orElse(false);
    When when = newCommit ? failingCommit : failing;

    List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when));
    if (notifications.emailRolesFor(when).contains(author))
        run.versions().targetApplication().authorEmail().ifPresent(recipients::add);
    if (recipients.isEmpty())
        return;

    try {
        // Exactly one mail matches the run's status; other statuses send nothing.
        switch (run.status()) {
            case outOfCapacity:
                if (run.id().type().isProduction())
                    controller.serviceRegistry().mailer().send(mails.outOfCapacity(run.id(), recipients));
                break;
            case deploymentFailed:
                controller.serviceRegistry().mailer().send(mails.deploymentFailure(run.id(), recipients));
                break;
            case installationFailed:
                controller.serviceRegistry().mailer().send(mails.installationFailure(run.id(), recipients));
                break;
            case testFailure:
                controller.serviceRegistry().mailer().send(mails.testFailure(run.id(), recipients));
                break;
            case error:
                controller.serviceRegistry().mailer().send(mails.systemError(run.id(), recipients));
                break;
            default:
                break;
        }
    }
    catch (RuntimeException e) {
        logger.log(INFO, "Exception trying to send mail for " + run.id(), e);
    }
}
/** Returns the deployment of the real application in the zone of the given job, if it exists. */
private Optional<Deployment> deployment(ApplicationId id, JobType type) {
    ZoneId zone = type.zone(controller.system());
    return Optional.ofNullable(application(id).deployments().get(zone));
}
/** Returns the real application with the given id. */
private Instance application(ApplicationId id) {
    // Takes and immediately releases the application lock before reading — presumably to
    // serialize with in-flight writers so the read below sees their updates. TODO confirm.
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { });
    return controller.applications().requireInstance(id);
}
/**
 * Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout.
 *
 * We time out the job before the deployment expires, for zones where deployments are not persistent,
 * to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry,
 * and the given default installation timeout, minus one minute, is used as a timeout threshold.
 */
private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) {
    Run run = controller.jobController().run(id).get();
    // Outside CD, a run which started after the deployment was made never times out here.
    if ( ! controller.system().isCd() && run.start().isAfter(deployment.at()))
        return false;

    // The effective timeout is the smaller of the zone's deployment TTL and the given default.
    Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone())
                                 .map(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0 ? zoneTimeout : defaultTimeout)
                                 .orElse(defaultTimeout);
    Instant threshold = controller.clock().instant().minus(timeout.minus(Duration.ofMinutes(1)));
    return deployment.at().isBefore(threshold);
}
/** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */
private ApplicationPackage testerPackage(RunId id) {
    ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication();
    DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec();
    ZoneId zone = id.type().zone(controller.system());
    // Tester certificates are used only in public systems, and only for test environments.
    boolean useTesterCertificate = controller.system().isPublic() && id.type().environment().isTest();
    // Generated container config; Athenz credentials are used only outside public systems.
    // Tester node resources come from the spec's tester flavor, or zone-dependent defaults.
    byte[] servicesXml = servicesXml(controller.zoneRegistry().accessControlDomain(),
                                     ! controller.system().isPublic(),
                                     useTesterCertificate,
                                     testerFlavorFor(id, spec)
                                             .map(NodeResources::fromLegacyName)
                                             .orElse(zone.region().value().contains("aws-") ?
                                                     DEFAULT_TESTER_RESOURCES_AWS : DEFAULT_TESTER_RESOURCES));
    // The previously stored fat test jar for the target application version.
    byte[] testPackage = controller.applications().applicationStore().getTester(id.application().tenant(), id.application().application(), version);
    byte[] deploymentXml = deploymentXml(id.tester(),
                                         spec.athenzDomain(),
                                         spec.requireInstance(id.application().instance()).athenzService(zone.environment(), zone.region()));
    try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) {
        zipBuilder.add(testPackage);
        zipBuilder.add("services.xml", servicesXml);
        zipBuilder.add("deployment.xml", deploymentXml);
        if (useTesterCertificate)
            appendAndStoreCertificate(zipBuilder, id);
        // Explicit close before reading the bytes, to finish the zip stream; the
        // try-with-resources close then runs again — presumably ZipBuilder.close()
        // is idempotent. TODO confirm.
        zipBuilder.close();
        return new ApplicationPackage(zipBuilder.toByteArray());
    }
}
/** Generates a self-signed tester certificate, stores it on the run, and adds key and certificate to the package. */
private void appendAndStoreCertificate(ZipBuilder zipBuilder, RunId id) {
    KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048);
    // The subject encodes tester id, job type and run number, making each run's certificate distinct.
    X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "." + id.number());
    // Self-signed certificate, valid from now until certificateTimeout has passed.
    X509Certificate certificate = X509CertificateBuilder.fromKeypair(keyPair,
                                                                     subject,
                                                                     controller.clock().instant(),
                                                                     controller.clock().instant().plus(certificateTimeout),
                                                                     SignatureAlgorithm.SHA512_WITH_RSA,
                                                                     BigInteger.valueOf(1))
                                                        .build();
    // Stored on the run — presumably so later steps can authenticate against the tester; see endTests' validity check.
    controller.jobController().storeTesterCertificate(id, certificate);
    zipBuilder.add("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8));
    zipBuilder.add("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8));
}
/** Returns the deployment id of the tester application for the given run, in the given zone. */
private DeploymentId getTesterDeploymentId(RunId runId, ZoneId zoneId) {
    ApplicationId testerId = runId.tester().id();
    return new DeploymentId(testerId, zoneId);
}
/** Returns the tester flavor of the first deployment spec step concerning this job's environment, if any. */
private static Optional<String> testerFlavorFor(RunId id, DeploymentSpec spec) {
    return spec.steps().stream()
               .filter(step -> step.concerns(id.type().environment()))
               .findFirst()
               .flatMap(step -> step.zones().get(0).testerFlavor());
}
/** Returns the generated services.xml content for the tester application. */
static byte[] servicesXml(AthenzDomain domain, boolean systemUsesAthenz, boolean useTesterCertificate,
                          NodeResources resources) {
    // Memory set aside for the jdisc container itself; jdiscMemoryPct is its share of node memory.
    int jdiscMemoryGb = 2;
    int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb());
    // Surefire gets half of what remains after the jdisc reservation — presumably to leave
    // headroom for the forked test process; TODO confirm the factor of two.
    int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2);
    String resourceString = String.format(Locale.ENGLISH,
                                          "<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>",
                                          resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name());
    // The CD system's domain maps to the vespa.vespa identity domain — NOTE(review): assumption; confirm.
    AthenzDomain idDomain = ("vespa.vespa.cd".equals(domain.value()) ? AthenzDomain.from("vespa.vespa") : domain);
    String servicesXml =
            "<?xml version='1.0' encoding='UTF-8'?>\n" +
            "<services xmlns:deploy='vespa' version='1.0'>\n" +
            "    <container version='1.0' id='tester'>\n" +
            "\n" +
            "        <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" +
            "            <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" +
            "                <artifactsPath>artifacts</artifactsPath>\n" +
            "                <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" +
            "                <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
            "                <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" +
            "            </config>\n" +
            "        </component>\n" +
            "\n" +
            "        <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" +
            "            <binding>http:
            "        </handler>\n" +
            "\n" +
            "        <http>\n" +
            "            <!-- Make sure 4080 is the first port. This will be used by the config server. -->\n" +
            "            <server id='default' port='4080'/>\n" +
            "            <server id='testertls4443' port='4443'>\n" +
            "                <config name=\"jdisc.http.connector\">\n" +
            "                    <tlsClientAuthEnforcer>\n" +
            "                        <enable>true</enable>\n" +
            "                        <pathWhitelist>\n" +
            "                            <item>/status.html</item>\n" +
            "                            <item>/state/v1/config</item>\n" +
            "                        </pathWhitelist>\n" +
            "                    </tlsClientAuthEnforcer>\n" +
            "                </config>\n" +
            "                <ssl>\n" +
            "                    <private-key-file>/var/lib/sia/keys/" + idDomain.value() + ".tenant.key.pem</private-key-file>\n" +
            "                    <certificate-file>/var/lib/sia/certs/" + idDomain.value() + ".tenant.cert.pem</certificate-file>\n" +
            "                    <ca-certificates-file>/opt/yahoo/share/ssl/certs/athenz_certificate_bundle.pem</ca-certificates-file>\n" +
            "                    <client-authentication>want</client-authentication>\n" +
            "                </ssl>\n" +
            "            </server>\n" +
            "            <filtering>\n" +
            // Access control is only set up when the system uses Athenz.
            (systemUsesAthenz ?
            "                <access-control domain='" + domain.value() + "'>\n" +
            "                    <exclude>\n" +
            "                        <binding>http:
            "                    </exclude>\n" +
            "                </access-control>\n"
            : "") +
            "                <request-chain id=\"testrunner-api\">\n" +
            "                    <filter id='authz-filter' class='com.yahoo.jdisc.http.filter.security.athenz.AthenzAuthorizationFilter' bundle=\"jdisc-security-filters\">\n" +
            "                        <config name=\"jdisc.http.filter.security.athenz.athenz-authorization-filter\">\n" +
            "                            <credentialsToVerify>TOKEN_ONLY</credentialsToVerify>\n" +
            "                            <roleTokenHeaderName>Yahoo-Role-Auth</roleTokenHeaderName>\n" +
            "                        </config>\n" +
            "                        <component id=\"com.yahoo.jdisc.http.filter.security.athenz.StaticRequestResourceMapper\" bundle=\"jdisc-security-filters\">\n" +
            "                            <config name=\"jdisc.http.filter.security.athenz.static-request-resource-mapper\">\n" +
            "                                <resourceName>" + domain.value() + ":tester-application</resourceName>\n" +
            "                                <action>deploy</action>\n" +
            "                            </config>\n" +
            "                        </component>\n" +
            "                    </filter>\n" +
            "                </request-chain>\n" +
            "            </filtering>\n" +
            "        </http>\n" +
            "\n" +
            "        <nodes count=\"1\" allocated-memory=\"" + jdiscMemoryPct + "%\">\n" +
            "            " + resourceString + "\n" +
            "        </nodes>\n" +
            "    </container>\n" +
            "</services>\n";
    return servicesXml.getBytes(UTF_8);
}
/** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */
private static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) {
    // Domain and service attributes are emitted only when present; each carries its own trailing space.
    String deploymentSpec =
            "<?xml version='1.0' encoding='UTF-8'?>\n" +
            "<deployment version=\"1.0\" " +
            athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") +
            athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("") + ">" +
            "    <instance id=\"" + id.id().instance().value() + "\" />" +
            "</deployment>";
    return deploymentSpec.getBytes(UTF_8);
}
/** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */
private class DualLogger {

    private final RunId id;
    private final Step step;

    private DualLogger(RunId id, Step step) {
        this.id = id;
        this.step = step;
    }

    /** Logs the given messages to the job's log, at INFO. */
    private void log(String... messages) {
        log(List.of(messages));
    }

    private void log(List<String> messages) {
        controller.jobController().log(id, step, INFO, messages);
    }

    private void log(Level level, String message) {
        log(level, message, null);
    }

    /** Logs the exception to the class logger only; the job's log gets just the message. */
    private void logWithInternalException(Level level, String message, Throwable thrown) {
        logger.log(level, id + " at " + step + ": " + message, thrown);
        controller.jobController().log(id, step, level, message);
    }

    /** Logs to both logs; the job's log gets the stack trace appended to the message. */
    private void log(Level level, String message, Throwable thrown) {
        logger.log(level, id + " at " + step + ": " + message, thrown);

        String jobMessage = message;
        if (thrown != null) {
            ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream();
            thrown.printStackTrace(new PrintStream(traceBuffer));
            jobMessage = message + "\n" + traceBuffer;
        }
        controller.jobController().log(id, step, level, jobMessage);
    }

}
} | class InternalStepRunner implements StepRunner {
private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName());

// Default tester node resources; AWS regions get a larger default (see testerPackage).
private static final NodeResources DEFAULT_TESTER_RESOURCES =
        new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any);
private static final NodeResources DEFAULT_TESTER_RESOURCES_AWS =
        new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any);

// Time limit for tester endpoints to show up after convergence (see installTester).
static final Duration endpointTimeout = Duration.ofMinutes(15);
// Overall time limit for tester installation (see installTester).
static final Duration testerTimeout = Duration.ofMinutes(30);
// Time limit for real application installation — used by install steps outside this excerpt; TODO confirm.
static final Duration installationTimeout = Duration.ofMinutes(60);
// Validity period of the generated tester certificate (see appendAndStoreCertificate).
static final Duration certificateTimeout = Duration.ofMinutes(300);

private final Controller controller;
private final TestConfigSerializer testConfigSerializer;
private final DeploymentFailureMails mails;

public InternalStepRunner(Controller controller) {
    this.controller = controller;
    this.testConfigSerializer = new TestConfigSerializer(controller.system());
    this.mails = new DeploymentFailureMails(controller.zoneRegistry());
}
@Override
public Optional<RunStatus> run(LockedStep step, RunId id) {
    // One logger per invocation, tagging all entries with this run and step.
    DualLogger logger = new DualLogger(id, step.get());
    try {
        // Dispatch to the handler for this step. Empty means "not done yet, retry later";
        // a present RunStatus concludes the step with that outcome.
        switch (step.get()) {
            case deployTester: return deployTester(id, logger);
            case deployInitialReal: return deployInitialReal(id, logger);
            case installInitialReal: return installInitialReal(id, logger);
            case deployReal: return deployReal(id, logger);
            case installTester: return installTester(id, logger);
            case installReal: return installReal(id, logger);
            case startStagingSetup: return startTests(id, true, logger);
            case endStagingSetup: return endTests(id, logger);
            case startTests: return startTests(id, false, logger);
            case endTests: return endTests(id, logger);
            case copyVespaLogs: return copyVespaLogs(id, logger);
            case deactivateReal: return deactivateReal(id, logger);
            case deactivateTester: return deactivateTester(id, logger);
            case report: return report(id, logger);
            default: throw new AssertionError("Unknown step '" + step + "'!");
        }
    }
    catch (UncheckedIOException e) {
        // IO problems are treated as transient: log internally only, and retry the step.
        logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e);
        return Optional.empty();
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Unexpected exception running " + id, e);
        // Cleanup steps keep retrying so the run can terminate; other steps fail the run.
        if (JobProfile.of(id.type()).alwaysRun().contains(step.get())) {
            logger.log("Will keep trying, as this is a cleanup step.");
            return Optional.empty();
        }
        return Optional.of(error);
    }
}
/** Deploys the source (or target, if no source) versions of the real application, setting the stage. */
private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) {
    Versions versions = controller.jobController().run(id).get().versions();
    Version initialPlatform = versions.sourcePlatform().orElse(versions.targetPlatform());
    ApplicationVersion initialApplication = versions.sourceApplication().orElse(versions.targetApplication());
    logger.log("Deploying platform version " + initialPlatform +
               " and application version " + initialApplication.id() + " ...");
    return deployReal(id, true, versions, logger);
}
/** Deploys the target versions of the real application. */
private Optional<RunStatus> deployReal(RunId id, DualLogger logger) {
    Versions versions = controller.jobController().run(id).get().versions();
    Version platform = versions.targetPlatform();
    ApplicationVersion application = versions.targetApplication();
    logger.log("Deploying platform version " + platform +
               " and application version " + application.id() + " ...");
    return deployReal(id, false, versions, logger);
}
/** Deploys the real application; manually deployed (dev) jobs ship a package and pin the platform. */
private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, Versions versions, DualLogger logger) {
    boolean manuallyDeployed = id.type().environment().isManuallyDeployed();
    ZoneId zone = id.type().zone(controller.system());

    // Only manually deployed jobs supply a package and a fixed platform version here.
    Optional<ApplicationPackage> applicationPackage =
            manuallyDeployed ? Optional.of(new ApplicationPackage(controller.applications().applicationStore()
                                                                            .getDev(id.application(), zone)))
                             : Optional.empty();
    Optional<Version> vespaVersion =
            manuallyDeployed ? Optional.of(versions.targetPlatform())
                             : Optional.empty();

    Instant stepStart = controller.jobController().run(id).get()
                                  .stepInfo(setTheStage ? deployInitialReal : deployReal).get()
                                  .startTime().get();
    return deploy(id.application(),
                  id.type(),
                  () -> controller.applications().deploy(id.application(),
                                                         zone,
                                                         applicationPackage,
                                                         new DeployOptions(false,
                                                                           vespaVersion,
                                                                           false,
                                                                           setTheStage)),
                  stepStart,
                  logger);
}
/** Deploys the tester container on the current system platform version. */
private Optional<RunStatus> deployTester(RunId id, DualLogger logger) {
    Version platform = controller.systemVersion();
    logger.log("Deploying the tester container on platform " + platform + " ...");

    Instant stepStart = controller.jobController().run(id).get()
                                  .stepInfo(deployTester).get()
                                  .startTime().get();
    return deploy(id.tester().id(),
                  id.type(),
                  () -> controller.applications().deployTester(id.tester(),
                                                               testerPackage(id),
                                                               id.type().zone(controller.system()),
                                                               new DeployOptions(true,
                                                                                 Optional.of(platform),
                                                                                 false,
                                                                                 false)),
                  stepStart,
                  logger);
}
/**
 * Performs the given deployment and translates its outcome to a step status.
 *
 * Disallowed refeed actions fail the deployment with guidance; restart actions are
 * carried out here, host by host. Config server errors are mapped to retry,
 * out-of-capacity, or deployment failure, depending on their error code.
 */
private Optional<RunStatus> deploy(ApplicationId id, JobType type, Supplier<ActivateResult> deployment,
                                   Instant startTime, DualLogger logger) {
    try {
        PrepareResponse prepareResponse = deployment.get().prepareResponse();
        // Refeed actions cannot be performed automatically: fail with the config server's guidance.
        if ( ! prepareResponse.configChangeActions.refeedActions.stream().allMatch(action -> action.allowed)) {
            List<String> messages = new ArrayList<>();
            messages.add("Deploy failed due to non-compatible changes that require re-feed.");
            messages.add("Your options are:");
            messages.add("1. Revert the incompatible changes.");
            messages.add("2. If you think it is safe in your case, you can override this validation, see");
            messages.add("            http:
            messages.add("3. Deploy as a new application under a different name.");
            messages.add("Illegal actions:");
            prepareResponse.configChangeActions.refeedActions.stream()
                                               .filter(action -> ! action.allowed)
                                               .flatMap(action -> action.messages.stream())
                                               .forEach(messages::add);
            messages.add("Details:");
            prepareResponse.log.stream()
                               .map(entry -> entry.message)
                               .forEach(messages::add);
            logger.log(messages);
            return Optional.of(deploymentFailed);
        }

        // Restart actions are performed right away, on each distinct affected host.
        if (prepareResponse.configChangeActions.restartActions.isEmpty())
            logger.log("No services requiring restart.");
        else
            prepareResponse.configChangeActions.restartActions.stream()
                                               .flatMap(action -> action.services.stream())
                                               .map(service -> service.hostName)
                                               .sorted().distinct()
                                               .map(Hostname::new)
                                               .forEach(hostname -> {
                                                   controller.applications().restart(new DeploymentId(id, type.zone(controller.system())), Optional.of(hostname));
                                                   logger.log("Restarting services on host " + hostname.id() + ".");
                                               });
        logger.log("Deployment successful.");
        if (prepareResponse.message != null)
            logger.log(prepareResponse.message);

        return Optional.of(running);
    }
    catch (ConfigServerException e) {
        // Retryable errors retry (empty) until an hour has passed since the step started, then fail.
        Optional<RunStatus> result = startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
                ? Optional.of(deploymentFailed) : Optional.empty();
        switch (e.getErrorCode()) {
            case ACTIVATION_CONFLICT:
            case APPLICATION_LOCK_FAILURE:
            case CERTIFICATE_NOT_READY:
                logger.log("Deployment failed with possibly transient error " + e.getErrorCode() +
                           ", will retry: " + e.getMessage());
                return result;
            case LOAD_BALANCER_NOT_READY:
            case PARENT_HOST_NOT_READY:
                logger.log(e.getServerMessage());
                return result;
            case OUT_OF_CAPACITY:
                logger.log(e.getServerMessage());
                return Optional.of(outOfCapacity);
            case INVALID_APPLICATION_PACKAGE:
            case BAD_REQUEST:
                logger.log(e.getMessage());
                return Optional.of(deploymentFailed);
        }
        // Unrecognized error codes propagate to the caller's generic exception handling.
        throw e;
    }
}
/** Waits for the initial deployment of the real application to install, with the stage set. */
private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) {
    boolean setTheStage = true;
    return installReal(id, setTheStage, logger);
}
/** Waits for the target deployment of the real application to install. */
private Optional<RunStatus> installReal(RunId id, DualLogger logger) {
    boolean setTheStage = false;
    return installReal(id, setTheStage, logger);
}
/** Waits for the tester deployment to converge, and its endpoints and container to come up. */
private Optional<RunStatus> installTester(RunId id, DualLogger logger) {
    Run run = controller.jobController().run(id).get();
    Version platform = controller.systemVersion();
    ZoneId zone = id.type().zone(controller.system());
    ApplicationId testerId = id.tester().id();

    // Config convergence status of the tester's services, against the system platform version.
    Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(testerId, zone),
                                                                                                           Optional.of(platform));
    if (services.isEmpty()) {
        logger.log("Config status not currently available -- will retry.");
        // Fail with error if status has been unavailable for five minutes since the step started.
        return run.stepInfo(installTester).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
                ? Optional.of(error)
                : Optional.empty();
    }
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone,
                                                                                        testerId,
                                                                                        ImmutableSet.of(active, reserved));
    List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(zone,
                                                                                          nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
    NodeList nodeList = NodeList.of(nodes, parents, services.get());
    // Log current node/service details to the job log on every poll.
    logger.log(nodeList.asList().stream()
                       .flatMap(node -> nodeDetails(node, false))
                       .collect(toList()));

    if (nodeList.summary().converged()) {
        // Converged: require endpoints, then a responsive container, before declaring success.
        if (endpointsAvailable(testerId, zone, logger)) {
            if (containersAreUp(testerId, zone, logger)) {
                logger.log("Tester container successfully installed!");
                return Optional.of(running);
            }
        }
        // Endpoints missing: fail once endpointTimeout has passed since the step started.
        else if (run.stepInfo(installTester).get().startTime().get().plus(endpointTimeout).isBefore(controller.clock().instant())) {
            logger.log(WARNING, "Tester failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
            return Optional.of(error);
        }
    }

    // Overall deadline for the whole installation step.
    if (run.stepInfo(installTester).get().startTime().get().plus(testerTimeout).isBefore(controller.clock().instant())) {
        logger.log(WARNING, "Installation of tester failed to complete within " + testerTimeout.toMinutes() + " minutes!");
        return Optional.of(error);
    }
    return Optional.empty();
}
/** Returns true iff all containers in the deployment give 100 consecutive 200 OK responses on /status.html. */
private boolean containersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
    var endpoints = controller.applications().clusterEndpoints(Set.of(new DeploymentId(id, zoneId)));
    if ( ! endpoints.containsKey(zoneId))
        return false;

    for (URI endpoint : endpoints.get(zoneId).values()) {
        // Tester instances have a dedicated readiness check.
        boolean ready;
        if (id.instance().isTester())
            ready = controller.jobController().cloud().testerReady(endpoint);
        else
            ready = controller.jobController().cloud().ready(endpoint);

        if ( ! ready) {
            logger.log("Failed to get 100 consecutive OKs from " + endpoint);
            return false;
        }
    }
    return true;
}
/** Returns whether all endpoints of the deployment are assigned and resolvable in DNS, logging them if so. */
private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) {
    var endpoints = controller.applications().clusterEndpoints(Set.of(new DeploymentId(id, zone)));
    Map<ClusterSpec.Id, URI> zoneEndpoints = endpoints.get(zone);
    if (zoneEndpoints == null) {
        logger.log("Endpoints not yet ready.");
        return false;
    }
    for (URI endpoint : zoneEndpoints.values()) {
        if ( ! controller.jobController().cloud().exists(endpoint)) {
            logger.log(INFO, "DNS lookup yielded no IP address for '" + endpoint + "'.");
            return false;
        }
    }
    logEndpoints(endpoints, logger);
    return true;
}
/** Writes the given endpoints, grouped by zone, to the job log. */
private void logEndpoints(Map<ZoneId, Map<ClusterSpec.Id, URI>> endpoints, DualLogger logger) {
    List<String> messages = new ArrayList<>();
    messages.add("Found endpoints:");
    endpoints.forEach((zone, uris) -> {
        messages.add("- " + zone);
        for (var entry : uris.entrySet())
            messages.add(" |-- " + entry.getValue() + " (" + entry.getKey() + ")");
    });
    logger.log(messages);
}
/** Renders a node, and its services, as human-readable detail lines for the job log. */
private Stream<String> nodeDetails(NodeWithServices node, boolean printAllServices) {
    // First the node line and its pending platform/OS/firmware/restart/reboot state,
    // then one line per service (filtered below).
    return Stream.concat(Stream.of(node.node().hostname() + ": " + humanize(node.node().serviceState()),
                                   "--- platform " + node.node().wantedVersion() + (node.needsPlatformUpgrade()
                                           ? " <-- " + (node.node().currentVersion().isEmpty() ? "not booted" : node.node().currentVersion())
                                           : "") +
                                   (node.needsOsUpgrade() && node.isAllowedDown()
                                           ? ", upgrading OS (" + node.node().wantedOsVersion() + " <-- " + node.node().currentOsVersion() + ")"
                                           : "") +
                                   (node.needsFirmwareUpgrade() && node.isAllowedDown()
                                           ? ", upgrading firmware"
                                           : "") +
                                   (node.needsRestart()
                                           ? ", restart pending (" + node.node().wantedRestartGeneration() + " <-- " + node.node().restartGeneration() + ")"
                                           : "") +
                                   (node.needsReboot()
                                           ? ", reboot pending (" + node.node().wantedRebootGeneration() + " <-- " + node.node().rebootGeneration() + ")"
                                           : "")),
                         // Service lines are printed for all nodes when printAllServices is set, otherwise
                         // only for nodes still needing new config. NOTE(review): the filter tests the
                         // node, not each individual service — confirm this is intended.
                         node.services().stream()
                             .filter(service -> printAllServices || node.needsNewConfig())
                             .map(service -> "--- " + service.type() + " on port " + service.port() + (service.currentGeneration() == -1
                                     ? " has not started "
                                     : " has config generation " + service.currentGeneration() + ", wanted is " + node.wantedConfigGeneration())));
}
/** Renders a node service state as human-friendly text. */
private String humanize(Node.ServiceState state) {
    if (state == Node.ServiceState.allowedDown) return "allowed to be DOWN";
    if (state == Node.ServiceState.expectedUp) return "expected to be UP";
    if (state == Node.ServiceState.unorchestrated) return "unorchestrated";
    return state.name();
}
/** Starts the tests (or the staging setup, when {@code isSetup}) in the deployed tester container. */
private Optional<RunStatus> startTests(RunId id, boolean isSetup, DualLogger logger) {
    Optional<Deployment> deployment = deployment(id.application(), id.type());
    if (deployment.isEmpty()) {
        logger.log(INFO, "Deployment expired before tests could start.");
        return Optional.of(error);
    }

    // Tests are handed endpoints of all production deployments, plus the zone under test.
    var deployments = controller.applications().requireInstance(id.application())
                                .productionDeployments().keySet().stream()
                                .map(zone -> new DeploymentId(id.application(), zone))
                                .collect(Collectors.toSet());
    deployments.add(new DeploymentId(id.application(), id.type().zone(controller.system())));

    logger.log("Attempting to find endpoints ...");
    var endpoints = controller.applications().clusterEndpoints(deployments);
    if ( ! endpoints.containsKey(id.type().zone(controller.system()))) {
        logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!");
        return Optional.of(error);
    }
    logEndpoints(endpoints, logger);

    Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id);
    if (testerEndpoint.isEmpty()) {
        logger.log(WARNING, "Endpoints for the tester container vanished again, while it was still active!");
        return Optional.of(error);
    }

    if ( ! controller.jobController().cloud().testerReady(testerEndpoint.get())) {
        logger.log(WARNING, "Tester container went bad!");
        return Optional.of(error);
    }

    // Kick off the test suite with the serialized test config; completion is polled by endTests.
    logger.log("Starting tests ...");
    controller.jobController().cloud().startTests(testerEndpoint.get(),
                                                  TesterCloud.Suite.of(id.type(), isSetup),
                                                  testConfigSerializer.configJson(id.application(),
                                                                                  id.type(),
                                                                                  true,
                                                                                  endpoints,
                                                                                  controller.applications().contentClustersByZone(deployments)));
    return Optional.of(running);
}
/** Polls the tester for test completion, and maps the tester's status to a step outcome. */
private Optional<RunStatus> endTests(RunId id, DualLogger logger) {
    if (deployment(id.application(), id.type()).isEmpty()) {
        logger.log(INFO, "Deployment expired before tests could complete.");
        return Optional.of(aborted);
    }

    // A tester certificate which is no longer valid cannot be used to talk to the tester: abort.
    Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate();
    if (testerCertificate.isPresent()) {
        try {
            testerCertificate.get().checkValidity(Date.from(controller.clock().instant()));
        }
        catch (CertificateExpiredException | CertificateNotYetValidException e) {
            logger.log(INFO, "Tester certificate expired before tests could complete.");
            return Optional.of(aborted);
        }
    }

    controller.jobController().updateTestLog(id);

    // Feature flag: ask the config server for tester status, or call the tester endpoint directly.
    BooleanFlag useConfigServerForTesterAPI = Flags.USE_CONFIG_SERVER_FOR_TESTER_API_CALLS.bindTo(controller.flagSource());
    ZoneId zoneId = id.type().zone(controller.system());
    TesterCloud.Status testStatus;
    boolean useConfigServer = useConfigServerForTesterAPI.with(FetchVector.Dimension.ZONE_ID, zoneId.value()).value();
    // Uses the static class logger, so the flag value goes to the controller's log only, not the job's log.
    InternalStepRunner.logger.log(LogLevel.INFO, Flags.USE_CONFIG_SERVER_FOR_TESTER_API_CALLS.id().toString() +
                                                 " has value " + useConfigServer + " in zone " + zoneId.value());
    if (useConfigServer) {
        testStatus = controller.serviceRegistry().configServer().getTesterStatus(getTesterDeploymentId(id, zoneId));
    } else {
        Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id);
        if (testerEndpoint.isEmpty()) {
            logger.log("Endpoints for tester not found -- trying again later.");
            return Optional.empty();
        }
        testStatus = controller.jobController().cloud().getStatus(testerEndpoint.get());
    }

    switch (testStatus) {
        case NOT_STARTED:
            throw new IllegalStateException("Tester reports tests not started, even though they should have!");
        case RUNNING:
            // Not done yet; poll again later.
            return Optional.empty();
        case FAILURE:
            logger.log("Tests failed.");
            return Optional.of(testFailure);
        case ERROR:
            logger.log(INFO, "Tester failed running its tests!");
            return Optional.of(error);
        case SUCCESS:
            logger.log("Tests completed successfully.");
            return Optional.of(running);
        default:
            throw new IllegalStateException("Unknown status '" + testStatus + "'!");
    }
}
/** Copies the Vespa log from the deployment, if it still exists; a copy failure concludes the run with {@code error}. */
private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) {
    // Nothing to copy when the deployment has already expired or been removed.
    if (deployment(id.application(), id.type()).isEmpty())
        return Optional.of(running);

    try {
        controller.jobController().updateVespaLog(id);
        return Optional.of(running);
    }
    catch (Exception e) {
        logger.log(INFO, "Failure getting vespa logs for " + id, e);
        return Optional.of(error);
    }
}
/** Deactivates the real deployment; retries on failure until the step has been failing for an hour. */
private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) {
    try {
        logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
        controller.applications().deactivate(id.application(), id.type().zone(controller.system()));
        return Optional.of(running);
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Failed deleting application " + id.application(), e);
        Instant stepStart = controller.jobController().run(id).get()
                                      .stepInfo(deactivateReal).get()
                                      .startTime().get();
        // Retry (empty) until an hour has passed since the step started, then give up.
        if (controller.clock().instant().isAfter(stepStart.plus(Duration.ofHours(1))))
            return Optional.of(error);
        return Optional.empty();
    }
}
/** Deactivates the tester deployment; retries on failure until the step has been failing for an hour. */
private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) {
    try {
        logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
        controller.jobController().deactivateTester(id.tester(), id.type());
        return Optional.of(running);
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Failed deleting tester of " + id.application(), e);
        Instant stepStart = controller.jobController().run(id).get()
                                      .stepInfo(deactivateTester).get()
                                      .startTime().get();
        // Retry (empty) until an hour has passed since the step started, then give up.
        if (controller.clock().instant().isAfter(stepStart.plus(Duration.ofHours(1))))
            return Optional.of(error);
        return Optional.empty();
    }
}
/** Sends a failure notification if the run is still active and has failed. */
private Optional<RunStatus> report(RunId id, DualLogger logger) {
    try {
        controller.jobController().active(id)
                  .filter(Run::hasFailed)
                  .ifPresent(run -> sendNotification(run, logger));
        return Optional.of(running);
    }
    catch (IllegalStateException e) {
        logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e);
        return Optional.of(error);
    }
}
/** Sends a mail with a notification of a failed run, if one should be sent. */
private void sendNotification(Run run, DualLogger logger) {
    Application application = controller.applications().requireApplication(TenantAndApplicationId.from(run.id().application()));
    Notifications notifications = application.deploymentSpec().requireInstance(run.id().application().instance()).notifications();
    // The run failed on a new commit when the change under deployment is exactly the target application version.
    boolean newCommit = application.require(run.id().application().instance()).change().application()
                                   .map(run.versions().targetApplication()::equals)
                                   .orElse(false);
    When when = newCommit ? failingCommit : failing;

    List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when));
    if (notifications.emailRolesFor(when).contains(author))
        run.versions().targetApplication().authorEmail().ifPresent(recipients::add);
    if (recipients.isEmpty())
        return;

    try {
        // Exactly one mail matches the run's status; other statuses send nothing.
        switch (run.status()) {
            case outOfCapacity:
                if (run.id().type().isProduction())
                    controller.serviceRegistry().mailer().send(mails.outOfCapacity(run.id(), recipients));
                break;
            case deploymentFailed:
                controller.serviceRegistry().mailer().send(mails.deploymentFailure(run.id(), recipients));
                break;
            case installationFailed:
                controller.serviceRegistry().mailer().send(mails.installationFailure(run.id(), recipients));
                break;
            case testFailure:
                controller.serviceRegistry().mailer().send(mails.testFailure(run.id(), recipients));
                break;
            case error:
                controller.serviceRegistry().mailer().send(mails.systemError(run.id(), recipients));
                break;
            default:
                break;
        }
    }
    catch (RuntimeException e) {
        logger.log(INFO, "Exception trying to send mail for " + run.id(), e);
    }
}
/** Returns the deployment of the real application in the zone of the given job, if it exists. */
private Optional<Deployment> deployment(ApplicationId id, JobType type) {
    ZoneId zone = type.zone(controller.system());
    return Optional.ofNullable(application(id).deployments().get(zone));
}
/** Returns the real application with the given id. */
private Instance application(ApplicationId id) {
    // Takes and immediately releases the application lock before reading — presumably to
    // serialize with in-flight writers so the read below sees their updates. TODO confirm.
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { });
    return controller.applications().requireInstance(id);
}
/**
 * Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout.
 *
 * We time out the job before the deployment expires, for zones where deployments are not persistent,
 * to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry,
 * and the given default installation timeout, minus one minute, is used as a timeout threshold.
 */
private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) {
    Run run = controller.jobController().run(id).get();
    // Outside CD, a run which started after the deployment was made never times out here.
    if ( ! controller.system().isCd() && run.start().isAfter(deployment.at()))
        return false;

    // The effective timeout is the smaller of the zone's deployment TTL and the given default.
    Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone())
                                 .map(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0 ? zoneTimeout : defaultTimeout)
                                 .orElse(defaultTimeout);
    Instant threshold = controller.clock().instant().minus(timeout.minus(Duration.ofMinutes(1)));
    return deployment.at().isBefore(threshold);
}
/** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */
private ApplicationPackage testerPackage(RunId id) {
    ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication();
    DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec();
    ZoneId zone = id.type().zone(controller.system());
    // Run-specific tester certificates are used only for test jobs in public systems.
    boolean useTesterCertificate = controller.system().isPublic() && id.type().environment().isTest();
    // Generated container config: Athenz credentials only outside public systems; tester node
    // resources from the deployment spec if declared, else defaults — AWS regions get the larger default.
    byte[] servicesXml = servicesXml(controller.zoneRegistry().accessControlDomain(),
                                     ! controller.system().isPublic(),
                                     useTesterCertificate,
                                     testerFlavorFor(id, spec)
                                             .map(NodeResources::fromLegacyName)
                                             .orElse(zone.region().value().contains("aws-") ?
                                                     DEFAULT_TESTER_RESOURCES_AWS : DEFAULT_TESTER_RESOURCES));
    // The pre-built test fat-jar stored for the target application version.
    byte[] testPackage = controller.applications().applicationStore().getTester(id.application().tenant(), id.application().application(), version);
    byte[] deploymentXml = deploymentXml(id.tester(),
                                         spec.athenzDomain(),
                                         spec.requireInstance(id.application().instance()).athenzService(zone.environment(), zone.region()));
    try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) {
        zipBuilder.add(testPackage);
        zipBuilder.add("services.xml", servicesXml);
        zipBuilder.add("deployment.xml", deploymentXml);
        if (useTesterCertificate)
            appendAndStoreCertificate(zipBuilder, id);
        // Explicit close before reading the bytes; the try-with-resources close then follows —
        // NOTE(review): presumably ZipBuilder tolerates the double close; confirm.
        zipBuilder.close();
        return new ApplicationPackage(zipBuilder.toByteArray());
    }
}
/** Generates a key pair and a self-signed certificate identifying this run's tester, stores the certificate, and adds both to the zip. */
private void appendAndStoreCertificate(ZipBuilder zipBuilder, RunId id) {
    KeyPair keys = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048);
    X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "." + id.number());
    X509Certificate certificate =
            X509CertificateBuilder.fromKeypair(keys,
                                               subject,
                                               controller.clock().instant(),
                                               controller.clock().instant().plus(certificateTimeout),
                                               SignatureAlgorithm.SHA512_WITH_RSA,
                                               BigInteger.ONE)
                                  .build();
    controller.jobController().storeTesterCertificate(id, certificate);
    zipBuilder.add("artifacts/key", KeyUtils.toPem(keys.getPrivate()).getBytes(UTF_8));
    zipBuilder.add("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8));
}
/** The deployment id of the given run's tester application, in the given zone. */
private DeploymentId getTesterDeploymentId(RunId runId, ZoneId zoneId) {
    ApplicationId testerApplication = runId.tester().id();
    return new DeploymentId(testerApplication, zoneId);
}
/** The tester flavor declared for the first deployment step concerning this job's environment, if any. */
private static Optional<String> testerFlavorFor(RunId id, DeploymentSpec spec) {
    return spec.steps().stream()
               .filter(step -> step.concerns(id.type().environment()))
               .findFirst()
               .flatMap(step -> step.zones().get(0).testerFlavor());
}
/** Returns the generated services.xml content for the tester application. */
static byte[] servicesXml(AthenzDomain domain, boolean systemUsesAthenz, boolean useTesterCertificate,
NodeResources resources) {
int jdiscMemoryGb = 2;
int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb());
int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2);
String resourceString = String.format(Locale.ENGLISH,
"<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>",
resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name());
AthenzDomain idDomain = ("vespa.vespa.cd".equals(domain.value()) ? AthenzDomain.from("vespa.vespa") : domain);
String servicesXml =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<services xmlns:deploy='vespa' version='1.0'>\n" +
" <container version='1.0' id='tester'>\n" +
"\n" +
" <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" +
" <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" +
" <artifactsPath>artifacts</artifactsPath>\n" +
" <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" +
" <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
" <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" +
" </config>\n" +
" </component>\n" +
"\n" +
" <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" +
" <binding>http:
" </handler>\n" +
"\n" +
" <http>\n" +
" <!-- Make sure 4080 is the first port. This will be used by the config server. -->\n" +
" <server id='default' port='4080'/>\n" +
" <server id='testertls4443' port='4443'>\n" +
" <config name=\"jdisc.http.connector\">\n" +
" <tlsClientAuthEnforcer>\n" +
" <enable>true</enable>\n" +
" <pathWhitelist>\n" +
" <item>/status.html</item>\n" +
" <item>/state/v1/config</item>\n" +
" </pathWhitelist>\n" +
" </tlsClientAuthEnforcer>\n" +
" </config>\n" +
" <ssl>\n" +
" <private-key-file>/var/lib/sia/keys/" + idDomain.value() + ".tenant.key.pem</private-key-file>\n" +
" <certificate-file>/var/lib/sia/certs/" + idDomain.value() + ".tenant.cert.pem</certificate-file>\n" +
" <ca-certificates-file>/opt/yahoo/share/ssl/certs/athenz_certificate_bundle.pem</ca-certificates-file>\n" +
" <client-authentication>want</client-authentication>\n" +
" </ssl>\n" +
" </server>\n" +
" <filtering>\n" +
(systemUsesAthenz ?
" <access-control domain='" + domain.value() + "'>\n" +
" <exclude>\n" +
" <binding>http:
" </exclude>\n" +
" </access-control>\n"
: "") +
" <request-chain id=\"testrunner-api\">\n" +
" <filter id='authz-filter' class='com.yahoo.jdisc.http.filter.security.athenz.AthenzAuthorizationFilter' bundle=\"jdisc-security-filters\">\n" +
" <config name=\"jdisc.http.filter.security.athenz.athenz-authorization-filter\">\n" +
" <credentialsToVerify>TOKEN_ONLY</credentialsToVerify>\n" +
" <roleTokenHeaderName>Yahoo-Role-Auth</roleTokenHeaderName>\n" +
" </config>\n" +
" <component id=\"com.yahoo.jdisc.http.filter.security.athenz.StaticRequestResourceMapper\" bundle=\"jdisc-security-filters\">\n" +
" <config name=\"jdisc.http.filter.security.athenz.static-request-resource-mapper\">\n" +
" <resourceName>" + domain.value() + ":tester-application</resourceName>\n" +
" <action>deploy</action>\n" +
" </config>\n" +
" </component>\n" +
" </filter>\n" +
" </request-chain>\n" +
" </filtering>\n" +
" </http>\n" +
"\n" +
" <nodes count=\"1\" allocated-memory=\"" + jdiscMemoryPct + "%\">\n" +
" " + resourceString + "\n" +
" </nodes>\n" +
" </container>\n" +
"</services>\n";
return servicesXml.getBytes(UTF_8);
}
/** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */
private static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) {
    String domainAttribute = athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("");
    String serviceAttribute = athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("");
    String deploymentSpec = "<?xml version='1.0' encoding='UTF-8'?>\n"
                            + "<deployment version=\"1.0\" " + domainAttribute + serviceAttribute + ">"
                            + " <instance id=\"" + id.id().instance().value() + "\" />"
                            + "</deployment>";
    return deploymentSpec.getBytes(UTF_8);
}
/** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */
private class DualLogger {
    private final RunId id;
    private final Step step;
    private DualLogger(RunId id, Step step) {
        this.id = id;
        this.step = step;
    }
    // Logs the messages only to the job's own log, at INFO.
    private void log(String... messages) {
        log(List.of(messages));
    }
    private void log(List<String> messages) {
        controller.jobController().log(id, step, INFO, messages);
    }
    private void log(Level level, String message) {
        log(level, message, null);
    }
    // Logs to both sinks, but deliberately keeps the exception (and its stack trace) out of
    // the job log — only the internal logger sees the throwable.
    private void logWithInternalException(Level level, String message, Throwable thrown) {
        logger.log(level, id + " at " + step + ": " + message, thrown);
        controller.jobController().log(id, step, level, message);
    }
    // Logs to both sinks; the job log additionally gets the full stack trace appended to the message.
    private void log(Level level, String message, Throwable thrown) {
        logger.log(level, id + " at " + step + ": " + message, thrown);
        if (thrown != null) {
            ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream();
            thrown.printStackTrace(new PrintStream(traceBuffer));
            message += "\n" + traceBuffer;
        }
        controller.jobController().log(id, step, level, message);
    }
}
} |
Would the number of allowed down hosts ever exceed this threshold? For the start and end (with failure), I think all ghosts should be printed. | private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) {
// Body of installReal(id, setTheStage, logger): polls convergence of the real deployment once,
// and either concludes the run or leaves the step unresolved for the next tick.
// The real deployment has expired: the run can never succeed.
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
    logger.log(INFO, "Deployment expired before installation was successful.");
    return Optional.of(installationFailed);
}
Versions versions = controller.jobController().run(id).get().versions();
// When setting the stage, converge on the source platform first; otherwise on the target.
Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform();
Run run = controller.jobController().run(id).get();
Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(id.application(), id.type().zone(controller.system())),
                                                                                                       Optional.of(platform));
if (services.isEmpty()) {
    // No convergence data yet; tolerate this for five minutes from step start, then error out.
    logger.log("Config status not currently available -- will retry.");
    Step step = setTheStage ? installInitialReal : installReal;
    return run.stepInfo(step).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
           ? Optional.of(error)
           : Optional.empty();
}
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
                                                                                     id.application(),
                                                                                     ImmutableSet.of(active, reserved));
List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
                                                                                       nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
NodeList nodeList = NodeList.of(nodes, parents, services.get());
boolean firstTick = run.convergenceSummary().isEmpty();
if (firstTick) { // Log the details of all nodes and services on the first tick.
    logger.log(nodeList.asList().stream()
                       .flatMap(node -> nodeDetails(node, true))
                       .collect(toList()));
}
ConvergenceSummary summary = nodeList.summary();
if (summary.converged()) {
    controller.jobController().locked(id, lockedRun -> lockedRun.withSummary(null));
    if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) {
        if (containersAreUp(id.application(), id.type().zone(controller.system()), logger)) {
            logger.log("Installation succeeded!");
            return Optional.of(running);
        }
    }
    else if (timedOut(id, deployment.get(), endpointTimeout)) {
        logger.log(WARNING, "Endpoints failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
        return Optional.of(error);
    }
}
// Not yet done: check the various failure conditions before scheduling another tick.
boolean failed = false;
NodeList suspendedTooLong = nodeList.suspendedSince(controller.clock().instant().minus(installationTimeout));
if ( ! suspendedTooLong.isEmpty()) {
    logger.log(INFO, "Some nodes have been suspended for more than " + installationTimeout.toMinutes() + " minutes.");
    failed = true;
}
if (run.noNodesDownSince()
       .map(since -> since.isBefore(controller.clock().instant().minus(installationTimeout)))
       .orElse(false)) {
    // Nothing has been allowed to suspend for a whole installationTimeout: diagnose why.
    if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0)
        logger.log(INFO, "No nodes allowed to suspend to progress installation for " + installationTimeout.toMinutes() + " minutes.");
    else
        logger.log(INFO, "Nodes not able to start with new application package.");
    failed = true;
}
Duration timeout = JobRunner.jobTimeout.minusHours(1);
if (timedOut(id, deployment.get(), timeout)) {
    logger.log(INFO, "Installation failed to complete within " + timeout.toHours() + "hours!");
    failed = true;
}
if (failed) { // Dump the full node details on failure, for the record.
    logger.log(nodeList.asList().stream()
                       .flatMap(node -> nodeDetails(node, true))
                       .collect(toList()));
    return Optional.of(installationFailed);
}
if ( ! firstTick) // On later ticks, log details only for the nodes currently allowed to be down.
    logger.log(nodeList.allowedDown().asList().stream()
                       .flatMap(node -> nodeDetails(node, false))
                       .collect(toList()));
controller.jobController().locked(id, lockedRun -> {
    Instant noNodesDownSince = summary.down() == 0 ? lockedRun.noNodesDownSince().orElse(controller.clock().instant()) : null;
    return lockedRun.noNodesDownSince(noNodesDownSince).withSummary(summary);
});
return Optional.empty();
} | logger.log(INFO, "Some nodes have been suspended for more than " + installationTimeout.toMinutes() + " minutes."); | private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) {
// Body of installReal(id, setTheStage, logger): polls convergence of the real deployment once,
// and either concludes the run or leaves the step unresolved for the next tick.
// The real deployment has expired: the run can never succeed.
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
    logger.log(INFO, "Deployment expired before installation was successful.");
    return Optional.of(installationFailed);
}
Versions versions = controller.jobController().run(id).get().versions();
// When setting the stage, converge on the source platform first; otherwise on the target.
Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform();
Run run = controller.jobController().run(id).get();
Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(id.application(), id.type().zone(controller.system())),
                                                                                                       Optional.of(platform));
if (services.isEmpty()) {
    // No convergence data yet; tolerate this for five minutes from step start, then error out.
    logger.log("Config status not currently available -- will retry.");
    Step step = setTheStage ? installInitialReal : installReal;
    return run.stepInfo(step).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
           ? Optional.of(error)
           : Optional.empty();
}
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
                                                                                     id.application(),
                                                                                     ImmutableSet.of(active, reserved));
List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
                                                                                       nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
NodeList nodeList = NodeList.of(nodes, parents, services.get());
boolean firstTick = run.convergenceSummary().isEmpty();
if (firstTick) { // Log the details of all nodes and services on the first tick.
    logger.log(nodeList.asList().stream()
                       .flatMap(node -> nodeDetails(node, true))
                       .collect(toList()));
}
ConvergenceSummary summary = nodeList.summary();
if (summary.converged()) {
    controller.jobController().locked(id, lockedRun -> lockedRun.withSummary(null));
    if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) {
        if (containersAreUp(id.application(), id.type().zone(controller.system()), logger)) {
            logger.log("Installation succeeded!");
            return Optional.of(running);
        }
    }
    else if (timedOut(id, deployment.get(), endpointTimeout)) {
        logger.log(WARNING, "Endpoints failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
        return Optional.of(error);
    }
}
// Not yet done: check the various failure conditions before scheduling another tick.
boolean failed = false;
NodeList suspendedTooLong = nodeList.suspendedSince(controller.clock().instant().minus(installationTimeout));
if ( ! suspendedTooLong.isEmpty()) {
    logger.log(INFO, "Some nodes have been suspended for more than " + installationTimeout.toMinutes() + " minutes.");
    failed = true;
}
if (run.noNodesDownSince()
       .map(since -> since.isBefore(controller.clock().instant().minus(installationTimeout)))
       .orElse(false)) {
    // Nothing has been allowed to suspend for a whole installationTimeout: diagnose why.
    if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0)
        logger.log(INFO, "No nodes allowed to suspend to progress installation for " + installationTimeout.toMinutes() + " minutes.");
    else
        logger.log(INFO, "Nodes not able to start with new application package.");
    failed = true;
}
Duration timeout = JobRunner.jobTimeout.minusHours(1);
if (timedOut(id, deployment.get(), timeout)) {
    logger.log(INFO, "Installation failed to complete within " + timeout.toHours() + "hours!");
    failed = true;
}
if (failed) { // Dump the full node details on failure, for the record.
    logger.log(nodeList.asList().stream()
                       .flatMap(node -> nodeDetails(node, true))
                       .collect(toList()));
    return Optional.of(installationFailed);
}
if ( ! firstTick) // On later ticks, log details only for the nodes currently allowed to be down.
    logger.log(nodeList.allowedDown().asList().stream()
                       .flatMap(node -> nodeDetails(node, false))
                       .collect(toList()));
controller.jobController().locked(id, lockedRun -> {
    Instant noNodesDownSince = summary.down() == 0 ? lockedRun.noNodesDownSince().orElse(controller.clock().instant()) : null;
    return lockedRun.noNodesDownSince(noNodesDownSince).withSummary(summary);
});
return Optional.empty();
} | class InternalStepRunner implements StepRunner {
private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName());
private static final NodeResources DEFAULT_TESTER_RESOURCES =
new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any);
private static final NodeResources DEFAULT_TESTER_RESOURCES_AWS =
new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any);
static final Duration endpointTimeout = Duration.ofMinutes(15);
static final Duration testerTimeout = Duration.ofMinutes(30);
static final Duration installationTimeout = Duration.ofMinutes(60);
static final Duration certificateTimeout = Duration.ofMinutes(300);
private final Controller controller;
private final TestConfigSerializer testConfigSerializer;
private final DeploymentFailureMails mails;
public InternalStepRunner(Controller controller) {
this.controller = controller;
this.testConfigSerializer = new TestConfigSerializer(controller.system());
this.mails = new DeploymentFailureMails(controller.zoneRegistry());
}
    /** Dispatches to the implementation of the given step, translating exceptions into run statuses. */
    @Override
    public Optional<RunStatus> run(LockedStep step, RunId id) {
        DualLogger logger = new DualLogger(id, step.get());
        try {
            switch (step.get()) {
                case deployTester: return deployTester(id, logger);
                case deployInitialReal: return deployInitialReal(id, logger);
                case installInitialReal: return installInitialReal(id, logger);
                case deployReal: return deployReal(id, logger);
                case installTester: return installTester(id, logger);
                case installReal: return installReal(id, logger);
                case startStagingSetup: return startTests(id, true, logger);
                case endStagingSetup: return endTests(id, logger);
                case startTests: return startTests(id, false, logger);
                case endTests: return endTests(id, logger);
                case copyVespaLogs: return copyVespaLogs(id, logger);
                case deactivateReal: return deactivateReal(id, logger);
                case deactivateTester: return deactivateTester(id, logger);
                case report: return report(id, logger);
                default: throw new AssertionError("Unknown step '" + step + "'!");
            }
        }
        catch (UncheckedIOException e) {
            // I/O problems are taken to be transient: leave the step unresolved so it is retried.
            logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e);
            return Optional.empty();
        }
        catch (RuntimeException e) {
            logger.log(WARNING, "Unexpected exception running " + id, e);
            // Cleanup steps must eventually complete, so they are retried rather than failed.
            if (JobProfile.of(id.type()).alwaysRun().contains(step.get())) {
                logger.log("Will keep trying, as this is a cleanup step.");
                return Optional.empty();
            }
            return Optional.of(error);
        }
    }
private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) {
Versions versions = controller.jobController().run(id).get().versions();
logger.log("Deploying platform version " +
versions.sourcePlatform().orElse(versions.targetPlatform()) +
" and application version " +
versions.sourceApplication().orElse(versions.targetApplication()).id() + " ...");
return deployReal(id, true, versions, logger);
}
private Optional<RunStatus> deployReal(RunId id, DualLogger logger) {
Versions versions = controller.jobController().run(id).get().versions();
logger.log("Deploying platform version " + versions.targetPlatform() +
" and application version " + versions.targetApplication().id() + " ...");
return deployReal(id, false, versions, logger);
}
    /** Deploys the real application of the given run; package and platform are pinned only for manually deployed zones. */
    private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, Versions versions, DualLogger logger) {
        // Dev/perf zones deploy the package previously stored for that zone, on an explicit
        // platform; other zones let the config server resolve both from the job's versions.
        Optional<ApplicationPackage> applicationPackage = id.type().environment().isManuallyDeployed()
                ? Optional.of(new ApplicationPackage(controller.applications().applicationStore()
                                                               .getDev(id.application(), id.type().zone(controller.system()))))
                : Optional.empty();
        Optional<Version> vespaVersion = id.type().environment().isManuallyDeployed()
                ? Optional.of(versions.targetPlatform())
                : Optional.empty();
        return deploy(id.application(),
                      id.type(),
                      () -> controller.applications().deploy(id.application(),
                                                             id.type().zone(controller.system()),
                                                             applicationPackage,
                                                             new DeployOptions(false,
                                                                               vespaVersion,
                                                                               false,
                                                                               setTheStage)),
                      controller.jobController().run(id).get()
                                .stepInfo(setTheStage ? deployInitialReal : deployReal).get()
                                .startTime().get(),
                      logger);
    }
    /** Deploys the tester container, always on the current system version. */
    private Optional<RunStatus> deployTester(RunId id, DualLogger logger) {
        Version platform = controller.systemVersion();
        logger.log("Deploying the tester container on platform " + platform + " ...");
        return deploy(id.tester().id(),
                      id.type(),
                      () -> controller.applications().deployTester(id.tester(),
                                                                   testerPackage(id),
                                                                   id.type().zone(controller.system()),
                                                                   new DeployOptions(true, // NOTE(review): flag semantics not visible here; confirm against DeployOptions.
                                                                                     Optional.of(platform),
                                                                                     false,
                                                                                     false)),
                      controller.jobController().run(id).get()
                                .stepInfo(deployTester).get()
                                .startTime().get(),
                      logger);
    }
private Optional<RunStatus> deploy(ApplicationId id, JobType type, Supplier<ActivateResult> deployment,
Instant startTime, DualLogger logger) {
try {
PrepareResponse prepareResponse = deployment.get().prepareResponse();
if ( ! prepareResponse.configChangeActions.refeedActions.stream().allMatch(action -> action.allowed)) {
List<String> messages = new ArrayList<>();
messages.add("Deploy failed due to non-compatible changes that require re-feed.");
messages.add("Your options are:");
messages.add("1. Revert the incompatible changes.");
messages.add("2. If you think it is safe in your case, you can override this validation, see");
messages.add(" http:
messages.add("3. Deploy as a new application under a different name.");
messages.add("Illegal actions:");
prepareResponse.configChangeActions.refeedActions.stream()
.filter(action -> ! action.allowed)
.flatMap(action -> action.messages.stream())
.forEach(messages::add);
messages.add("Details:");
prepareResponse.log.stream()
.map(entry -> entry.message)
.forEach(messages::add);
logger.log(messages);
return Optional.of(deploymentFailed);
}
if (prepareResponse.configChangeActions.restartActions.isEmpty())
logger.log("No services requiring restart.");
else
prepareResponse.configChangeActions.restartActions.stream()
.flatMap(action -> action.services.stream())
.map(service -> service.hostName)
.sorted().distinct()
.map(Hostname::new)
.forEach(hostname -> {
controller.applications().restart(new DeploymentId(id, type.zone(controller.system())), Optional.of(hostname));
logger.log("Restarting services on host " + hostname.id() + ".");
});
logger.log("Deployment successful.");
if (prepareResponse.message != null)
logger.log(prepareResponse.message);
return Optional.of(running);
}
catch (ConfigServerException e) {
Optional<RunStatus> result = startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(deploymentFailed) : Optional.empty();
switch (e.getErrorCode()) {
case ACTIVATION_CONFLICT:
case APPLICATION_LOCK_FAILURE:
case CERTIFICATE_NOT_READY:
logger.log("Deployment failed with possibly transient error " + e.getErrorCode() +
", will retry: " + e.getMessage());
return result;
case LOAD_BALANCER_NOT_READY:
case PARENT_HOST_NOT_READY:
logger.log(e.getServerMessage());
return result;
case OUT_OF_CAPACITY:
logger.log(e.getServerMessage());
return Optional.of(outOfCapacity);
case INVALID_APPLICATION_PACKAGE:
case BAD_REQUEST:
logger.log(e.getMessage());
return Optional.of(deploymentFailed);
}
throw e;
}
}
    /** Installs the initial real application, i.e., converges on the source versions, setting the stage. */
    private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) {
        return installReal(id, true, logger);
    }
    /** Installs the target versions of the real application. */
    private Optional<RunStatus> installReal(RunId id, DualLogger logger) {
        return installReal(id, false, logger);
    }
    /** Waits for the tester container to converge on the system version, and for its endpoint to come up. */
    private Optional<RunStatus> installTester(RunId id, DualLogger logger) {
        Run run = controller.jobController().run(id).get();
        Version platform = controller.systemVersion();
        ZoneId zone = id.type().zone(controller.system());
        ApplicationId testerId = id.tester().id();
        Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(testerId, zone),
                                                                                                               Optional.of(platform));
        if (services.isEmpty()) {
            // No convergence data yet; tolerate this for five minutes from step start, then error out.
            logger.log("Config status not currently available -- will retry.");
            return run.stepInfo(installTester).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
                   ? Optional.of(error)
                   : Optional.empty();
        }
        List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone,
                                                                                             testerId,
                                                                                             ImmutableSet.of(active, reserved));
        List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(zone,
                                                                                               nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
        NodeList nodeList = NodeList.of(nodes, parents, services.get());
        // Log node and service status on every tick, for only the services needing new config.
        logger.log(nodeList.asList().stream()
                           .flatMap(node -> nodeDetails(node, false))
                           .collect(toList()));
        if (nodeList.summary().converged()) {
            if (endpointsAvailable(testerId, zone, logger)) {
                if (containersAreUp(testerId, zone, logger)) {
                    logger.log("Tester container successfully installed!");
                    return Optional.of(running);
                }
            }
            else if (run.stepInfo(installTester).get().startTime().get().plus(endpointTimeout).isBefore(controller.clock().instant())) {
                logger.log(WARNING, "Tester failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
                return Optional.of(error);
            }
        }
        // Hard deadline for the whole tester installation.
        if (run.stepInfo(installTester).get().startTime().get().plus(testerTimeout).isBefore(controller.clock().instant())) {
            logger.log(WARNING, "Installation of tester failed to complete within " + testerTimeout.toMinutes() + " minutes!");
            return Optional.of(error);
        }
        return Optional.empty();
    }
/** Returns true iff all containers in the deployment give 100 consecutive 200 OK responses on /status.html. */
private boolean containersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
var endpoints = controller.applications().clusterEndpoints(Set.of(new DeploymentId(id, zoneId)));
if ( ! endpoints.containsKey(zoneId))
return false;
for (URI endpoint : endpoints.get(zoneId).values()) {
boolean ready = id.instance().isTester() ? controller.jobController().cloud().testerReady(endpoint)
: controller.jobController().cloud().ready(endpoint);
if (!ready) {
logger.log("Failed to get 100 consecutive OKs from " + endpoint);
return false;
}
}
return true;
}
private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) {
var endpoints = controller.applications().clusterEndpoints(Set.of(new DeploymentId(id, zone)));
if ( ! endpoints.containsKey(zone)) {
logger.log("Endpoints not yet ready.");
return false;
}
for (var endpoint : endpoints.get(zone).values())
if ( ! controller.jobController().cloud().exists(endpoint)) {
logger.log(INFO, "DNS lookup yielded no IP address for '" + endpoint + "'.");
return false;
}
logEndpoints(endpoints, logger);
return true;
}
private void logEndpoints(Map<ZoneId, Map<ClusterSpec.Id, URI>> endpoints, DualLogger logger) {
List<String> messages = new ArrayList<>();
messages.add("Found endpoints:");
endpoints.forEach((zone, uris) -> {
messages.add("- " + zone);
uris.forEach((cluster, uri) -> messages.add(" |-- " + uri + " (" + cluster + ")"));
});
logger.log(messages);
}
    /**
     * Renders one log line for the node itself, one for its platform progress, and one per service,
     * describing progress towards convergence.
     *
     * @param printAllServices whether to list every service, or only those on nodes which still need new config
     */
    private Stream<String> nodeDetails(NodeWithServices node, boolean printAllServices) {
        return Stream.concat(Stream.of(node.node().hostname() + ": " + humanize(node.node().serviceState()),
                                       "--- platform " + node.node().wantedVersion() + (node.needsPlatformUpgrade()
                                               ? " <-- " + (node.node().currentVersion().isEmpty() ? "not booted" : node.node().currentVersion())
                                               : "") +
                                       (node.needsOsUpgrade() && node.isAllowedDown()
                                               ? ", upgrading OS (" + node.node().wantedOsVersion() + " <-- " + node.node().currentOsVersion() + ")"
                                               : "") +
                                       (node.needsFirmwareUpgrade() && node.isAllowedDown()
                                               ? ", upgrading firmware"
                                               : "") +
                                       (node.needsRestart()
                                               ? ", restart pending (" + node.node().wantedRestartGeneration() + " <-- " + node.node().restartGeneration() + ")"
                                               : "") +
                                       (node.needsReboot()
                                               ? ", reboot pending (" + node.node().wantedRebootGeneration() + " <-- " + node.node().rebootGeneration() + ")"
                                               : "")),
                             node.services().stream()
                                 .filter(service -> printAllServices || node.needsNewConfig())
                                 .map(service -> "--- " + service.type() + " on port " + service.port() + (service.currentGeneration() == -1
                                         ? " has not started "
                                         : " has config generation " + service.currentGeneration() + ", wanted is " + node.wantedConfigGeneration())));
    }
private String humanize(Node.ServiceState state) {
switch (state) {
case allowedDown: return "allowed to be DOWN";
case expectedUp: return "expected to be UP";
case unorchestrated: return "unorchestrated";
default: return state.name();
}
}
    /** Starts the tests — or the staging setup, when {@code isSetup} — via the tester's REST API, once all endpoints are up. */
    private Optional<RunStatus> startTests(RunId id, boolean isSetup, DualLogger logger) {
        Optional<Deployment> deployment = deployment(id.application(), id.type());
        if (deployment.isEmpty()) {
            logger.log(INFO, "Deployment expired before tests could start.");
            return Optional.of(error);
        }
        // Tests get endpoints for all production deployments, plus the zone under test.
        var deployments = controller.applications().requireInstance(id.application())
                                    .productionDeployments().keySet().stream()
                                    .map(zone -> new DeploymentId(id.application(), zone))
                                    .collect(Collectors.toSet());
        deployments.add(new DeploymentId(id.application(), id.type().zone(controller.system())));
        logger.log("Attempting to find endpoints ...");
        var endpoints = controller.applications().clusterEndpoints(deployments);
        if ( ! endpoints.containsKey(id.type().zone(controller.system()))) {
            logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!");
            return Optional.of(error);
        }
        logEndpoints(endpoints, logger);
        Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id);
        if (testerEndpoint.isEmpty()) {
            logger.log(WARNING, "Endpoints for the tester container vanished again, while it was still active!");
            return Optional.of(error);
        }
        if ( ! controller.jobController().cloud().testerReady(testerEndpoint.get())) {
            logger.log(WARNING, "Tester container went bad!");
            return Optional.of(error);
        }
        logger.log("Starting tests ...");
        // Hand the tester its suite and the generated test config, including endpoints and content clusters.
        controller.jobController().cloud().startTests(testerEndpoint.get(),
                                                      TesterCloud.Suite.of(id.type(), isSetup),
                                                      testConfigSerializer.configJson(id.application(),
                                                                                      id.type(),
                                                                                      true,
                                                                                      endpoints,
                                                                                      controller.applications().contentClustersByZone(deployments)));
        return Optional.of(running);
    }
    /** Polls the test outcome, through the config server or directly from the tester, as chosen by feature flag. */
    private Optional<RunStatus> endTests(RunId id, DualLogger logger) {
        if (deployment(id.application(), id.type()).isEmpty()) {
            logger.log(INFO, "Deployment expired before tests could complete.");
            return Optional.of(aborted);
        }
        // A run-specific tester certificate which is no longer valid can't be used to reach the tester.
        Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate();
        if (testerCertificate.isPresent()) {
            try {
                testerCertificate.get().checkValidity(Date.from(controller.clock().instant()));
            }
            catch (CertificateExpiredException | CertificateNotYetValidException e) {
                logger.log(INFO, "Tester certificate expired before tests could complete.");
                return Optional.of(aborted);
            }
        }
        controller.jobController().updateTestLog(id);
        BooleanFlag useConfigServerForTesterAPI = Flags.USE_CONFIG_SERVER_FOR_TESTER_API_CALLS.bindTo(controller.flagSource());
        ZoneId zoneId = id.type().zone(controller.system());
        TesterCloud.Status testStatus;
        boolean useConfigServer = useConfigServerForTesterAPI.with(FetchVector.Dimension.ZONE_ID, zoneId.value()).value();
        // Logged internally (not to the job log) to trace the flag's value per zone during rollout.
        InternalStepRunner.logger.log(LogLevel.INFO, Flags.USE_CONFIG_SERVER_FOR_TESTER_API_CALLS.id().toString() +
                                                     " has value " + useConfigServer + " in zone " + zoneId.value());
        if (useConfigServer) {
            testStatus = controller.serviceRegistry().configServer().getTesterStatus(getTesterDeploymentId(id, zoneId));
        } else {
            Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id);
            if (testerEndpoint.isEmpty()) {
                logger.log("Endpoints for tester not found -- trying again later.");
                return Optional.empty();
            }
            testStatus = controller.jobController().cloud().getStatus(testerEndpoint.get());
        }
        switch (testStatus) {
            case NOT_STARTED:
                throw new IllegalStateException("Tester reports tests not started, even though they should have!");
            case RUNNING:
                return Optional.empty();
            case FAILURE:
                logger.log("Tests failed.");
                return Optional.of(testFailure);
            case ERROR:
                logger.log(INFO, "Tester failed running its tests!");
                return Optional.of(error);
            case SUCCESS:
                logger.log("Tests completed successfully.");
                return Optional.of(running);
            default:
                throw new IllegalStateException("Unknown status '" + testStatus + "'!");
        }
    }
/** Copies the Vespa logs from the zone into the run's log storage, if the deployment still exists. */
private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) {
    // Nothing to copy once the deployment is gone; report success either way.
    if (deployment(id.application(), id.type()).isEmpty())
        return Optional.of(running);

    try {
        controller.jobController().updateVespaLog(id);
        return Optional.of(running);
    }
    catch (Exception e) {
        logger.log(INFO, "Failure getting vespa logs for " + id, e);
        return Optional.of(error);
    }
}
/** Deactivates the real deployment of this run's zone, retrying on failure for up to an hour. */
private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) {
    try {
        logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
        controller.applications().deactivate(id.application(), id.type().zone(controller.system()));
        return Optional.of(running);
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Failed deleting application " + id.application(), e);
        Instant stepStart = controller.jobController().run(id).get().stepInfo(deactivateReal).get().startTime().get();
        Instant giveUpThreshold = controller.clock().instant().minus(Duration.ofHours(1));
        if (stepStart.isBefore(giveUpThreshold))
            return Optional.of(error);   // Been failing for over an hour; give up.
        return Optional.empty();         // Possibly transient; retry the step later.
    }
}
/** Deactivates the tester application of this run, retrying on failure for up to an hour. */
private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) {
    try {
        logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
        controller.jobController().deactivateTester(id.tester(), id.type());
        return Optional.of(running);
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Failed deleting tester of " + id.application(), e);
        Instant stepStart = controller.jobController().run(id).get().stepInfo(deactivateTester).get().startTime().get();
        Instant giveUpThreshold = controller.clock().instant().minus(Duration.ofHours(1));
        if (stepStart.isBefore(giveUpThreshold))
            return Optional.of(error);   // Been failing for over an hour; give up.
        return Optional.empty();         // Possibly transient; retry the step later.
    }
}
/** Reports the outcome of the run, sending a failure notification when the run has failed. */
private Optional<RunStatus> report(RunId id, DualLogger logger) {
    try {
        Optional<Run> activeRun = controller.jobController().active(id);
        if (activeRun.isPresent() && activeRun.get().hasFailed())
            sendNotification(activeRun.get(), logger);
        return Optional.of(running);
    }
    catch (IllegalStateException e) {
        logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e);
        return Optional.of(error);
    }
}
/** Sends a mail with a notification of a failed run, if one should be sent. */
private void sendNotification(Run run, DualLogger logger) {
// Look up the notification settings declared in the deployment spec for this instance.
Application application = controller.applications().requireApplication(TenantAndApplicationId.from(run.id().application()));
Notifications notifications = application.deploymentSpec().requireInstance(run.id().application().instance()).notifications();
// The run failed on a "new commit" iff the change being rolled out is the application version this run targeted.
boolean newCommit = application.require(run.id().application().instance()).change().application()
.map(run.versions().targetApplication()::equals)
.orElse(false);
When when = newCommit ? failingCommit : failing;
List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when));
// Also notify the commit author, when so configured and the author email is known.
if (notifications.emailRolesFor(when).contains(author))
run.versions().targetApplication().authorEmail().ifPresent(recipients::add);
if (recipients.isEmpty())
return;
try {
// At most one of these applies per run status; out-of-capacity mails are only sent for production jobs.
if (run.status() == outOfCapacity && run.id().type().isProduction())
controller.serviceRegistry().mailer().send(mails.outOfCapacity(run.id(), recipients));
if (run.status() == deploymentFailed)
controller.serviceRegistry().mailer().send(mails.deploymentFailure(run.id(), recipients));
if (run.status() == installationFailed)
controller.serviceRegistry().mailer().send(mails.installationFailure(run.id(), recipients));
if (run.status() == testFailure)
controller.serviceRegistry().mailer().send(mails.testFailure(run.id(), recipients));
if (run.status() == error)
controller.serviceRegistry().mailer().send(mails.systemError(run.id(), recipients));
}
catch (RuntimeException e) {
// Mailing is best-effort; never fail the report step over it.
logger.log(INFO, "Exception trying to send mail for " + run.id(), e);
}
}
/** Returns the deployment of the real application in the zone of the given job, if it exists. */
private Optional<Deployment> deployment(ApplicationId id, JobType type) {
    ZoneId zone = type.zone(controller.system());
    return Optional.ofNullable(application(id).deployments().get(zone));
}
/** Returns the real application with the given id. */
private Instance application(ApplicationId id) {
// NOTE(review): takes and immediately releases the application lock with a no-op body,
// presumably to serialize with any in-flight modification before reading — confirm intent.
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { });
return controller.applications().requireInstance(id);
}
/**
 * Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout.
 *
 * We time out the job before the deployment expires, for zones where deployments are not persistent,
 * to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry,
 * and the given default installation timeout, minus one minute, is used as a timeout threshold.
 */
private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) {
Run run = controller.jobController().run(id).get();
// Outside CD: a run which started after the deployment was made never times out here.
if ( ! controller.system().isCd() && run.start().isAfter(deployment.at()))
return false;
// Use the zone's deployment TTL when it is stricter than the given default timeout.
Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone())
.filter(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0)
.orElse(defaultTimeout);
// One minute of slack is reserved for collecting Vespa logs before the deployment expires.
return deployment.at().isBefore(controller.clock().instant().minus(timeout.minus(Duration.ofMinutes(1))));
}
/** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */
private ApplicationPackage testerPackage(RunId id) {
ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication();
DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec();
ZoneId zone = id.type().zone(controller.system());
// Tester certificates are only used in public systems, and only for test environments.
boolean useTesterCertificate = controller.system().isPublic() && id.type().environment().isTest();
// Generate services.xml with the tester flavor from the spec, or a zone-dependent default.
byte[] servicesXml = servicesXml(controller.zoneRegistry().accessControlDomain(),
! controller.system().isPublic(),
useTesterCertificate,
testerFlavorFor(id, spec)
.map(NodeResources::fromLegacyName)
.orElse(zone.region().value().contains("aws-") ?
DEFAULT_TESTER_RESOURCES_AWS : DEFAULT_TESTER_RESOURCES));
// The stored tester fat-jar for the target application version.
byte[] testPackage = controller.applications().applicationStore().getTester(id.application().tenant(), id.application().application(), version);
byte[] deploymentXml = deploymentXml(id.tester(),
spec.athenzDomain(),
spec.requireInstance(id.application().instance()).athenzService(zone.environment(), zone.region()));
// Assemble the package: test jar + generated services.xml and deployment.xml (+ optional tester certificate).
try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) {
zipBuilder.add(testPackage);
zipBuilder.add("services.xml", servicesXml);
zipBuilder.add("deployment.xml", deploymentXml);
if (useTesterCertificate)
appendAndStoreCertificate(zipBuilder, id);
zipBuilder.close();
return new ApplicationPackage(zipBuilder.toByteArray());
}
}
/**
 * Generates a key pair and self-signed certificate for the tester of this run, stores the
 * certificate with the run, and adds the PEM-encoded key and certificate to the package.
 */
private void appendAndStoreCertificate(ZipBuilder zipBuilder, RunId id) {
KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048);
// Subject identifies the tester instance, the job type, and the run number.
X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "." + id.number());
X509Certificate certificate = X509CertificateBuilder.fromKeypair(keyPair,
subject,
controller.clock().instant(),
controller.clock().instant().plus(certificateTimeout),
SignatureAlgorithm.SHA512_WITH_RSA,
BigInteger.valueOf(1))
.build();
// Stored so endTests can later check the certificate is still valid.
controller.jobController().storeTesterCertificate(id, certificate);
zipBuilder.add("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8));
zipBuilder.add("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8));
}
/** Returns the deployment id of this run's tester application in the given zone. */
private DeploymentId getTesterDeploymentId(RunId runId, ZoneId zoneId) {
    ApplicationId testerApplication = runId.tester().id();
    return new DeploymentId(testerApplication, zoneId);
}
/** Returns the tester flavor declared by the first deployment spec step which concerns this job's environment. */
private static Optional<String> testerFlavorFor(RunId id, DeploymentSpec spec) {
    return spec.steps().stream()
               .filter(step -> step.concerns(id.type().environment()))
               .findFirst()
               .flatMap(step -> step.zones().get(0).testerFlavor());
}
/**
 * Returns the generated services.xml content for the tester application.
 *
 * The container hosts the test runner component and handler, serves plain HTTP on 4080
 * (used by the config server) and TLS on 4443, and — when the system uses Athenz —
 * guards the test runner API with Athenz access control and token authorization.
 */
static byte[] servicesXml(AthenzDomain domain, boolean systemUsesAthenz, boolean useTesterCertificate,
NodeResources resources) {
// Reserve a fixed amount of memory for the jdisc container; give half the remainder to surefire.
int jdiscMemoryGb = 2;
int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb());
int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2);
String resourceString = String.format(Locale.ENGLISH,
"<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>",
resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name());
// Special-case mapping of the CD domain to its parent identity domain for SIA key/cert paths.
AthenzDomain idDomain = ("vespa.vespa.cd".equals(domain.value()) ? AthenzDomain.from("vespa.vespa") : domain);
String servicesXml =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<services xmlns:deploy='vespa' version='1.0'>\n" +
" <container version='1.0' id='tester'>\n" +
"\n" +
" <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" +
" <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" +
" <artifactsPath>artifacts</artifactsPath>\n" +
" <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" +
" <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
" <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" +
" </config>\n" +
" </component>\n" +
"\n" +
" <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" +
" <binding>http:
" </handler>\n" +
"\n" +
" <http>\n" +
" <!-- Make sure 4080 is the first port. This will be used by the config server. -->\n" +
" <server id='default' port='4080'/>\n" +
" <server id='testertls4443' port='4443'>\n" +
" <config name=\"jdisc.http.connector\">\n" +
" <tlsClientAuthEnforcer>\n" +
" <enable>true</enable>\n" +
" <pathWhitelist>\n" +
" <item>/status.html</item>\n" +
" <item>/state/v1/config</item>\n" +
" </pathWhitelist>\n" +
" </tlsClientAuthEnforcer>\n" +
" </config>\n" +
" <ssl>\n" +
" <private-key-file>/var/lib/sia/keys/" + idDomain.value() + ".tenant.key.pem</private-key-file>\n" +
" <certificate-file>/var/lib/sia/certs/" + idDomain.value() + ".tenant.cert.pem</certificate-file>\n" +
" <ca-certificates-file>/opt/yahoo/share/ssl/certs/athenz_certificate_bundle.pem</ca-certificates-file>\n" +
" <client-authentication>want</client-authentication>\n" +
" </ssl>\n" +
" </server>\n" +
" <filtering>\n" +
(systemUsesAthenz ?
" <access-control domain='" + domain.value() + "'>\n" +
" <exclude>\n" +
" <binding>http:
" </exclude>\n" +
" </access-control>\n"
: "") +
" <request-chain id=\"testrunner-api\">\n" +
" <filter id='authz-filter' class='com.yahoo.jdisc.http.filter.security.athenz.AthenzAuthorizationFilter' bundle=\"jdisc-security-filters\">\n" +
" <config name=\"jdisc.http.filter.security.athenz.athenz-authorization-filter\">\n" +
" <credentialsToVerify>TOKEN_ONLY</credentialsToVerify>\n" +
" <roleTokenHeaderName>Yahoo-Role-Auth</roleTokenHeaderName>\n" +
" </config>\n" +
" <component id=\"com.yahoo.jdisc.http.filter.security.athenz.StaticRequestResourceMapper\" bundle=\"jdisc-security-filters\">\n" +
" <config name=\"jdisc.http.filter.security.athenz.static-request-resource-mapper\">\n" +
" <resourceName>" + domain.value() + ":tester-application</resourceName>\n" +
" <action>deploy</action>\n" +
" </config>\n" +
" </component>\n" +
" </filter>\n" +
" </request-chain>\n" +
" </filtering>\n" +
" </http>\n" +
"\n" +
" <nodes count=\"1\" allocated-memory=\"" + jdiscMemoryPct + "%\">\n" +
" " + resourceString + "\n" +
" </nodes>\n" +
" </container>\n" +
"</services>\n";
return servicesXml.getBytes(UTF_8);
}
/** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */
private static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) {
// Athenz attributes are emitted only when present; the instance id mirrors the tester's instance name.
String deploymentSpec =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<deployment version=\"1.0\" " +
athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") +
athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("") + ">" +
" <instance id=\"" + id.id().instance().value() + "\" />" +
"</deployment>";
return deploymentSpec.getBytes(UTF_8);
}
/** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */
private class DualLogger {
private final RunId id;
private final Step step;
private DualLogger(RunId id, Step step) {
this.id = id;
this.step = step;
}
// Logs the given messages to the job log only, at INFO.
private void log(String... messages) {
log(List.of(messages));
}
// Logs the given messages to the job log only, at INFO.
private void log(List<String> messages) {
controller.jobController().log(id, step, INFO, messages);
}
private void log(Level level, String message) {
log(level, message, null);
}
// Logs the full exception to the class logger only; the job log gets just the message.
private void logWithInternalException(Level level, String message, Throwable thrown) {
logger.log(level, id + " at " + step + ": " + message, thrown);
controller.jobController().log(id, step, level, message);
}
// Logs to both the class logger and the job log; the job log also gets the stack trace, when given.
private void log(Level level, String message, Throwable thrown) {
logger.log(level, id + " at " + step + ": " + message, thrown);
if (thrown != null) {
ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream();
thrown.printStackTrace(new PrintStream(traceBuffer));
message += "\n" + traceBuffer;
}
controller.jobController().log(id, step, level, message);
}
}
} | class InternalStepRunner implements StepRunner {
private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName());
// Default tester node resources; AWS zones get a larger default.
private static final NodeResources DEFAULT_TESTER_RESOURCES =
new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any);
private static final NodeResources DEFAULT_TESTER_RESOURCES_AWS =
new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any);
// How long to wait for endpoints to show up after the tester has converged.
static final Duration endpointTimeout = Duration.ofMinutes(15);
// How long to wait for the tester container installation to complete.
static final Duration testerTimeout = Duration.ofMinutes(30);
// Default timeout for installation of the real application.
static final Duration installationTimeout = Duration.ofMinutes(60);
// Validity period of generated tester certificates.
static final Duration certificateTimeout = Duration.ofMinutes(300);
private final Controller controller;
private final TestConfigSerializer testConfigSerializer;
private final DeploymentFailureMails mails;
/** Creates a step runner operating through the given controller. */
public InternalStepRunner(Controller controller) {
this.controller = controller;
this.testConfigSerializer = new TestConfigSerializer(controller.system());
this.mails = new DeploymentFailureMails(controller.zoneRegistry());
}
/**
 * Dispatches the given step of the given run to its implementation, translating unexpected
 * failures into a run status: empty means "retry later", a status ends the step.
 */
@Override
public Optional<RunStatus> run(LockedStep step, RunId id) {
DualLogger logger = new DualLogger(id, step.get());
try {
switch (step.get()) {
case deployTester: return deployTester(id, logger);
case deployInitialReal: return deployInitialReal(id, logger);
case installInitialReal: return installInitialReal(id, logger);
case deployReal: return deployReal(id, logger);
case installTester: return installTester(id, logger);
case installReal: return installReal(id, logger);
case startStagingSetup: return startTests(id, true, logger);
case endStagingSetup: return endTests(id, logger);
case startTests: return startTests(id, false, logger);
case endTests: return endTests(id, logger);
case copyVespaLogs: return copyVespaLogs(id, logger);
case deactivateReal: return deactivateReal(id, logger);
case deactivateTester: return deactivateTester(id, logger);
case report: return report(id, logger);
default: throw new AssertionError("Unknown step '" + step + "'!");
}
}
catch (UncheckedIOException e) {
// IO failures are assumed transient: log details internally only, and retry the step.
logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e);
return Optional.empty();
}
catch (RuntimeException e) {
logger.log(WARNING, "Unexpected exception running " + id, e);
// Cleanup steps must always complete, so keep retrying those; otherwise fail the run.
if (JobProfile.of(id.type()).alwaysRun().contains(step.get())) {
logger.log("Will keep trying, as this is a cleanup step.");
return Optional.empty();
}
return Optional.of(error);
}
}
/** Deploys the source versions of the run — falling back to the targets when no source is set. */
private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) {
    Versions versions = controller.jobController().run(id).get().versions();
    Version platformToDeploy = versions.sourcePlatform().orElse(versions.targetPlatform());
    ApplicationVersion applicationToDeploy = versions.sourceApplication().orElse(versions.targetApplication());
    logger.log("Deploying platform version " + platformToDeploy +
               " and application version " + applicationToDeploy.id() + " ...");
    return deployReal(id, true, versions, logger);
}
/** Deploys the target versions of the run. */
private Optional<RunStatus> deployReal(RunId id, DualLogger logger) {
    Versions versions = controller.jobController().run(id).get().versions();
    String message = "Deploying platform version " + versions.targetPlatform() +
                     " and application version " + versions.targetApplication().id() + " ...";
    logger.log(message);
    return deployReal(id, false, versions, logger);
}
/**
 * Deploys the real application for this run. Manually deployed (dev/perf) jobs deploy the
 * stored dev package at an explicit platform version; pipeline jobs let the config server
 * use the already-stored package and version.
 */
private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, Versions versions, DualLogger logger) {
    Optional<ApplicationPackage> applicationPackage;
    Optional<Version> vespaVersion;
    if (id.type().environment().isManuallyDeployed()) {
        applicationPackage = Optional.of(new ApplicationPackage(controller.applications().applicationStore()
                                                                          .getDev(id.application(), id.type().zone(controller.system()))));
        vespaVersion = Optional.of(versions.targetPlatform());
    }
    else {
        applicationPackage = Optional.empty();
        vespaVersion = Optional.empty();
    }
    Instant stepStart = controller.jobController().run(id).get()
                                  .stepInfo(setTheStage ? deployInitialReal : deployReal).get()
                                  .startTime().get();
    return deploy(id.application(),
                  id.type(),
                  () -> controller.applications().deploy(id.application(),
                                                         id.type().zone(controller.system()),
                                                         applicationPackage,
                                                         new DeployOptions(false,
                                                                           vespaVersion,
                                                                           false,
                                                                           setTheStage)),
                  stepStart,
                  logger);
}
/** Deploys the tester container for this run, pinned to the current system version. */
private Optional<RunStatus> deployTester(RunId id, DualLogger logger) {
    Version platform = controller.systemVersion();
    logger.log("Deploying the tester container on platform " + platform + " ...");
    Instant stepStart = controller.jobController().run(id).get()
                                  .stepInfo(deployTester).get()
                                  .startTime().get();
    // The tester package is assembled lazily, only when the deployment is actually attempted.
    return deploy(id.tester().id(),
                  id.type(),
                  () -> controller.applications().deployTester(id.tester(),
                                                               testerPackage(id),
                                                               id.type().zone(controller.system()),
                                                               new DeployOptions(true,
                                                                                 Optional.of(platform),
                                                                                 false,
                                                                                 false)),
                  stepStart,
                  logger);
}
/**
 * Performs the given deployment and interprets the outcome: running on success, a failure
 * status for permanent errors, and empty (retry) for transient config server errors —
 * until an hour has passed since the step started, after which transient errors also fail.
 */
private Optional<RunStatus> deploy(ApplicationId id, JobType type, Supplier<ActivateResult> deployment,
Instant startTime, DualLogger logger) {
try {
PrepareResponse prepareResponse = deployment.get().prepareResponse();
// Disallowed refeed actions mean the change cannot be deployed as-is; explain and fail.
if ( ! prepareResponse.configChangeActions.refeedActions.stream().allMatch(action -> action.allowed)) {
List<String> messages = new ArrayList<>();
messages.add("Deploy failed due to non-compatible changes that require re-feed.");
messages.add("Your options are:");
messages.add("1. Revert the incompatible changes.");
messages.add("2. If you think it is safe in your case, you can override this validation, see");
messages.add(" http:
messages.add("3. Deploy as a new application under a different name.");
messages.add("Illegal actions:");
prepareResponse.configChangeActions.refeedActions.stream()
.filter(action -> ! action.allowed)
.flatMap(action -> action.messages.stream())
.forEach(messages::add);
messages.add("Details:");
prepareResponse.log.stream()
.map(entry -> entry.message)
.forEach(messages::add);
logger.log(messages);
return Optional.of(deploymentFailed);
}
// Restart each affected host once, even if several of its services need a restart.
if (prepareResponse.configChangeActions.restartActions.isEmpty())
logger.log("No services requiring restart.");
else
prepareResponse.configChangeActions.restartActions.stream()
.flatMap(action -> action.services.stream())
.map(service -> service.hostName)
.sorted().distinct()
.map(Hostname::new)
.forEach(hostname -> {
controller.applications().restart(new DeploymentId(id, type.zone(controller.system())), Optional.of(hostname));
logger.log("Restarting services on host " + hostname.id() + ".");
});
logger.log("Deployment successful.");
if (prepareResponse.message != null)
logger.log(prepareResponse.message);
return Optional.of(running);
}
catch (ConfigServerException e) {
// Transient errors yield empty (retry) until an hour has passed since the step started.
Optional<RunStatus> result = startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(deploymentFailed) : Optional.empty();
switch (e.getErrorCode()) {
case ACTIVATION_CONFLICT:
case APPLICATION_LOCK_FAILURE:
case CERTIFICATE_NOT_READY:
logger.log("Deployment failed with possibly transient error " + e.getErrorCode() +
", will retry: " + e.getMessage());
return result;
case LOAD_BALANCER_NOT_READY:
case PARENT_HOST_NOT_READY:
logger.log(e.getServerMessage());
return result;
case OUT_OF_CAPACITY:
logger.log(e.getServerMessage());
return Optional.of(outOfCapacity);
case INVALID_APPLICATION_PACKAGE:
case BAD_REQUEST:
logger.log(e.getMessage());
return Optional.of(deploymentFailed);
}
// Unknown config server error codes are rethrown and handled by the outer run dispatcher.
throw e;
}
}
/** Awaits installation of the initial (setTheStage) real deployment of the run. */
private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) {
    final boolean setTheStage = true;
    return installReal(id, setTheStage, logger);
}
/** Awaits installation of the target real deployment of the run. */
private Optional<RunStatus> installReal(RunId id, DualLogger logger) {
    final boolean setTheStage = false;
    return installReal(id, setTheStage, logger);
}
/**
 * Awaits convergence and readiness of the tester container: empty means "check again later";
 * running when the tester is converged, has endpoints, and answers on /status.html;
 * error when any of the applicable timeouts has passed.
 */
private Optional<RunStatus> installTester(RunId id, DualLogger logger) {
Run run = controller.jobController().run(id).get();
Version platform = controller.systemVersion();
ZoneId zone = id.type().zone(controller.system());
ApplicationId testerId = id.tester().id();
Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(testerId, zone),
Optional.of(platform));
if (services.isEmpty()) {
logger.log("Config status not currently available -- will retry.");
// Fail if config status has been unavailable for five minutes since the step started.
return run.stepInfo(installTester).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
? Optional.of(error)
: Optional.empty();
}
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone,
testerId,
ImmutableSet.of(active, reserved));
// Parent hosts are needed to report on OS and firmware upgrades of the underlying hosts.
List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(zone,
nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
NodeList nodeList = NodeList.of(nodes, parents, services.get());
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, false))
.collect(toList()));
if (nodeList.summary().converged()) {
if (endpointsAvailable(testerId, zone, logger)) {
if (containersAreUp(testerId, zone, logger)) {
logger.log("Tester container successfully installed!");
return Optional.of(running);
}
}
// Converged but no endpoints: fail once the endpoint timeout has passed.
else if (run.stepInfo(installTester).get().startTime().get().plus(endpointTimeout).isBefore(controller.clock().instant())) {
logger.log(WARNING, "Tester failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
return Optional.of(error);
}
}
// Overall tester installation timeout, regardless of convergence state.
if (run.stepInfo(installTester).get().startTime().get().plus(testerTimeout).isBefore(controller.clock().instant())) {
logger.log(WARNING, "Installation of tester failed to complete within " + testerTimeout.toMinutes() + " minutes!");
return Optional.of(error);
}
return Optional.empty();
}
/** Returns true iff all containers in the deployment give 100 consecutive 200 OK responses on /status.html. */
private boolean containersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
    var endpoints = controller.applications().clusterEndpoints(Set.of(new DeploymentId(id, zoneId)));
    if ( ! endpoints.containsKey(zoneId))
        return false;

    for (URI endpoint : endpoints.get(zoneId).values()) {
        // Tester containers have a dedicated readiness check.
        boolean ready;
        if (id.instance().isTester())
            ready = controller.jobController().cloud().testerReady(endpoint);
        else
            ready = controller.jobController().cloud().ready(endpoint);
        if ( ! ready) {
            logger.log("Failed to get 100 consecutive OKs from " + endpoint);
            return false;
        }
    }
    return true;
}
/** Returns whether all cluster endpoints of the deployment resolve in DNS, logging them when they do. */
private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) {
    var endpoints = controller.applications().clusterEndpoints(Set.of(new DeploymentId(id, zone)));
    if ( ! endpoints.containsKey(zone)) {
        logger.log("Endpoints not yet ready.");
        return false;
    }
    for (var endpoint : endpoints.get(zone).values()) {
        if (controller.jobController().cloud().exists(endpoint))
            continue;
        logger.log(INFO, "DNS lookup yielded no IP address for '" + endpoint + "'.");
        return false;
    }
    logEndpoints(endpoints, logger);
    return true;
}
/** Writes the discovered endpoints, grouped by zone, to the job log. */
private void logEndpoints(Map<ZoneId, Map<ClusterSpec.Id, URI>> endpoints, DualLogger logger) {
    List<String> lines = new ArrayList<>();
    lines.add("Found endpoints:");
    for (Map.Entry<ZoneId, Map<ClusterSpec.Id, URI>> zoneEntry : endpoints.entrySet()) {
        lines.add("- " + zoneEntry.getKey());
        for (Map.Entry<ClusterSpec.Id, URI> clusterEntry : zoneEntry.getValue().entrySet())
            lines.add(" |-- " + clusterEntry.getValue() + " (" + clusterEntry.getKey() + ")");
    }
    logger.log(lines);
}
/**
 * Renders a human-readable status line for the given node — orchestration state plus pending
 * platform/OS/firmware/restart/reboot changes — followed by one line per service which is
 * behind on config (or all services, when printAllServices is set).
 */
private Stream<String> nodeDetails(NodeWithServices node, boolean printAllServices) {
return Stream.concat(Stream.of(node.node().hostname() + ": " + humanize(node.node().serviceState()),
"--- platform " + node.node().wantedVersion() + (node.needsPlatformUpgrade()
? " <-- " + (node.node().currentVersion().isEmpty() ? "not booted" : node.node().currentVersion())
: "") +
(node.needsOsUpgrade() && node.isAllowedDown()
? ", upgrading OS (" + node.node().wantedOsVersion() + " <-- " + node.node().currentOsVersion() + ")"
: "") +
(node.needsFirmwareUpgrade() && node.isAllowedDown()
? ", upgrading firmware"
: "") +
(node.needsRestart()
? ", restart pending (" + node.node().wantedRestartGeneration() + " <-- " + node.node().restartGeneration() + ")"
: "") +
(node.needsReboot()
? ", reboot pending (" + node.node().wantedRebootGeneration() + " <-- " + node.node().rebootGeneration() + ")"
: "")),
node.services().stream()
.filter(service -> printAllServices || node.needsNewConfig())
.map(service -> "--- " + service.type() + " on port " + service.port() + (service.currentGeneration() == -1
? " has not started "
: " has config generation " + service.currentGeneration() + ", wanted is " + node.wantedConfigGeneration())));
}
/** Translates a node service state to a human-readable phrase; unknown states fall back to their enum name. */
private String humanize(Node.ServiceState state) {
    if (state == Node.ServiceState.allowedDown)
        return "allowed to be DOWN";
    if (state == Node.ServiceState.expectedUp)
        return "expected to be UP";
    if (state == Node.ServiceState.unorchestrated)
        return "unorchestrated";
    return state.name();
}
/**
 * Asks the tester to start the tests (or the staging setup, when isSetup is true),
 * handing it the endpoints and content clusters of all relevant deployments.
 */
private Optional<RunStatus> startTests(RunId id, boolean isSetup, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
logger.log(INFO, "Deployment expired before tests could start.");
return Optional.of(error);
}
// Tests may reach all production deployments of the instance, plus the deployment under test.
var deployments = controller.applications().requireInstance(id.application())
.productionDeployments().keySet().stream()
.map(zone -> new DeploymentId(id.application(), zone))
.collect(Collectors.toSet());
deployments.add(new DeploymentId(id.application(), id.type().zone(controller.system())));
logger.log("Attempting to find endpoints ...");
var endpoints = controller.applications().clusterEndpoints(deployments);
if ( ! endpoints.containsKey(id.type().zone(controller.system()))) {
logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!");
return Optional.of(error);
}
logEndpoints(endpoints, logger);
Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id);
if (testerEndpoint.isEmpty()) {
logger.log(WARNING, "Endpoints for the tester container vanished again, while it was still active!");
return Optional.of(error);
}
// Final sanity check before handing off: the tester container must still answer.
if ( ! controller.jobController().cloud().testerReady(testerEndpoint.get())) {
logger.log(WARNING, "Tester container went bad!");
return Optional.of(error);
}
logger.log("Starting tests ...");
controller.jobController().cloud().startTests(testerEndpoint.get(),
TesterCloud.Suite.of(id.type(), isSetup),
testConfigSerializer.configJson(id.application(),
id.type(),
true,
endpoints,
controller.applications().contentClustersByZone(deployments)));
return Optional.of(running);
}
/**
 * Polls the tester for the status of the test run, and translates it to a run status:
 * empty while tests are still running (or the tester endpoint is not yet known),
 * and a terminal status once they have ended or can no longer complete.
 */
private Optional<RunStatus> endTests(RunId id, DualLogger logger) {
    if (deployment(id.application(), id.type()).isEmpty()) {
        logger.log(INFO, "Deployment expired before tests could complete.");
        return Optional.of(aborted);
    }

    // If a tester certificate was issued for this run, it must still be valid for results to be trusted.
    Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate();
    if (testerCertificate.isPresent()) {
        try {
            testerCertificate.get().checkValidity(Date.from(controller.clock().instant()));
        }
        catch (CertificateExpiredException | CertificateNotYetValidException e) {
            logger.log(INFO, "Tester certificate expired before tests could complete.");
            return Optional.of(aborted);
        }
    }

    controller.jobController().updateTestLog(id);

    BooleanFlag useConfigServerForTesterAPI = Flags.USE_CONFIG_SERVER_FOR_TESTER_API_CALLS.bindTo(controller.flagSource());
    ZoneId zoneId = id.type().zone(controller.system());
    TesterCloud.Status testStatus;
    boolean useConfigServer = useConfigServerForTesterAPI.with(FetchVector.Dimension.ZONE_ID, zoneId.value()).value();
    // Log the flag value to the class logger (not the job log). Uses the statically imported
    // java.util.logging Level INFO, consistent with the rest of this class, instead of the
    // deprecated LogLevel constant — the level value is the same.
    InternalStepRunner.logger.log(INFO, Flags.USE_CONFIG_SERVER_FOR_TESTER_API_CALLS.id().toString() +
                                        " has value " + useConfigServer + " in zone " + zoneId.value());
    if (useConfigServer) {
        // New path: ask the config server for the tester status directly.
        testStatus = controller.serviceRegistry().configServer().getTesterStatus(getTesterDeploymentId(id, zoneId));
    } else {
        // Legacy path: talk to the tester container through its own endpoint.
        Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id);
        if (testerEndpoint.isEmpty()) {
            logger.log("Endpoints for tester not found -- trying again later.");
            return Optional.empty();
        }
        testStatus = controller.jobController().cloud().getStatus(testerEndpoint.get());
    }
    switch (testStatus) {
        case NOT_STARTED:
            throw new IllegalStateException("Tester reports tests not started, even though they should have!");
        case RUNNING:
            return Optional.empty();
        case FAILURE:
            logger.log("Tests failed.");
            return Optional.of(testFailure);
        case ERROR:
            logger.log(INFO, "Tester failed running its tests!");
            return Optional.of(error);
        case SUCCESS:
            logger.log("Tests completed successfully.");
            return Optional.of(running);
        default:
            throw new IllegalStateException("Unknown status '" + testStatus + "'!");
    }
}
/** Copies the Vespa logs from the zone into the run's log storage, if the deployment still exists. */
private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) {
    // Nothing to copy once the deployment is gone; report success either way.
    if (deployment(id.application(), id.type()).isEmpty())
        return Optional.of(running);

    try {
        controller.jobController().updateVespaLog(id);
        return Optional.of(running);
    }
    catch (Exception e) {
        logger.log(INFO, "Failure getting vespa logs for " + id, e);
        return Optional.of(error);
    }
}
/** Deactivates the real deployment of this run's zone, retrying on failure for up to an hour. */
private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) {
    try {
        logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
        controller.applications().deactivate(id.application(), id.type().zone(controller.system()));
        return Optional.of(running);
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Failed deleting application " + id.application(), e);
        Instant stepStart = controller.jobController().run(id).get().stepInfo(deactivateReal).get().startTime().get();
        Instant giveUpThreshold = controller.clock().instant().minus(Duration.ofHours(1));
        if (stepStart.isBefore(giveUpThreshold))
            return Optional.of(error);   // Been failing for over an hour; give up.
        return Optional.empty();         // Possibly transient; retry the step later.
    }
}
/** Deactivates the tester application of this run, retrying on failure for up to an hour. */
private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) {
    try {
        logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
        controller.jobController().deactivateTester(id.tester(), id.type());
        return Optional.of(running);
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Failed deleting tester of " + id.application(), e);
        Instant stepStart = controller.jobController().run(id).get().stepInfo(deactivateTester).get().startTime().get();
        Instant giveUpThreshold = controller.clock().instant().minus(Duration.ofHours(1));
        if (stepStart.isBefore(giveUpThreshold))
            return Optional.of(error);   // Been failing for over an hour; give up.
        return Optional.empty();         // Possibly transient; retry the step later.
    }
}
/** Reports the outcome of the run, sending a failure notification when the run has failed. */
private Optional<RunStatus> report(RunId id, DualLogger logger) {
    try {
        Optional<Run> activeRun = controller.jobController().active(id);
        if (activeRun.isPresent() && activeRun.get().hasFailed())
            sendNotification(activeRun.get(), logger);
        return Optional.of(running);
    }
    catch (IllegalStateException e) {
        logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e);
        return Optional.of(error);
    }
}
/** Sends a mail with a notification of a failed run, if one should be sent. */
private void sendNotification(Run run, DualLogger logger) {
// Look up the notification settings declared in the deployment spec for this instance.
Application application = controller.applications().requireApplication(TenantAndApplicationId.from(run.id().application()));
Notifications notifications = application.deploymentSpec().requireInstance(run.id().application().instance()).notifications();
// The run failed on a "new commit" iff the change being rolled out is the application version this run targeted.
boolean newCommit = application.require(run.id().application().instance()).change().application()
.map(run.versions().targetApplication()::equals)
.orElse(false);
When when = newCommit ? failingCommit : failing;
List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when));
// Also notify the commit author, when so configured and the author email is known.
if (notifications.emailRolesFor(when).contains(author))
run.versions().targetApplication().authorEmail().ifPresent(recipients::add);
if (recipients.isEmpty())
return;
try {
// At most one of these applies per run status; out-of-capacity mails are only sent for production jobs.
if (run.status() == outOfCapacity && run.id().type().isProduction())
controller.serviceRegistry().mailer().send(mails.outOfCapacity(run.id(), recipients));
if (run.status() == deploymentFailed)
controller.serviceRegistry().mailer().send(mails.deploymentFailure(run.id(), recipients));
if (run.status() == installationFailed)
controller.serviceRegistry().mailer().send(mails.installationFailure(run.id(), recipients));
if (run.status() == testFailure)
controller.serviceRegistry().mailer().send(mails.testFailure(run.id(), recipients));
if (run.status() == error)
controller.serviceRegistry().mailer().send(mails.systemError(run.id(), recipients));
}
catch (RuntimeException e) {
// Mailing is best-effort; never fail the report step over it.
logger.log(INFO, "Exception trying to send mail for " + run.id(), e);
}
}
/** Returns the deployment of the real application in the zone of the given job, if it exists. */
private Optional<Deployment> deployment(ApplicationId id, JobType type) {
    // Missing map entry means no deployment in that zone.
    return Optional.ofNullable(application(id).deployments().get(type.zone(controller.system())));
}
/** Returns the real application with the given id. */
private Instance application(ApplicationId id) {
    // NOTE(review): takes and immediately releases the application lock with a no-op action —
    // presumably to serialize with concurrent mutations before reading; confirm intent.
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { });
    return controller.applications().requireInstance(id);
}
/**
 * Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout.
 *
 * We time out the job before the deployment expires, for zones where deployments are not persistent,
 * to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry,
 * and the given default installation timeout, minus one minute, is used as a timeout threshold.
 */
private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) {
    Run run = controller.jobController().run(id).get();
    // Outside CD: a run started after the deployment was made cannot yet have timed out against it.
    if ( ! controller.system().isCd() && run.start().isAfter(deployment.at()))
        return false;
    // Use the zone's deployment time-to-live when it is stricter than the given default.
    Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone())
                                 .filter(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0)
                                 .orElse(defaultTimeout);
    // One minute of slack, per the comment above.
    return deployment.at().isBefore(controller.clock().instant().minus(timeout.minus(Duration.ofMinutes(1))));
}
/** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */
private ApplicationPackage testerPackage(RunId id) {
    ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication();
    DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec();
    ZoneId zone = id.type().zone(controller.system());
    // Tester certificates are used only in public systems, and only for test-environment jobs.
    boolean useTesterCertificate = controller.system().isPublic() && id.type().environment().isTest();
    byte[] servicesXml = servicesXml(controller.zoneRegistry().accessControlDomain(),
                                     ! controller.system().isPublic(),
                                     useTesterCertificate,
                                     // Declared tester flavor wins; otherwise pick a default by cloud (AWS regions get bigger nodes).
                                     testerFlavorFor(id, spec)
                                             .map(NodeResources::fromLegacyName)
                                             .orElse(zone.region().value().contains("aws-") ?
                                                     DEFAULT_TESTER_RESOURCES_AWS : DEFAULT_TESTER_RESOURCES));
    byte[] testPackage = controller.applications().applicationStore().getTester(id.application().tenant(), id.application().application(), version);
    byte[] deploymentXml = deploymentXml(id.tester(),
                                         spec.athenzDomain(),
                                         spec.requireInstance(id.application().instance()).athenzService(zone.environment(), zone.region()));
    // Assemble the final zip: test fat-jar plus the generated services.xml and deployment.xml.
    try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) {
        zipBuilder.add(testPackage);
        zipBuilder.add("services.xml", servicesXml);
        zipBuilder.add("deployment.xml", deploymentXml);
        if (useTesterCertificate)
            appendAndStoreCertificate(zipBuilder, id);
        zipBuilder.close();
        return new ApplicationPackage(zipBuilder.toByteArray());
    }
}
/**
 * Generates a key pair and a self-signed certificate for this run's tester, stores the certificate
 * with the run, and adds both key and certificate to the application package under artifacts/.
 */
private void appendAndStoreCertificate(ZipBuilder zipBuilder, RunId id) {
    KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048);
    // Subject encodes tester id, job type and run number, making each run's certificate distinct.
    X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "." + id.number());
    X509Certificate certificate = X509CertificateBuilder.fromKeypair(keyPair,
                                                                     subject,
                                                                     controller.clock().instant(),
                                                                     controller.clock().instant().plus(certificateTimeout),
                                                                     SignatureAlgorithm.SHA512_WITH_RSA,
                                                                     BigInteger.valueOf(1))
                                                        .build();
    // Stored so the certificate's validity can be checked later, e.g. in endTests.
    controller.jobController().storeTesterCertificate(id, certificate);
    zipBuilder.add("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8));
    zipBuilder.add("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8));
}
/** Returns the deployment id of this run's tester application in the given zone. */
private DeploymentId getTesterDeploymentId(RunId runId, ZoneId zoneId) {
    return new DeploymentId(runId.tester().id(), zoneId);
}
/** Returns the tester flavor of the first zone of the first deployment spec step concerning this run's environment, if any. */
private static Optional<String> testerFlavorFor(RunId id, DeploymentSpec spec) {
    return spec.steps().stream()
               .filter(step -> step.concerns(id.type().environment()))
               .findFirst()
               .flatMap(step -> step.zones().get(0).testerFlavor());
}
/** Returns the generated services.xml content for the tester application. */
static byte[] servicesXml(AthenzDomain domain, boolean systemUsesAthenz, boolean useTesterCertificate,
NodeResources resources) {
int jdiscMemoryGb = 2;
int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb());
int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2);
String resourceString = String.format(Locale.ENGLISH,
"<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>",
resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name());
AthenzDomain idDomain = ("vespa.vespa.cd".equals(domain.value()) ? AthenzDomain.from("vespa.vespa") : domain);
String servicesXml =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<services xmlns:deploy='vespa' version='1.0'>\n" +
" <container version='1.0' id='tester'>\n" +
"\n" +
" <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" +
" <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" +
" <artifactsPath>artifacts</artifactsPath>\n" +
" <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" +
" <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
" <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" +
" </config>\n" +
" </component>\n" +
"\n" +
" <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" +
" <binding>http:
" </handler>\n" +
"\n" +
" <http>\n" +
" <!-- Make sure 4080 is the first port. This will be used by the config server. -->\n" +
" <server id='default' port='4080'/>\n" +
" <server id='testertls4443' port='4443'>\n" +
" <config name=\"jdisc.http.connector\">\n" +
" <tlsClientAuthEnforcer>\n" +
" <enable>true</enable>\n" +
" <pathWhitelist>\n" +
" <item>/status.html</item>\n" +
" <item>/state/v1/config</item>\n" +
" </pathWhitelist>\n" +
" </tlsClientAuthEnforcer>\n" +
" </config>\n" +
" <ssl>\n" +
" <private-key-file>/var/lib/sia/keys/" + idDomain.value() + ".tenant.key.pem</private-key-file>\n" +
" <certificate-file>/var/lib/sia/certs/" + idDomain.value() + ".tenant.cert.pem</certificate-file>\n" +
" <ca-certificates-file>/opt/yahoo/share/ssl/certs/athenz_certificate_bundle.pem</ca-certificates-file>\n" +
" <client-authentication>want</client-authentication>\n" +
" </ssl>\n" +
" </server>\n" +
" <filtering>\n" +
(systemUsesAthenz ?
" <access-control domain='" + domain.value() + "'>\n" +
" <exclude>\n" +
" <binding>http:
" </exclude>\n" +
" </access-control>\n"
: "") +
" <request-chain id=\"testrunner-api\">\n" +
" <filter id='authz-filter' class='com.yahoo.jdisc.http.filter.security.athenz.AthenzAuthorizationFilter' bundle=\"jdisc-security-filters\">\n" +
" <config name=\"jdisc.http.filter.security.athenz.athenz-authorization-filter\">\n" +
" <credentialsToVerify>TOKEN_ONLY</credentialsToVerify>\n" +
" <roleTokenHeaderName>Yahoo-Role-Auth</roleTokenHeaderName>\n" +
" </config>\n" +
" <component id=\"com.yahoo.jdisc.http.filter.security.athenz.StaticRequestResourceMapper\" bundle=\"jdisc-security-filters\">\n" +
" <config name=\"jdisc.http.filter.security.athenz.static-request-resource-mapper\">\n" +
" <resourceName>" + domain.value() + ":tester-application</resourceName>\n" +
" <action>deploy</action>\n" +
" </config>\n" +
" </component>\n" +
" </filter>\n" +
" </request-chain>\n" +
" </filtering>\n" +
" </http>\n" +
"\n" +
" <nodes count=\"1\" allocated-memory=\"" + jdiscMemoryPct + "%\">\n" +
" " + resourceString + "\n" +
" </nodes>\n" +
" </container>\n" +
"</services>\n";
return servicesXml.getBytes(UTF_8);
}
/** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */
private static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) {
    StringBuilder xml = new StringBuilder();
    xml.append("<?xml version='1.0' encoding='UTF-8'?>\n")
       .append("<deployment version=\"1.0\" ");
    // Each attribute is emitted only when present, with a trailing space, exactly as before.
    athenzDomain.ifPresent(domain -> xml.append("athenz-domain=\"").append(domain.value()).append("\" "));
    athenzService.ifPresent(service -> xml.append("athenz-service=\"").append(service.value()).append("\" "));
    xml.append(">")
       .append(" <instance id=\"").append(id.id().instance().value()).append("\" />")
       .append("</deployment>");
    return xml.toString().getBytes(UTF_8);
}
/** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */
private class DualLogger {

    private final RunId id;
    private final Step step;

    private DualLogger(RunId id, Step step) {
        this.id = id;
        this.step = step;
    }

    // Logs one or more lines at INFO to the job's run log only.
    private void log(String... messages) {
        log(List.of(messages));
    }

    private void log(List<String> messages) {
        controller.jobController().log(id, step, INFO, messages);
    }

    private void log(Level level, String message) {
        log(level, message, null);
    }

    // Logs the exception to the internal Logger only, keeping it out of the job's run log.
    private void logWithInternalException(Level level, String message, Throwable thrown) {
        logger.log(level, id + " at " + step + ": " + message, thrown);
        controller.jobController().log(id, step, level, message);
    }

    // Logs to both sinks; the stack trace, if any, is appended to the run-log message.
    private void log(Level level, String message, Throwable thrown) {
        logger.log(level, id + " at " + step + ": " + message, thrown);
        if (thrown != null) {
            ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream();
            thrown.printStackTrace(new PrintStream(traceBuffer));
            message += "\n" + traceBuffer;
        }
        controller.jobController().log(id, step, level, message);
    }

}
} |
Yes. | private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
logger.log(INFO, "Deployment expired before installation was successful.");
return Optional.of(installationFailed);
}
Versions versions = controller.jobController().run(id).get().versions();
Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform();
Run run = controller.jobController().run(id).get();
Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(id.application(), id.type().zone(controller.system())),
Optional.of(platform));
if (services.isEmpty()) {
logger.log("Config status not currently available -- will retry.");
Step step = setTheStage ? installInitialReal : installReal;
return run.stepInfo(step).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
? Optional.of(error)
: Optional.empty();
}
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
id.application(),
ImmutableSet.of(active, reserved));
List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
NodeList nodeList = NodeList.of(nodes, parents, services.get());
boolean firstTick = run.convergenceSummary().isEmpty();
if (firstTick) {
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
}
ConvergenceSummary summary = nodeList.summary();
if (summary.converged()) {
controller.jobController().locked(id, lockedRun -> lockedRun.withSummary(null));
if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) {
if (containersAreUp(id.application(), id.type().zone(controller.system()), logger)) {
logger.log("Installation succeeded!");
return Optional.of(running);
}
}
else if (timedOut(id, deployment.get(), endpointTimeout)) {
logger.log(WARNING, "Endpoints failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
return Optional.of(error);
}
}
boolean failed = false;
NodeList suspendedTooLong = nodeList.suspendedSince(controller.clock().instant().minus(installationTimeout));
if ( ! suspendedTooLong.isEmpty()) {
logger.log(INFO, "Some nodes have been suspended for more than " + installationTimeout.toMinutes() + " minutes.");
failed = true;
}
if (run.noNodesDownSince()
.map(since -> since.isBefore(controller.clock().instant().minus(installationTimeout)))
.orElse(false)) {
if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0)
logger.log(INFO, "No nodes allowed to suspend to progress installation for " + installationTimeout.toMinutes() + " minutes.");
else
logger.log(INFO, "Nodes not able to start with new application package.");
failed = true;
}
Duration timeout = JobRunner.jobTimeout.minusHours(1);
if (timedOut(id, deployment.get(), timeout)) {
logger.log(INFO, "Installation failed to complete within " + timeout.toHours() + "hours!");
failed = true;
}
if (failed) {
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
return Optional.of(installationFailed);
}
if ( ! firstTick)
logger.log(nodeList.allowedDown().asList().stream()
.flatMap(node -> nodeDetails(node, false))
.collect(toList()));
controller.jobController().locked(id, lockedRun -> {
Instant noNodesDownSince = summary.down() == 0 ? lockedRun.noNodesDownSince().orElse(controller.clock().instant()) : null;
return lockedRun.noNodesDownSince(noNodesDownSince).withSummary(summary);
});
return Optional.empty();
} | logger.log(INFO, "Installation failed to complete within " + timeout.toHours() + "hours!"); | private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
logger.log(INFO, "Deployment expired before installation was successful.");
return Optional.of(installationFailed);
}
Versions versions = controller.jobController().run(id).get().versions();
Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform();
Run run = controller.jobController().run(id).get();
Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(id.application(), id.type().zone(controller.system())),
Optional.of(platform));
if (services.isEmpty()) {
logger.log("Config status not currently available -- will retry.");
Step step = setTheStage ? installInitialReal : installReal;
return run.stepInfo(step).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
? Optional.of(error)
: Optional.empty();
}
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
id.application(),
ImmutableSet.of(active, reserved));
List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
NodeList nodeList = NodeList.of(nodes, parents, services.get());
boolean firstTick = run.convergenceSummary().isEmpty();
if (firstTick) {
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
}
ConvergenceSummary summary = nodeList.summary();
if (summary.converged()) {
controller.jobController().locked(id, lockedRun -> lockedRun.withSummary(null));
if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) {
if (containersAreUp(id.application(), id.type().zone(controller.system()), logger)) {
logger.log("Installation succeeded!");
return Optional.of(running);
}
}
else if (timedOut(id, deployment.get(), endpointTimeout)) {
logger.log(WARNING, "Endpoints failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
return Optional.of(error);
}
}
boolean failed = false;
NodeList suspendedTooLong = nodeList.suspendedSince(controller.clock().instant().minus(installationTimeout));
if ( ! suspendedTooLong.isEmpty()) {
logger.log(INFO, "Some nodes have been suspended for more than " + installationTimeout.toMinutes() + " minutes.");
failed = true;
}
if (run.noNodesDownSince()
.map(since -> since.isBefore(controller.clock().instant().minus(installationTimeout)))
.orElse(false)) {
if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0)
logger.log(INFO, "No nodes allowed to suspend to progress installation for " + installationTimeout.toMinutes() + " minutes.");
else
logger.log(INFO, "Nodes not able to start with new application package.");
failed = true;
}
Duration timeout = JobRunner.jobTimeout.minusHours(1);
if (timedOut(id, deployment.get(), timeout)) {
logger.log(INFO, "Installation failed to complete within " + timeout.toHours() + "hours!");
failed = true;
}
if (failed) {
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
return Optional.of(installationFailed);
}
if ( ! firstTick)
logger.log(nodeList.allowedDown().asList().stream()
.flatMap(node -> nodeDetails(node, false))
.collect(toList()));
controller.jobController().locked(id, lockedRun -> {
Instant noNodesDownSince = summary.down() == 0 ? lockedRun.noNodesDownSince().orElse(controller.clock().instant()) : null;
return lockedRun.noNodesDownSince(noNodesDownSince).withSummary(summary);
});
return Optional.empty();
} | class InternalStepRunner implements StepRunner {
private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName());

// Default tester node resources, chosen per cloud; AWS regions get larger nodes.
private static final NodeResources DEFAULT_TESTER_RESOURCES =
        new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any);
private static final NodeResources DEFAULT_TESTER_RESOURCES_AWS =
        new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any);

// Timeouts for the wait loops driven by this runner.
static final Duration endpointTimeout = Duration.ofMinutes(15);
static final Duration testerTimeout = Duration.ofMinutes(30);
static final Duration installationTimeout = Duration.ofMinutes(60);
static final Duration certificateTimeout = Duration.ofMinutes(300);

private final Controller controller;
private final TestConfigSerializer testConfigSerializer;
private final DeploymentFailureMails mails;

public InternalStepRunner(Controller controller) {
    this.controller = controller;
    this.testConfigSerializer = new TestConfigSerializer(controller.system());
    this.mails = new DeploymentFailureMails(controller.zoneRegistry());
}
/**
 * Runs the given step of the given run once, dispatching to the matching handler.
 * An empty result means "not done yet — try again later"; a present result concludes the step.
 */
@Override
public Optional<RunStatus> run(LockedStep step, RunId id) {
    DualLogger logger = new DualLogger(id, step.get());
    try {
        switch (step.get()) {
            case deployTester: return deployTester(id, logger);
            case deployInitialReal: return deployInitialReal(id, logger);
            case installInitialReal: return installInitialReal(id, logger);
            case deployReal: return deployReal(id, logger);
            case installTester: return installTester(id, logger);
            case installReal: return installReal(id, logger);
            case startStagingSetup: return startTests(id, true, logger);
            case endStagingSetup: return endTests(id, logger);
            case startTests: return startTests(id, false, logger);
            case endTests: return endTests(id, logger);
            case copyVespaLogs: return copyVespaLogs(id, logger);
            case deactivateReal: return deactivateReal(id, logger);
            case deactivateTester: return deactivateTester(id, logger);
            case report: return report(id, logger);
            default: throw new AssertionError("Unknown step '" + step + "'!");
        }
    }
    catch (UncheckedIOException e) {
        // I/O errors are assumed transient: hide the trace from the run log and retry.
        logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e);
        return Optional.empty();
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Unexpected exception running " + id, e);
        // Cleanup steps are retried indefinitely; other steps conclude with error.
        if (JobProfile.of(id.type()).alwaysRun().contains(step.get())) {
            logger.log("Will keep trying, as this is a cleanup step.");
            return Optional.empty();
        }
        return Optional.of(error);
    }
}
/** Deploys the source (or, if absent, target) platform and application versions, to set the stage. */
private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) {
    Versions versions = controller.jobController().run(id).get().versions();
    logger.log("Deploying platform version " +
               versions.sourcePlatform().orElse(versions.targetPlatform()) +
               " and application version " +
               versions.sourceApplication().orElse(versions.targetApplication()).id() + " ...");
    return deployReal(id, true, versions, logger);
}
/** Deploys the target platform and application versions of this run. */
private Optional<RunStatus> deployReal(RunId id, DualLogger logger) {
    Versions versions = controller.jobController().run(id).get().versions();
    logger.log("Deploying platform version " + versions.targetPlatform() +
               " and application version " + versions.targetApplication().id() + " ...");
    return deployReal(id, false, versions, logger);
}
/**
 * Deploys the real application for this run.
 * Manually deployed environments use the stored dev package and pin the platform version;
 * all other environments let the config server resolve package and version.
 */
private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, Versions versions, DualLogger logger) {
    Optional<ApplicationPackage> applicationPackage = id.type().environment().isManuallyDeployed()
            ? Optional.of(new ApplicationPackage(controller.applications().applicationStore()
                                                           .getDev(id.application(), id.type().zone(controller.system()))))
            : Optional.empty();
    Optional<Version> vespaVersion = id.type().environment().isManuallyDeployed()
            ? Optional.of(versions.targetPlatform())
            : Optional.empty();
    return deploy(id.application(),
                  id.type(),
                  () -> controller.applications().deploy(id.application(),
                                                         id.type().zone(controller.system()),
                                                         applicationPackage,
                                                         new DeployOptions(false,
                                                                           vespaVersion,
                                                                           false,
                                                                           setTheStage)),
                  // Retry window is measured from the start of whichever deploy step this is.
                  controller.jobController().run(id).get()
                            .stepInfo(setTheStage ? deployInitialReal : deployReal).get()
                            .startTime().get(),
                  logger);
}
/** Deploys the tester container application, on the current system version. */
private Optional<RunStatus> deployTester(RunId id, DualLogger logger) {
    Version platform = controller.systemVersion();
    logger.log("Deploying the tester container on platform " + platform + " ...");
    return deploy(id.tester().id(),
                  id.type(),
                  () -> controller.applications().deployTester(id.tester(),
                                                               testerPackage(id),
                                                               id.type().zone(controller.system()),
                                                               new DeployOptions(true,
                                                                                 Optional.of(platform),
                                                                                 false,
                                                                                 false)),
                  controller.jobController().run(id).get()
                            .stepInfo(deployTester).get()
                            .startTime().get(),
                  logger);
}
private Optional<RunStatus> deploy(ApplicationId id, JobType type, Supplier<ActivateResult> deployment,
Instant startTime, DualLogger logger) {
try {
PrepareResponse prepareResponse = deployment.get().prepareResponse();
if ( ! prepareResponse.configChangeActions.refeedActions.stream().allMatch(action -> action.allowed)) {
List<String> messages = new ArrayList<>();
messages.add("Deploy failed due to non-compatible changes that require re-feed.");
messages.add("Your options are:");
messages.add("1. Revert the incompatible changes.");
messages.add("2. If you think it is safe in your case, you can override this validation, see");
messages.add(" http:
messages.add("3. Deploy as a new application under a different name.");
messages.add("Illegal actions:");
prepareResponse.configChangeActions.refeedActions.stream()
.filter(action -> ! action.allowed)
.flatMap(action -> action.messages.stream())
.forEach(messages::add);
messages.add("Details:");
prepareResponse.log.stream()
.map(entry -> entry.message)
.forEach(messages::add);
logger.log(messages);
return Optional.of(deploymentFailed);
}
if (prepareResponse.configChangeActions.restartActions.isEmpty())
logger.log("No services requiring restart.");
else
prepareResponse.configChangeActions.restartActions.stream()
.flatMap(action -> action.services.stream())
.map(service -> service.hostName)
.sorted().distinct()
.map(Hostname::new)
.forEach(hostname -> {
controller.applications().restart(new DeploymentId(id, type.zone(controller.system())), Optional.of(hostname));
logger.log("Restarting services on host " + hostname.id() + ".");
});
logger.log("Deployment successful.");
if (prepareResponse.message != null)
logger.log(prepareResponse.message);
return Optional.of(running);
}
catch (ConfigServerException e) {
Optional<RunStatus> result = startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(deploymentFailed) : Optional.empty();
switch (e.getErrorCode()) {
case ACTIVATION_CONFLICT:
case APPLICATION_LOCK_FAILURE:
case CERTIFICATE_NOT_READY:
logger.log("Deployment failed with possibly transient error " + e.getErrorCode() +
", will retry: " + e.getMessage());
return result;
case LOAD_BALANCER_NOT_READY:
case PARENT_HOST_NOT_READY:
logger.log(e.getServerMessage());
return result;
case OUT_OF_CAPACITY:
logger.log(e.getServerMessage());
return Optional.of(outOfCapacity);
case INVALID_APPLICATION_PACKAGE:
case BAD_REQUEST:
logger.log(e.getMessage());
return Optional.of(deploymentFailed);
}
throw e;
}
}
/** Awaits installation of the initial (stage-setting) real deployment. */
private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) {
    return installReal(id, true, logger);
}
/** Awaits installation of the target real deployment. */
private Optional<RunStatus> installReal(RunId id, DualLogger logger) {
    return installReal(id, false, logger);
}
/**
 * Polls installation progress of the tester container: waits for config convergence on all tester nodes,
 * then for endpoints and container readiness. Gives up with {@code error} when the convergence data is
 * unavailable for 5 minutes, endpoints fail to appear within {@code endpointTimeout}, or installation
 * exceeds {@code testerTimeout}; an empty result means "still in progress".
 */
private Optional<RunStatus> installTester(RunId id, DualLogger logger) {
    Run run = controller.jobController().run(id).get();
    Version platform = controller.systemVersion();
    ZoneId zone = id.type().zone(controller.system());
    ApplicationId testerId = id.tester().id();
    Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(testerId, zone),
                                                                                                           Optional.of(platform));
    if (services.isEmpty()) {
        logger.log("Config status not currently available -- will retry.");
        return run.stepInfo(installTester).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
               ? Optional.of(error)
               : Optional.empty();
    }
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone,
                                                                                        testerId,
                                                                                        ImmutableSet.of(active, reserved));
    List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(zone,
                                                                                          nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
    NodeList nodeList = NodeList.of(nodes, parents, services.get());
    logger.log(nodeList.asList().stream()
                       .flatMap(node -> nodeDetails(node, false))
                       .collect(toList()));
    if (nodeList.summary().converged()) {
        if (endpointsAvailable(testerId, zone, logger)) {
            if (containersAreUp(testerId, zone, logger)) {
                logger.log("Tester container successfully installed!");
                return Optional.of(running);
            }
        }
        else if (run.stepInfo(installTester).get().startTime().get().plus(endpointTimeout).isBefore(controller.clock().instant())) {
            logger.log(WARNING, "Tester failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
            return Optional.of(error);
        }
    }
    if (run.stepInfo(installTester).get().startTime().get().plus(testerTimeout).isBefore(controller.clock().instant())) {
        logger.log(WARNING, "Installation of tester failed to complete within " + testerTimeout.toMinutes() + " minutes!");
        return Optional.of(error);
    }
    return Optional.empty();
}
/** Returns true iff all containers in the deployment give 100 consecutive 200 OK responses on /status.html. */
private boolean containersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
    var endpoints = controller.applications().clusterEndpoints(Set.of(new DeploymentId(id, zoneId)));
    if ( ! endpoints.containsKey(zoneId))
        return false;

    for (URI endpoint : endpoints.get(zoneId).values()) {
        // The tester application has its own readiness check.
        boolean ready = id.instance().isTester() ? controller.jobController().cloud().testerReady(endpoint)
                                                 : controller.jobController().cloud().ready(endpoint);
        if (!ready) {
            logger.log("Failed to get 100 consecutive OKs from " + endpoint);
            return false;
        }
    }
    return true;
}
/** Returns whether all cluster endpoints for the deployment exist and resolve in DNS, logging them when they do. */
private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) {
    var endpoints = controller.applications().clusterEndpoints(Set.of(new DeploymentId(id, zone)));
    if ( ! endpoints.containsKey(zone)) {
        logger.log("Endpoints not yet ready.");
        return false;
    }
    for (var endpoint : endpoints.get(zone).values())
        if ( ! controller.jobController().cloud().exists(endpoint)) {
            logger.log(INFO, "DNS lookup yielded no IP address for '" + endpoint + "'.");
            return false;
        }
    logEndpoints(endpoints, logger);
    return true;
}
/** Writes all known endpoints, grouped per zone, to the run log. */
private void logEndpoints(Map<ZoneId, Map<ClusterSpec.Id, URI>> endpoints, DualLogger logger) {
    List<String> lines = new ArrayList<>();
    lines.add("Found endpoints:");
    for (Map.Entry<ZoneId, Map<ClusterSpec.Id, URI>> zoneEntry : endpoints.entrySet()) {
        lines.add("- " + zoneEntry.getKey());
        for (Map.Entry<ClusterSpec.Id, URI> clusterEntry : zoneEntry.getValue().entrySet())
            lines.add(" |-- " + clusterEntry.getValue() + " (" + clusterEntry.getKey() + ")");
    }
    logger.log(lines);
}
/**
 * Renders one line per node with its orchestration state and any pending platform/OS/firmware/restart/reboot work,
 * followed by one line per service — all services when {@code printAllServices}, otherwise only when new config is pending.
 */
private Stream<String> nodeDetails(NodeWithServices node, boolean printAllServices) {
    return Stream.concat(Stream.of(node.node().hostname() + ": " + humanize(node.node().serviceState()),
                                   "--- platform " + node.node().wantedVersion() + (node.needsPlatformUpgrade()
                                           ? " <-- " + (node.node().currentVersion().isEmpty() ? "not booted" : node.node().currentVersion())
                                           : "") +
                                   (node.needsOsUpgrade() && node.isAllowedDown()
                                           ? ", upgrading OS (" + node.node().wantedOsVersion() + " <-- " + node.node().currentOsVersion() + ")"
                                           : "") +
                                   (node.needsFirmwareUpgrade() && node.isAllowedDown()
                                           ? ", upgrading firmware"
                                           : "") +
                                   (node.needsRestart()
                                           ? ", restart pending (" + node.node().wantedRestartGeneration() + " <-- " + node.node().restartGeneration() + ")"
                                           : "") +
                                   (node.needsReboot()
                                           ? ", reboot pending (" + node.node().wantedRebootGeneration() + " <-- " + node.node().rebootGeneration() + ")"
                                           : "")),
                         node.services().stream()
                             .filter(service -> printAllServices || node.needsNewConfig())
                             .map(service -> "--- " + service.type() + " on port " + service.port() + (service.currentGeneration() == -1
                                     ? " has not started "
                                     : " has config generation " + service.currentGeneration() + ", wanted is " + node.wantedConfigGeneration())));
}
/** Renders the given service state as a human-readable phrase; unknown states fall back to the enum name. */
private String humanize(Node.ServiceState state) {
    if (state == Node.ServiceState.allowedDown) return "allowed to be DOWN";
    if (state == Node.ServiceState.expectedUp) return "expected to be UP";
    if (state == Node.ServiceState.unorchestrated) return "unorchestrated";
    return state.name();
}
/**
 * Starts the test suite (staging setup when {@code isSetup}) in the tester container.
 * Fails with {@code error} if the deployment, its endpoints, or the tester have disappeared or gone bad.
 */
private Optional<RunStatus> startTests(RunId id, boolean isSetup, DualLogger logger) {
    Optional<Deployment> deployment = deployment(id.application(), id.type());
    if (deployment.isEmpty()) {
        logger.log(INFO, "Deployment expired before tests could start.");
        return Optional.of(error);
    }
    // Tests are given endpoints for all production deployments, plus the zone under test.
    var deployments = controller.applications().requireInstance(id.application())
                                .productionDeployments().keySet().stream()
                                .map(zone -> new DeploymentId(id.application(), zone))
                                .collect(Collectors.toSet());
    deployments.add(new DeploymentId(id.application(), id.type().zone(controller.system())));
    logger.log("Attempting to find endpoints ...");
    var endpoints = controller.applications().clusterEndpoints(deployments);
    if ( ! endpoints.containsKey(id.type().zone(controller.system()))) {
        logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!");
        return Optional.of(error);
    }
    logEndpoints(endpoints, logger);
    Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id);
    if (testerEndpoint.isEmpty()) {
        logger.log(WARNING, "Endpoints for the tester container vanished again, while it was still active!");
        return Optional.of(error);
    }
    if ( ! controller.jobController().cloud().testerReady(testerEndpoint.get())) {
        logger.log(WARNING, "Tester container went bad!");
        return Optional.of(error);
    }
    logger.log("Starting tests ...");
    controller.jobController().cloud().startTests(testerEndpoint.get(),
                                                  TesterCloud.Suite.of(id.type(), isSetup),
                                                  testConfigSerializer.configJson(id.application(),
                                                                                  id.type(),
                                                                                  true,
                                                                                  endpoints,
                                                                                  controller.applications().contentClustersByZone(deployments)));
    return Optional.of(running);
}
/**
 * Polls the status of the test suite and translates it to a run status.
 *
 * Returns {@code aborted} if the deployment or the tester certificate expired while tests ran,
 * empty while tests are still running (or the tester endpoint is temporarily unknown), and a
 * terminal status once the tester reports completion.
 */
private Optional<RunStatus> endTests(RunId id, DualLogger logger) {
    if (deployment(id.application(), id.type()).isEmpty()) {
        logger.log(INFO, "Deployment expired before tests could complete.");
        return Optional.of(aborted);
    }
    // orElseThrow() over the bare get(): the run must exist at this point, so fail loudly if not.
    Optional<X509Certificate> testerCertificate = controller.jobController().run(id).orElseThrow().testerCertificate();
    if (testerCertificate.isPresent()) {
        try {
            testerCertificate.get().checkValidity(Date.from(controller.clock().instant()));
        }
        catch (CertificateExpiredException | CertificateNotYetValidException e) {
            logger.log(INFO, "Tester certificate expired before tests could complete.");
            return Optional.of(aborted);
        }
    }
    controller.jobController().updateTestLog(id);
    BooleanFlag useConfigServerForTesterAPI = Flags.USE_CONFIG_SERVER_FOR_TESTER_API_CALLS.bindTo(controller.flagSource());
    ZoneId zoneId = id.type().zone(controller.system());
    TesterCloud.Status testStatus;
    boolean useConfigServer = useConfigServerForTesterAPI.with(FetchVector.Dimension.ZONE_ID, zoneId.value()).value();
    // Qualified explicitly: the DualLogger parameter shadows the class-level Logger field.
    InternalStepRunner.logger.log(LogLevel.INFO, Flags.USE_CONFIG_SERVER_FOR_TESTER_API_CALLS.id().toString() +
                                                 " has value " + useConfigServer + " in zone " + zoneId.value());
    if (useConfigServer) {
        testStatus = controller.serviceRegistry().configServer().getTesterStatus(getTesterDeploymentId(id, zoneId));
    } else {
        Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id);
        if (testerEndpoint.isEmpty()) {
            logger.log("Endpoints for tester not found -- trying again later.");
            return Optional.empty();
        }
        testStatus = controller.jobController().cloud().getStatus(testerEndpoint.get());
    }
    switch (testStatus) {
        case NOT_STARTED:
            throw new IllegalStateException("Tester reports tests not started, even though they should have!");
        case RUNNING:
            return Optional.empty();
        case FAILURE:
            logger.log("Tests failed.");
            return Optional.of(testFailure);
        case ERROR:
            logger.log(INFO, "Tester failed running its tests!");
            return Optional.of(error);
        case SUCCESS:
            logger.log("Tests completed successfully.");
            return Optional.of(running);
        default:
            throw new IllegalStateException("Unknown status '" + testStatus + "'!");
    }
}
/** Copies the Vespa log from the deployment, if it still exists; best effort, errors out on failure. */
private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) {
    if (deployment(id.application(), id.type()).isEmpty())
        return Optional.of(running); // Deployment is gone — nothing to copy.
    try {
        controller.jobController().updateVespaLog(id);
        return Optional.of(running);
    }
    catch (Exception e) {
        logger.log(INFO, "Failure getting vespa logs for " + id, e);
        return Optional.of(error);
    }
}
/**
 * Deactivates the real deployment for this run.
 *
 * On failure, retries (empty result) until an hour has passed since this step first started,
 * then gives up with {@code error}.
 */
private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) {
    try {
        logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
        controller.applications().deactivate(id.application(), id.type().zone(controller.system()));
        return Optional.of(running);
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Failed deleting application " + id.application(), e);
        // Measure the retry budget from the step's first start, not from this attempt.
        Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateReal).get().startTime().get();
        return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
               ? Optional.of(error)
               : Optional.empty();
    }
}
/**
 * Deactivates the tester deployment for this run.
 *
 * Mirrors {@code deactivateReal}: retries on failure until an hour after the step first started.
 */
private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) {
    try {
        logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
        controller.jobController().deactivateTester(id.tester(), id.type());
        return Optional.of(running);
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Failed deleting tester of " + id.application(), e);
        // Measure the retry budget from the step's first start, not from this attempt.
        Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateTester).get().startTime().get();
        return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
               ? Optional.of(error)
               : Optional.empty();
    }
}
/** Reports the outcome of the run: sends failure notifications if the still-active run has failed. */
private Optional<RunStatus> report(RunId id, DualLogger logger) {
    try {
        controller.jobController().active(id).ifPresent(run -> {
            if (run.hasFailed())
                sendNotification(run, logger);
        });
    }
    catch (IllegalStateException e) {
        // Presumably thrown when the job was removed under us — TODO confirm against JobController.
        logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e);
        return Optional.of(error);
    }
    return Optional.of(running);
}
/**
 * Sends a mail with a notification of a failed run, if one should be sent.
 *
 * Recipients are resolved from the deployment spec's notification settings; the author of the
 * change is included when the spec asks for author notifications. Mail delivery is best effort
 * and never fails the step.
 */
private void sendNotification(Run run, DualLogger logger) {
    Application application = controller.applications().requireApplication(TenantAndApplicationId.from(run.id().application()));
    Notifications notifications = application.deploymentSpec().requireInstance(run.id().application().instance()).notifications();
    // "New commit" means the change being rolled out is the application version this run targets.
    boolean newCommit = application.require(run.id().application().instance()).change().application()
                                   .map(run.versions().targetApplication()::equals)
                                   .orElse(false);
    When when = newCommit ? failingCommit : failing;
    List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when));
    if (notifications.emailRolesFor(when).contains(author))
        run.versions().targetApplication().authorEmail().ifPresent(recipients::add);
    if (recipients.isEmpty())
        return;
    try {
        // Out-of-capacity is only mail-worthy for production jobs.
        if (run.status() == outOfCapacity && run.id().type().isProduction())
            controller.serviceRegistry().mailer().send(mails.outOfCapacity(run.id(), recipients));
        if (run.status() == deploymentFailed)
            controller.serviceRegistry().mailer().send(mails.deploymentFailure(run.id(), recipients));
        if (run.status() == installationFailed)
            controller.serviceRegistry().mailer().send(mails.installationFailure(run.id(), recipients));
        if (run.status() == testFailure)
            controller.serviceRegistry().mailer().send(mails.testFailure(run.id(), recipients));
        if (run.status() == error)
            controller.serviceRegistry().mailer().send(mails.systemError(run.id(), recipients));
    }
    catch (RuntimeException e) {
        // Mail is best effort; log and move on.
        logger.log(INFO, "Exception trying to send mail for " + run.id(), e);
    }
}
/** Returns the deployment of the real application in the zone of the given job, if it exists. */
private Optional<Deployment> deployment(ApplicationId id, JobType type) {
    ZoneId zone = type.zone(controller.system());
    return Optional.ofNullable(application(id).deployments().get(zone));
}
/** Returns the real application with the given id. */
private Instance application(ApplicationId id) {
    // Take and immediately release the application lock before reading — presumably to
    // serialize with in-flight writers so the read observes their committed state. TODO confirm.
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { });
    return controller.applications().requireInstance(id);
}
/**
* Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout.
*
* We time out the job before the deployment expires, for zones where deployments are not persistent,
* to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry,
* and the given default installation timeout, minus one minute, is used as a timeout threshold.
*/
private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) {
    Run run = controller.jobController().run(id).get();
    // Outside CD, a deployment older than this run's start is someone else's — don't time out on it.
    if ( ! controller.system().isCd() && run.start().isAfter(deployment.at()))
        return false;
    // Effective limit: the zone's deployment TTL when it is stricter than the default.
    Duration limit = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone())
                               .filter(ttl -> ttl.compareTo(defaultTimeout) < 0)
                               .orElse(defaultTimeout);
    // Trip one minute early, so logs can still be collected before the deployment expires.
    Instant threshold = controller.clock().instant().minus(limit.minus(Duration.ofMinutes(1)));
    return deployment.at().isBefore(threshold);
}
/** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */
private ApplicationPackage testerPackage(RunId id) {
    ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication();
    DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec();
    ZoneId zone = id.type().zone(controller.system());
    // Tester certificates are only generated for test jobs in public systems.
    boolean useTesterCertificate = controller.system().isPublic() && id.type().environment().isTest();
    // Tester node resources: explicit flavor from the spec if given, else an AWS or default preset.
    byte[] servicesXml = servicesXml(controller.zoneRegistry().accessControlDomain(),
                                     ! controller.system().isPublic(),
                                     useTesterCertificate,
                                     testerFlavorFor(id, spec)
                                             .map(NodeResources::fromLegacyName)
                                             .orElse(zone.region().value().contains("aws-") ?
                                                     DEFAULT_TESTER_RESOURCES_AWS : DEFAULT_TESTER_RESOURCES));
    byte[] testPackage = controller.applications().applicationStore().getTester(id.application().tenant(), id.application().application(), version);
    byte[] deploymentXml = deploymentXml(id.tester(),
                                         spec.athenzDomain(),
                                         spec.requireInstance(id.application().instance()).athenzService(zone.environment(), zone.region()));
    // Assemble the final zip: submitted test package plus generated services.xml and deployment.xml.
    try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) {
        zipBuilder.add(testPackage);
        zipBuilder.add("services.xml", servicesXml);
        zipBuilder.add("deployment.xml", deploymentXml);
        if (useTesterCertificate)
            appendAndStoreCertificate(zipBuilder, id);
        zipBuilder.close();
        return new ApplicationPackage(zipBuilder.toByteArray());
    }
}
/**
 * Generates a self-signed RSA certificate for the tester, stores it for later validity checks,
 * and adds the private key and certificate to the tester package as artifacts.
 */
private void appendAndStoreCertificate(ZipBuilder zipBuilder, RunId id) {
    KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048);
    X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "." + id.number());
    X509Certificate certificate = X509CertificateBuilder.fromKeypair(keyPair,
                                                                     subject,
                                                                     controller.clock().instant(),
                                                                     controller.clock().instant().plus(certificateTimeout),
                                                                     SignatureAlgorithm.SHA512_WITH_RSA,
                                                                     BigInteger.valueOf(1))
                                                        .build();
    // Stored so the controller can later check the certificate the tester presents (see endTests).
    controller.jobController().storeTesterCertificate(id, certificate);
    zipBuilder.add("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8));
    zipBuilder.add("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8));
}
/** Returns the deployment id of this run's tester application in the given zone. */
private DeploymentId getTesterDeploymentId(RunId runId, ZoneId zoneId) {
    ApplicationId testerApplication = runId.tester().id();
    return new DeploymentId(testerApplication, zoneId);
}
/** Returns the tester flavor of the first step in the spec concerning this job's environment, if any. */
private static Optional<String> testerFlavorFor(RunId id, DeploymentSpec spec) {
    return spec.steps().stream()
               .filter(step -> step.concerns(id.type().environment()))
               .findFirst()
               .flatMap(step -> step.zones().get(0).testerFlavor());
}
/** Returns the generated services.xml content for the tester application. */
static byte[] servicesXml(AthenzDomain domain, boolean systemUsesAthenz, boolean useTesterCertificate,
                          NodeResources resources) {
    // Reserve a fixed 2 Gb for the jdisc container; give half the remaining memory to surefire.
    int jdiscMemoryGb = 2;
    int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb());
    int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2);
    // Locale.ENGLISH keeps decimal points ('.') regardless of the JVM's default locale.
    String resourceString = String.format(Locale.ENGLISH,
                                          "<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>",
                                          resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name());
    // The CD system's Athenz domain maps to the production identity domain for SIA key/cert paths.
    AthenzDomain idDomain = ("vespa.vespa.cd".equals(domain.value()) ? AthenzDomain.from("vespa.vespa") : domain);
    String servicesXml =
            "<?xml version='1.0' encoding='UTF-8'?>\n" +
            "<services xmlns:deploy='vespa' version='1.0'>\n" +
            "    <container version='1.0' id='tester'>\n" +
            "\n" +
            "        <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" +
            "            <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" +
            "                <artifactsPath>artifacts</artifactsPath>\n" +
            "                <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" +
            "                <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
            "                <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" +
            "            </config>\n" +
            "        </component>\n" +
            "\n" +
            "        <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" +
            "            <binding>http:
            "        </handler>\n" +
            "\n" +
            "        <http>\n" +
            "            <!-- Make sure 4080 is the first port. This will be used by the config server. -->\n" +
            "            <server id='default' port='4080'/>\n" +
            "            <server id='testertls4443' port='4443'>\n" +
            "                <config name=\"jdisc.http.connector\">\n" +
            "                    <tlsClientAuthEnforcer>\n" +
            "                        <enable>true</enable>\n" +
            "                        <pathWhitelist>\n" +
            "                            <item>/status.html</item>\n" +
            "                            <item>/state/v1/config</item>\n" +
            "                        </pathWhitelist>\n" +
            "                    </tlsClientAuthEnforcer>\n" +
            "                </config>\n" +
            "                <ssl>\n" +
            "                    <private-key-file>/var/lib/sia/keys/" + idDomain.value() + ".tenant.key.pem</private-key-file>\n" +
            "                    <certificate-file>/var/lib/sia/certs/" + idDomain.value() + ".tenant.cert.pem</certificate-file>\n" +
            "                    <ca-certificates-file>/opt/yahoo/share/ssl/certs/athenz_certificate_bundle.pem</ca-certificates-file>\n" +
            "                    <client-authentication>want</client-authentication>\n" +
            "                </ssl>\n" +
            "            </server>\n" +
            "            <filtering>\n" +
            // Access control is only emitted in systems that use Athenz.
            (systemUsesAthenz ?
            "                <access-control domain='" + domain.value() + "'>\n" +
            "                    <exclude>\n" +
            "                        <binding>http:
            "                    </exclude>\n" +
            "                </access-control>\n"
            : "") +
            "                <request-chain id=\"testrunner-api\">\n" +
            "                    <filter id='authz-filter' class='com.yahoo.jdisc.http.filter.security.athenz.AthenzAuthorizationFilter' bundle=\"jdisc-security-filters\">\n" +
            "                        <config name=\"jdisc.http.filter.security.athenz.athenz-authorization-filter\">\n" +
            "                            <credentialsToVerify>TOKEN_ONLY</credentialsToVerify>\n" +
            "                            <roleTokenHeaderName>Yahoo-Role-Auth</roleTokenHeaderName>\n" +
            "                        </config>\n" +
            "                        <component id=\"com.yahoo.jdisc.http.filter.security.athenz.StaticRequestResourceMapper\" bundle=\"jdisc-security-filters\">\n" +
            "                            <config name=\"jdisc.http.filter.security.athenz.static-request-resource-mapper\">\n" +
            "                                <resourceName>" + domain.value() + ":tester-application</resourceName>\n" +
            "                                <action>deploy</action>\n" +
            "                            </config>\n" +
            "                        </component>\n" +
            "                    </filter>\n" +
            "                </request-chain>\n" +
            "            </filtering>\n" +
            "        </http>\n" +
            "\n" +
            "        <nodes count=\"1\" allocated-memory=\"" + jdiscMemoryPct + "%\">\n" +
            "            " + resourceString + "\n" +
            "        </nodes>\n" +
            "    </container>\n" +
            "</services>\n";
    return servicesXml.getBytes(UTF_8);
}
/** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */
private static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) {
    // Each attribute, including its trailing space, is emitted only when the value is present.
    String domainAttribute = athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("");
    String serviceAttribute = athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("");
    String deploymentSpec = "<?xml version='1.0' encoding='UTF-8'?>\n" +
                            "<deployment version=\"1.0\" " + domainAttribute + serviceAttribute + ">" +
                            "  <instance id=\"" + id.id().instance().value() + "\" />" +
                            "</deployment>";
    return deploymentSpec.getBytes(UTF_8);
}
/** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */
private class DualLogger {

    private final RunId id;
    private final Step step;

    private DualLogger(RunId id, Step step) {
        this.id = id;
        this.step = step;
    }

    // Logs the given messages at INFO, to the job log only.
    private void log(String... messages) {
        log(List.of(messages));
    }

    private void log(List<String> messages) {
        controller.jobController().log(id, step, INFO, messages);
    }

    private void log(Level level, String message) {
        log(level, message, null);
    }

    // Logs the exception to the class logger only; the job log gets just the message.
    private void logWithInternalException(Level level, String message, Throwable thrown) {
        logger.log(level, id + " at " + step + ": " + message, thrown);
        controller.jobController().log(id, step, level, message);
    }

    // Logs to both logs; the stack trace is appended to the job-log message.
    private void log(Level level, String message, Throwable thrown) {
        logger.log(level, id + " at " + step + ": " + message, thrown);
        if (thrown != null) {
            ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream();
            thrown.printStackTrace(new PrintStream(traceBuffer));
            message += "\n" + traceBuffer;
        }
        controller.jobController().log(id, step, level, message);
    }

}
} | class InternalStepRunner implements StepRunner {
private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName());

// Default tester container resources; the AWS preset is larger (see testerPackage).
private static final NodeResources DEFAULT_TESTER_RESOURCES =
        new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any);
private static final NodeResources DEFAULT_TESTER_RESOURCES_AWS =
        new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any);

// Step timeouts: endpoint visibility, tester installation, and (presumably) real installation.
static final Duration endpointTimeout = Duration.ofMinutes(15);
static final Duration testerTimeout = Duration.ofMinutes(30);
static final Duration installationTimeout = Duration.ofMinutes(60);
// Validity period of the generated tester certificate (see appendAndStoreCertificate).
static final Duration certificateTimeout = Duration.ofMinutes(300);

private final Controller controller;
private final TestConfigSerializer testConfigSerializer;
private final DeploymentFailureMails mails;
/** Creates a step runner operating through the given controller. */
public InternalStepRunner(Controller controller) {
    this.controller = controller;
    this.testConfigSerializer = new TestConfigSerializer(controller.system());
    this.mails = new DeploymentFailureMails(controller.zoneRegistry());
}
/**
 * Runs the given step of the given run, dispatching to the matching handler.
 *
 * An empty result means the step is not yet complete and should be retried.
 */
@Override
public Optional<RunStatus> run(LockedStep step, RunId id) {
    DualLogger logger = new DualLogger(id, step.get());
    try {
        switch (step.get()) {
            case deployTester: return deployTester(id, logger);
            case deployInitialReal: return deployInitialReal(id, logger);
            case installInitialReal: return installInitialReal(id, logger);
            case deployReal: return deployReal(id, logger);
            case installTester: return installTester(id, logger);
            case installReal: return installReal(id, logger);
            case startStagingSetup: return startTests(id, true, logger);
            case endStagingSetup: return endTests(id, logger);
            case startTests: return startTests(id, false, logger);
            case endTests: return endTests(id, logger);
            case copyVespaLogs: return copyVespaLogs(id, logger);
            case deactivateReal: return deactivateReal(id, logger);
            case deactivateTester: return deactivateTester(id, logger);
            case report: return report(id, logger);
            default: throw new AssertionError("Unknown step '" + step + "'!");
        }
    }
    catch (UncheckedIOException e) {
        // IO trouble is treated as transient: stack trace goes to the internal log only; retry.
        logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e);
        return Optional.empty();
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Unexpected exception running " + id, e);
        // Cleanup steps must eventually run, so keep retrying those; other steps fail the run.
        if (JobProfile.of(id.type()).alwaysRun().contains(step.get())) {
            logger.log("Will keep trying, as this is a cleanup step.");
            return Optional.empty();
        }
        return Optional.of(error);
    }
}
/** Deploys the source versions (falling back to targets), to set the stage for an upgrade test. */
private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) {
    Versions versions = controller.jobController().run(id).get().versions();
    Version platform = versions.sourcePlatform().orElse(versions.targetPlatform());
    ApplicationVersion application = versions.sourceApplication().orElse(versions.targetApplication());
    logger.log("Deploying platform version " + platform + " and application version " + application.id() + " ...");
    return deployReal(id, true, versions, logger);
}
/** Deploys the target platform and application versions of this run. */
private Optional<RunStatus> deployReal(RunId id, DualLogger logger) {
    Versions versions = controller.jobController().run(id).get().versions();
    String message = "Deploying platform version " + versions.targetPlatform() +
                     " and application version " + versions.targetApplication().id() + " ...";
    logger.log(message);
    return deployReal(id, false, versions, logger);
}
/**
 * Deploys the real application.
 *
 * For manually deployed (dev) environments, the previously submitted package and an explicit
 * platform version are used; otherwise both are left for the config server to resolve.
 */
private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, Versions versions, DualLogger logger) {
    Optional<ApplicationPackage> applicationPackage = id.type().environment().isManuallyDeployed()
            ? Optional.of(new ApplicationPackage(controller.applications().applicationStore()
                                                           .getDev(id.application(), id.type().zone(controller.system()))))
            : Optional.empty();
    Optional<Version> vespaVersion = id.type().environment().isManuallyDeployed()
            ? Optional.of(versions.targetPlatform())
            : Optional.empty();
    return deploy(id.application(),
                  id.type(),
                  () -> controller.applications().deploy(id.application(),
                                                         id.type().zone(controller.system()),
                                                         applicationPackage,
                                                         new DeployOptions(false,
                                                                           vespaVersion,
                                                                           false,
                                                                           setTheStage)),
                  // Retry budget is measured from the first start of the corresponding step.
                  controller.jobController().run(id).get()
                            .stepInfo(setTheStage ? deployInitialReal : deployReal).get()
                            .startTime().get(),
                  logger);
}
/** Deploys the tester container, on the current system version, with its generated package. */
private Optional<RunStatus> deployTester(RunId id, DualLogger logger) {
    Version platform = controller.systemVersion();
    logger.log("Deploying the tester container on platform " + platform + " ...");
    return deploy(id.tester().id(),
                  id.type(),
                  () -> controller.applications().deployTester(id.tester(),
                                                               testerPackage(id),
                                                               id.type().zone(controller.system()),
                                                               new DeployOptions(true,
                                                                                 Optional.of(platform),
                                                                                 false,
                                                                                 false)),
                  // Retry budget is measured from the first start of the deployTester step.
                  controller.jobController().run(id).get()
                            .stepInfo(deployTester).get()
                            .startTime().get(),
                  logger);
}
/**
 * Executes the given deployment and translates its outcome, or failure, to a run status.
 *
 * Disallowed re-feed actions fail the deployment with guidance to the user; required restarts
 * are triggered host by host. Config server errors are mapped to retry, out-of-capacity, or
 * deployment failure — transient errors only fail once an hour has passed since startTime.
 */
private Optional<RunStatus> deploy(ApplicationId id, JobType type, Supplier<ActivateResult> deployment,
                                   Instant startTime, DualLogger logger) {
    try {
        PrepareResponse prepareResponse = deployment.get().prepareResponse();
        if ( ! prepareResponse.configChangeActions.refeedActions.stream().allMatch(action -> action.allowed)) {
            // Disallowed re-feed: explain the options, list the offending actions, and fail.
            List<String> messages = new ArrayList<>();
            messages.add("Deploy failed due to non-compatible changes that require re-feed.");
            messages.add("Your options are:");
            messages.add("1. Revert the incompatible changes.");
            messages.add("2. If you think it is safe in your case, you can override this validation, see");
            messages.add(" http:
            messages.add("3. Deploy as a new application under a different name.");
            messages.add("Illegal actions:");
            prepareResponse.configChangeActions.refeedActions.stream()
                           .filter(action -> ! action.allowed)
                           .flatMap(action -> action.messages.stream())
                           .forEach(messages::add);
            messages.add("Details:");
            prepareResponse.log.stream()
                           .map(entry -> entry.message)
                           .forEach(messages::add);
            logger.log(messages);
            return Optional.of(deploymentFailed);
        }

        if (prepareResponse.configChangeActions.restartActions.isEmpty())
            logger.log("No services requiring restart.");
        else
            // Restart each affected host once, in stable order.
            prepareResponse.configChangeActions.restartActions.stream()
                           .flatMap(action -> action.services.stream())
                           .map(service -> service.hostName)
                           .sorted().distinct()
                           .map(Hostname::new)
                           .forEach(hostname -> {
                               controller.applications().restart(new DeploymentId(id, type.zone(controller.system())), Optional.of(hostname));
                               logger.log("Restarting services on host " + hostname.id() + ".");
                           });
        logger.log("Deployment successful.");
        if (prepareResponse.message != null)
            logger.log(prepareResponse.message);

        return Optional.of(running);
    }
    catch (ConfigServerException e) {
        // Transient errors are retried for up to an hour before counting as deployment failure.
        Optional<RunStatus> result = startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
                                     ? Optional.of(deploymentFailed) : Optional.empty();
        switch (e.getErrorCode()) {
            case ACTIVATION_CONFLICT:
            case APPLICATION_LOCK_FAILURE:
            case CERTIFICATE_NOT_READY:
                logger.log("Deployment failed with possibly transient error " + e.getErrorCode() +
                           ", will retry: " + e.getMessage());
                return result;
            case LOAD_BALANCER_NOT_READY:
            case PARENT_HOST_NOT_READY:
                logger.log(e.getServerMessage());
                return result;
            case OUT_OF_CAPACITY:
                logger.log(e.getServerMessage());
                return Optional.of(outOfCapacity);
            case INVALID_APPLICATION_PACKAGE:
            case BAD_REQUEST:
                logger.log(e.getMessage());
                return Optional.of(deploymentFailed);
        }
        // Unmapped error codes are unexpected; let the generic handler in run() deal with them.
        throw e;
    }
}
/** Variant of installReal for the initial (stage-setting) deployment. */
private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) {
    return installReal(id, true, logger);
}
/** Variant of installReal for the target deployment under test. */
private Optional<RunStatus> installReal(RunId id, DualLogger logger) {
    return installReal(id, false, logger);
}
/**
 * Awaits installation of the tester container: config convergence, endpoint visibility, and
 * container readiness.
 *
 * Returns empty to retry; errors out when the relevant timeout has passed since the step started.
 */
private Optional<RunStatus> installTester(RunId id, DualLogger logger) {
    Run run = controller.jobController().run(id).get();
    Version platform = controller.systemVersion();
    ZoneId zone = id.type().zone(controller.system());
    ApplicationId testerId = id.tester().id();

    Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(testerId, zone),
                                                                                                           Optional.of(platform));
    if (services.isEmpty()) {
        logger.log("Config status not currently available -- will retry.");
        // Tolerate missing status for five minutes before erroring out.
        return run.stepInfo(installTester).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
               ? Optional.of(error)
               : Optional.empty();
    }
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone,
                                                                                        testerId,
                                                                                        ImmutableSet.of(active, reserved));
    List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(zone,
                                                                                          nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
    NodeList nodeList = NodeList.of(nodes, parents, services.get());
    logger.log(nodeList.asList().stream()
                       .flatMap(node -> nodeDetails(node, false))
                       .collect(toList()));

    if (nodeList.summary().converged()) {
        if (endpointsAvailable(testerId, zone, logger)) {
            if (containersAreUp(testerId, zone, logger)) {
                logger.log("Tester container successfully installed!");
                return Optional.of(running);
            }
        }
        else if (run.stepInfo(installTester).get().startTime().get().plus(endpointTimeout).isBefore(controller.clock().instant())) {
            logger.log(WARNING, "Tester failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
            return Optional.of(error);
        }
    }

    if (run.stepInfo(installTester).get().startTime().get().plus(testerTimeout).isBefore(controller.clock().instant())) {
        logger.log(WARNING, "Installation of tester failed to complete within " + testerTimeout.toMinutes() + " minutes!");
        return Optional.of(error);
    }

    return Optional.empty();
}
/** Returns true iff all containers in the deployment give 100 consecutive 200 OK responses on /status.html. */
private boolean containersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
    var endpointsByZone = controller.applications().clusterEndpoints(Set.of(new DeploymentId(id, zoneId)));
    if ( ! endpointsByZone.containsKey(zoneId))
        return false;

    for (URI endpoint : endpointsByZone.get(zoneId).values()) {
        // Tester containers have a dedicated readiness probe.
        boolean ready;
        if (id.instance().isTester())
            ready = controller.jobController().cloud().testerReady(endpoint);
        else
            ready = controller.jobController().cloud().ready(endpoint);
        if ( ! ready) {
            logger.log("Failed to get 100 consecutive OKs from " + endpoint);
            return false;
        }
    }
    return true;
}
/** Returns whether all endpoints for the deployment are visible, with resolvable DNS names; logs them if so. */
private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) {
    var endpoints = controller.applications().clusterEndpoints(Set.of(new DeploymentId(id, zone)));
    if ( ! endpoints.containsKey(zone)) {
        logger.log("Endpoints not yet ready.");
        return false;
    }
    for (var endpoint : endpoints.get(zone).values()) {
        if (controller.jobController().cloud().exists(endpoint))
            continue;
        logger.log(INFO, "DNS lookup yielded no IP address for '" + endpoint + "'.");
        return false;
    }
    logEndpoints(endpoints, logger);
    return true;
}
/** Writes the given endpoints, grouped by zone, to the job log as a single message batch. */
private void logEndpoints(Map<ZoneId, Map<ClusterSpec.Id, URI>> endpoints, DualLogger logger) {
    List<String> lines = new ArrayList<>();
    lines.add("Found endpoints:");
    for (var zoneEntry : endpoints.entrySet()) {
        lines.add("- " + zoneEntry.getKey());
        for (var clusterEntry : zoneEntry.getValue().entrySet())
            lines.add(" |-- " + clusterEntry.getValue() + " (" + clusterEntry.getKey() + ")");
    }
    logger.log(lines);
}
/**
 * Renders a per-node status summary for the job log: hostname and orchestration state, pending
 * platform/OS/firmware/restart/reboot work, and per-service config generation status.
 *
 * @param printAllServices whether to list every service, or only those needing new config
 */
private Stream<String> nodeDetails(NodeWithServices node, boolean printAllServices) {
    return Stream.concat(Stream.of(node.node().hostname() + ": " + humanize(node.node().serviceState()),
                                   "--- platform " + node.node().wantedVersion() + (node.needsPlatformUpgrade()
                                           ? " <-- " + (node.node().currentVersion().isEmpty() ? "not booted" : node.node().currentVersion())
                                           : "") +
                                   (node.needsOsUpgrade() && node.isAllowedDown()
                                           ? ", upgrading OS (" + node.node().wantedOsVersion() + " <-- " + node.node().currentOsVersion() + ")"
                                           : "") +
                                   (node.needsFirmwareUpgrade() && node.isAllowedDown()
                                           ? ", upgrading firmware"
                                           : "") +
                                   (node.needsRestart()
                                           ? ", restart pending (" + node.node().wantedRestartGeneration() + " <-- " + node.node().restartGeneration() + ")"
                                           : "") +
                                   (node.needsReboot()
                                           ? ", reboot pending (" + node.node().wantedRebootGeneration() + " <-- " + node.node().rebootGeneration() + ")"
                                           : "")),
                         node.services().stream()
                             .filter(service -> printAllServices || node.needsNewConfig())
                             .map(service -> "--- " + service.type() + " on port " + service.port() + (service.currentGeneration() == -1
                                     ? " has not started "
                                     : " has config generation " + service.currentGeneration() + ", wanted is " + node.wantedConfigGeneration())));
}
/** Maps a node's orchestration state to a human-readable phrase for job-log output. */
private String humanize(Node.ServiceState state) {
    if (state == Node.ServiceState.allowedDown) return "allowed to be DOWN";
    if (state == Node.ServiceState.expectedUp) return "expected to be UP";
    if (state == Node.ServiceState.unorchestrated) return "unorchestrated";
    return state.name(); // Fall back to the raw enum name for any other state.
}
/**
 * Asks the tester container to start its test suite for the given run.
 *
 * Gathers endpoints for all production deployments plus the zone under test, verifies the
 * deployment under test and the tester container are still reachable, then posts the test
 * config to the tester. Returns {@code error} if anything required has vanished, otherwise
 * {@code running}.
 *
 * @param isSetup whether to run the staging setup suite rather than the real test suite
 */
private Optional<RunStatus> startTests(RunId id, boolean isSetup, DualLogger logger) {
    Optional<Deployment> deployment = deployment(id.application(), id.type());
    if (deployment.isEmpty()) {
        logger.log(INFO, "Deployment expired before tests could start.");
        return Optional.of(error);
    }

    // Tests get endpoints for all production deployments of the instance, not only the zone under test.
    var deployments = controller.applications().requireInstance(id.application())
                                .productionDeployments().keySet().stream()
                                .map(zone -> new DeploymentId(id.application(), zone))
                                .collect(Collectors.toSet());
    deployments.add(new DeploymentId(id.application(), id.type().zone(controller.system())));

    logger.log("Attempting to find endpoints ...");
    var endpoints = controller.applications().clusterEndpoints(deployments);
    if ( ! endpoints.containsKey(id.type().zone(controller.system()))) {
        logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!");
        return Optional.of(error);
    }
    logEndpoints(endpoints, logger);

    Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id);
    if (testerEndpoint.isEmpty()) {
        logger.log(WARNING, "Endpoints for the tester container vanished again, while it was still active!");
        return Optional.of(error);
    }

    if ( ! controller.jobController().cloud().testerReady(testerEndpoint.get())) {
        logger.log(WARNING, "Tester container went bad!");
        return Optional.of(error);
    }

    logger.log("Starting tests ...");
    controller.jobController().cloud().startTests(testerEndpoint.get(),
                                                  TesterCloud.Suite.of(id.type(), isSetup),
                                                  testConfigSerializer.configJson(id.application(),
                                                                                  id.type(),
                                                                                  true,
                                                                                  endpoints,
                                                                                  controller.applications().contentClustersByZone(deployments)));
    return Optional.of(running);
}
/**
 * Polls the status of the test suite and translates it to a run status.
 *
 * Returns {@code aborted} if the deployment or the tester certificate expired while tests ran,
 * empty while tests are still running (or the tester endpoint is temporarily unknown), and a
 * terminal status once the tester reports completion.
 */
private Optional<RunStatus> endTests(RunId id, DualLogger logger) {
    if (deployment(id.application(), id.type()).isEmpty()) {
        logger.log(INFO, "Deployment expired before tests could complete.");
        return Optional.of(aborted);
    }

    Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate();
    if (testerCertificate.isPresent()) {
        try {
            testerCertificate.get().checkValidity(Date.from(controller.clock().instant()));
        }
        catch (CertificateExpiredException | CertificateNotYetValidException e) {
            logger.log(INFO, "Tester certificate expired before tests could complete.");
            return Optional.of(aborted);
        }
    }

    controller.jobController().updateTestLog(id);

    // Feature flag: query tester status via the config server instead of the tester endpoint.
    BooleanFlag useConfigServerForTesterAPI = Flags.USE_CONFIG_SERVER_FOR_TESTER_API_CALLS.bindTo(controller.flagSource());
    ZoneId zoneId = id.type().zone(controller.system());
    TesterCloud.Status testStatus;
    boolean useConfigServer = useConfigServerForTesterAPI.with(FetchVector.Dimension.ZONE_ID, zoneId.value()).value();
    // Qualified explicitly: the DualLogger parameter shadows the class-level Logger field.
    InternalStepRunner.logger.log(LogLevel.INFO, Flags.USE_CONFIG_SERVER_FOR_TESTER_API_CALLS.id().toString() +
                                                 " has value " + useConfigServer + " in zone " + zoneId.value());
    if (useConfigServer) {
        testStatus = controller.serviceRegistry().configServer().getTesterStatus(getTesterDeploymentId(id, zoneId));
    } else {
        Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id);
        if (testerEndpoint.isEmpty()) {
            logger.log("Endpoints for tester not found -- trying again later.");
            return Optional.empty();
        }
        testStatus = controller.jobController().cloud().getStatus(testerEndpoint.get());
    }

    switch (testStatus) {
        case NOT_STARTED:
            throw new IllegalStateException("Tester reports tests not started, even though they should have!");
        case RUNNING:
            return Optional.empty();
        case FAILURE:
            logger.log("Tests failed.");
            return Optional.of(testFailure);
        case ERROR:
            logger.log(INFO, "Tester failed running its tests!");
            return Optional.of(error);
        case SUCCESS:
            logger.log("Tests completed successfully.");
            return Optional.of(running);
        default:
            throw new IllegalStateException("Unknown status '" + testStatus + "'!");
    }
}
/** Copies the Vespa log of the deployment, if it still exists; a failure to fetch the log ends the run with error. */
private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) {
    boolean deploymentStillExists = deployment(id.application(), id.type()).isPresent();
    if ( ! deploymentStillExists)
        return Optional.of(running);
    try {
        controller.jobController().updateVespaLog(id);
        return Optional.of(running);
    }
    catch (Exception e) {
        logger.log(INFO, "Failure getting vespa logs for " + id, e);
        return Optional.of(error);
    }
}
/** Deactivates the real deployment of this run, retrying on failure for up to one hour before erroring out. */
private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) {
    try {
        logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
        controller.applications().deactivate(id.application(), id.type().zone(controller.system()));
        return Optional.of(running);
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Failed deleting application " + id.application(), e);
        Instant stepStart = controller.jobController().run(id).get().stepInfo(deactivateReal).get().startTime().get();
        boolean retriedLongEnough = stepStart.isBefore(controller.clock().instant().minus(Duration.ofHours(1)));
        if (retriedLongEnough)
            return Optional.of(error);
        return Optional.empty();
    }
}
/** Deactivates the tester of this run, retrying on failure for up to one hour before erroring out. */
private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) {
    try {
        logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
        controller.jobController().deactivateTester(id.tester(), id.type());
        return Optional.of(running);
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Failed deleting tester of " + id.application(), e);
        Instant stepStart = controller.jobController().run(id).get().stepInfo(deactivateTester).get().startTime().get();
        boolean retriedLongEnough = stepStart.isBefore(controller.clock().instant().minus(Duration.ofHours(1)));
        if (retriedLongEnough)
            return Optional.of(error);
        return Optional.empty();
    }
}
/** Reports the outcome of the run: sends failure notifications if the run failed, then completes the step. */
private Optional<RunStatus> report(RunId id, DualLogger logger) {
    try {
        controller.jobController().active(id)
                  .filter(Run::hasFailed)
                  .ifPresent(run -> sendNotification(run, logger));
        return Optional.of(running);
    }
    catch (IllegalStateException e) {
        logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e);
        return Optional.of(error);
    }
}
/** Sends a mail with a notification of a failed run, if one should be sent. */
private void sendNotification(Run run, DualLogger logger) {
    Application application = controller.applications().requireApplication(TenantAndApplicationId.from(run.id().application()));
    Notifications notifications = application.deploymentSpec().requireInstance(run.id().application().instance()).notifications();
    // The run is "for a new commit" when the change being rolled out equals this run's target application version.
    boolean newCommit = application.require(run.id().application().instance()).change().application()
                                   .map(run.versions().targetApplication()::equals)
                                   .orElse(false);
    When when = newCommit ? failingCommit : failing;
    List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when));
    // The commit author is added only when the spec lists the "author" role for this condition,
    // and an author email is actually recorded on the application version.
    if (notifications.emailRolesFor(when).contains(author))
        run.versions().targetApplication().authorEmail().ifPresent(recipients::add);
    if (recipients.isEmpty())
        return; // Nobody to notify.
    try {
        // run.status() is a single value, so at most one of the branches below fires;
        // out-of-capacity is only reported for production jobs.
        if (run.status() == outOfCapacity && run.id().type().isProduction())
            controller.serviceRegistry().mailer().send(mails.outOfCapacity(run.id(), recipients));
        if (run.status() == deploymentFailed)
            controller.serviceRegistry().mailer().send(mails.deploymentFailure(run.id(), recipients));
        if (run.status() == installationFailed)
            controller.serviceRegistry().mailer().send(mails.installationFailure(run.id(), recipients));
        if (run.status() == testFailure)
            controller.serviceRegistry().mailer().send(mails.testFailure(run.id(), recipients));
        if (run.status() == error)
            controller.serviceRegistry().mailer().send(mails.systemError(run.id(), recipients));
    }
    catch (RuntimeException e) {
        // Mail delivery is best-effort; a send failure must not fail the reporting step.
        logger.log(INFO, "Exception trying to send mail for " + run.id(), e);
    }
}
/** Returns the deployment of the real application in the zone of the given job, if it exists. */
private Optional<Deployment> deployment(ApplicationId id, JobType type) {
    ZoneId zone = type.zone(controller.system());
    return Optional.ofNullable(application(id).deployments().get(zone));
}
/** Returns the real application with the given id. */
private Instance application(ApplicationId id) {
    // Takes and immediately releases the application lock before reading —
    // presumably to serialise with any in-flight modifications so the read is fresh; confirm intent before changing.
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { });
    return controller.applications().requireInstance(id);
}
/**
 * Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout.
 *
 * We time out the job before the deployment expires, for zones where deployments are not persistent,
 * to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry,
 * and the given default installation timeout, minus one minute, is used as a timeout threshold.
 */
private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) {
    Run run = controller.jobController().run(id).get();
    // Outside CD: if the visible deployment predates this run, it is not this run's deployment — never time out on its age.
    if ( ! controller.system().isCd() && run.start().isAfter(deployment.at()))
        return false;
    // Use the zone's deployment TTL when it is shorter than the given default timeout.
    Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone())
                                 .filter(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0)
                                 .orElse(defaultTimeout);
    // One minute of slack is subtracted so logs can still be collected before the deployment expires.
    return deployment.at().isBefore(controller.clock().instant().minus(timeout.minus(Duration.ofMinutes(1))));
}
/** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */
private ApplicationPackage testerPackage(RunId id) {
    ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication();
    DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec();
    ZoneId zone = id.type().zone(controller.system());
    // Tester certificates are generated only in public systems, and only for test environments.
    boolean useTesterCertificate = controller.system().isPublic() && id.type().environment().isTest();
    // Tester node resources: the spec's tester flavor if given, otherwise a larger default for AWS regions.
    byte[] servicesXml = servicesXml(controller.zoneRegistry().accessControlDomain(),
                                     ! controller.system().isPublic(),
                                     useTesterCertificate,
                                     testerFlavorFor(id, spec)
                                             .map(NodeResources::fromLegacyName)
                                             .orElse(zone.region().value().contains("aws-") ?
                                                     DEFAULT_TESTER_RESOURCES_AWS : DEFAULT_TESTER_RESOURCES));
    byte[] testPackage = controller.applications().applicationStore().getTester(id.application().tenant(), id.application().application(), version);
    byte[] deploymentXml = deploymentXml(id.tester(),
                                         spec.athenzDomain(),
                                         spec.requireInstance(id.application().instance()).athenzService(zone.environment(), zone.region()));
    // Zip layout: the stored test package first, then the generated services.xml and deployment.xml,
    // plus key/cert artifacts when a tester certificate is used.
    try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) {
        zipBuilder.add(testPackage);
        zipBuilder.add("services.xml", servicesXml);
        zipBuilder.add("deployment.xml", deploymentXml);
        if (useTesterCertificate)
            appendAndStoreCertificate(zipBuilder, id);
        zipBuilder.close();
        return new ApplicationPackage(zipBuilder.toByteArray());
    }
}
/** Generates a self-signed certificate for the tester, stores it for this run, and adds key and cert to the package. */
private void appendAndStoreCertificate(ZipBuilder zipBuilder, RunId id) {
    KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048);
    // The subject encodes tester id, job type and run number, making the certificate unique to this run.
    X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "." + id.number());
    X509Certificate certificate = X509CertificateBuilder.fromKeypair(keyPair,
                                                                     subject,
                                                                     controller.clock().instant(),
                                                                     controller.clock().instant().plus(certificateTimeout),
                                                                     SignatureAlgorithm.SHA512_WITH_RSA,
                                                                     BigInteger.valueOf(1))
                                                        .build();
    // Stored so the controller can later verify connections from this tester.
    controller.jobController().storeTesterCertificate(id, certificate);
    zipBuilder.add("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8));
    zipBuilder.add("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8));
}
/** Returns the deployment id of the tester application of the given run, in the given zone. */
private DeploymentId getTesterDeploymentId(RunId runId, ZoneId zoneId) {
    return new DeploymentId(runId.tester().id(), zoneId);
}
/** Returns the tester flavor of the first zone of the first deployment step concerning this job's environment, if any. */
private static Optional<String> testerFlavorFor(RunId id, DeploymentSpec spec) {
    return spec.steps().stream()
               .filter(step -> step.concerns(id.type().environment()))
               .findFirst()
               .flatMap(step -> step.zones().get(0).testerFlavor());
}
/** Returns the generated services.xml content for the tester application. */
static byte[] servicesXml(AthenzDomain domain, boolean systemUsesAthenz, boolean useTesterCertificate,
                          NodeResources resources) {
    // The jdisc container is given a fixed 2 GB; half of the remaining memory goes to the surefire test JVM.
    int jdiscMemoryGb = 2;
    int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb());
    int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2);
    String resourceString = String.format(Locale.ENGLISH,
                                          "<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>",
                                          resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name());
    // The vespa.vespa.cd domain maps to vespa.vespa for SIA key/cert file paths below.
    AthenzDomain idDomain = ("vespa.vespa.cd".equals(domain.value()) ? AthenzDomain.from("vespa.vespa") : domain);
    String servicesXml =
            "<?xml version='1.0' encoding='UTF-8'?>\n" +
            "<services xmlns:deploy='vespa' version='1.0'>\n" +
            "    <container version='1.0' id='tester'>\n" +
            "\n" +
            "        <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" +
            "            <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" +
            "                <artifactsPath>artifacts</artifactsPath>\n" +
            "                <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" +
            "                <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
            "                <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" +
            "            </config>\n" +
            "        </component>\n" +
            "\n" +
            "        <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" +
            // NOTE(review): the next line looks truncated by extraction (unterminated string literal, URL cut after "http:") —
            // restore the full <binding> URL from version control.
            "            <binding>http:
            "        </handler>\n" +
            "\n" +
            "        <http>\n" +
            "            <!-- Make sure 4080 is the first port. This will be used by the config server. -->\n" +
            "            <server id='default' port='4080'/>\n" +
            "            <server id='testertls4443' port='4443'>\n" +
            "                <config name=\"jdisc.http.connector\">\n" +
            "                    <tlsClientAuthEnforcer>\n" +
            "                        <enable>true</enable>\n" +
            "                        <pathWhitelist>\n" +
            "                            <item>/status.html</item>\n" +
            "                            <item>/state/v1/config</item>\n" +
            "                        </pathWhitelist>\n" +
            "                    </tlsClientAuthEnforcer>\n" +
            "                </config>\n" +
            "                <ssl>\n" +
            "                    <private-key-file>/var/lib/sia/keys/" + idDomain.value() + ".tenant.key.pem</private-key-file>\n" +
            "                    <certificate-file>/var/lib/sia/certs/" + idDomain.value() + ".tenant.cert.pem</certificate-file>\n" +
            "                    <ca-certificates-file>/opt/yahoo/share/ssl/certs/athenz_certificate_bundle.pem</ca-certificates-file>\n" +
            "                    <client-authentication>want</client-authentication>\n" +
            "                </ssl>\n" +
            "            </server>\n" +
            "            <filtering>\n" +
            // Access control is only emitted when the system uses Athenz.
            (systemUsesAthenz ?
            "                <access-control domain='" + domain.value() + "'>\n" +
            "                    <exclude>\n" +
            // NOTE(review): the next line also looks truncated by extraction (URL cut after "http:") — restore from version control.
            "                        <binding>http:
            "                    </exclude>\n" +
            "                </access-control>\n"
            : "") +
            "                <request-chain id=\"testrunner-api\">\n" +
            "                    <filter id='authz-filter' class='com.yahoo.jdisc.http.filter.security.athenz.AthenzAuthorizationFilter' bundle=\"jdisc-security-filters\">\n" +
            "                        <config name=\"jdisc.http.filter.security.athenz.athenz-authorization-filter\">\n" +
            "                            <credentialsToVerify>TOKEN_ONLY</credentialsToVerify>\n" +
            "                            <roleTokenHeaderName>Yahoo-Role-Auth</roleTokenHeaderName>\n" +
            "                        </config>\n" +
            "                        <component id=\"com.yahoo.jdisc.http.filter.security.athenz.StaticRequestResourceMapper\" bundle=\"jdisc-security-filters\">\n" +
            "                            <config name=\"jdisc.http.filter.security.athenz.static-request-resource-mapper\">\n" +
            "                                <resourceName>" + domain.value() + ":tester-application</resourceName>\n" +
            "                                <action>deploy</action>\n" +
            "                            </config>\n" +
            "                        </component>\n" +
            "                    </filter>\n" +
            "                </request-chain>\n" +
            "            </filtering>\n" +
            "        </http>\n" +
            "\n" +
            "        <nodes count=\"1\" allocated-memory=\"" + jdiscMemoryPct + "%\">\n" +
            "            " + resourceString + "\n" +
            "        </nodes>\n" +
            "    </container>\n" +
            "</services>\n";
    return servicesXml.getBytes(UTF_8);
}
/** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */
private static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) {
    StringBuilder xml = new StringBuilder();
    xml.append("<?xml version='1.0' encoding='UTF-8'?>\n")
       .append("<deployment version=\"1.0\" ");
    athenzDomain.ifPresent(domain -> xml.append("athenz-domain=\"").append(domain.value()).append("\" "));
    athenzService.ifPresent(service -> xml.append("athenz-service=\"").append(service.value()).append("\" "));
    xml.append(">")
       .append("    <instance id=\"").append(id.id().instance().value()).append("\" />")
       .append("</deployment>");
    return xml.toString().getBytes(UTF_8);
}
/** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */
private class DualLogger {
    private final RunId id;   // Run whose job log receives the messages.
    private final Step step;  // Step the messages are attributed to.
    private DualLogger(RunId id, Step step) {
        this.id = id;
        this.step = step;
    }
    /** Logs the given messages at INFO, to the job log only. */
    private void log(String... messages) {
        log(List.of(messages));
    }
    /** Logs the given messages at INFO, to the job log only. */
    private void log(List<String> messages) {
        controller.jobController().log(id, step, INFO, messages);
    }
    private void log(Level level, String message) {
        log(level, message, null);
    }
    /** Logs to both sinks, but keeps the throwable out of the externally visible job log. */
    private void logWithInternalException(Level level, String message, Throwable thrown) {
        logger.log(level, id + " at " + step + ": " + message, thrown);
        controller.jobController().log(id, step, level, message);
    }
    /** Logs to both sinks; the stack trace of any throwable is appended to the job log message. */
    private void log(Level level, String message, Throwable thrown) {
        logger.log(level, id + " at " + step + ": " + message, thrown);
        if (thrown != null) {
            ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream();
            thrown.printStackTrace(new PrintStream(traceBuffer));
            message += "\n" + traceBuffer;
        }
        controller.jobController().log(id, step, level, message);
    }
}
} |
Possibly - I'm just thinking such a util might be used to dump out a lot of hosts - like all hosts or all but a few - but if that's not possible there is no point in making it complicated. | private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) {
// Body of installReal(RunId, boolean setTheStage, DualLogger): polls node and service convergence of the
// real deployment, returning running on success, installationFailed/error on failure, or empty to poll again.
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
    logger.log(INFO, "Deployment expired before installation was successful.");
    return Optional.of(installationFailed);
}
Versions versions = controller.jobController().run(id).get().versions();
// When setting the stage, install the source platform (falling back to target); otherwise the target platform.
Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform();
Run run = controller.jobController().run(id).get();
Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(id.application(), id.type().zone(controller.system())),
                                                                                                      Optional.of(platform));
if (services.isEmpty()) {
    // No convergence data yet: retry, but give up with error after five minutes in this step.
    logger.log("Config status not currently available -- will retry.");
    Step step = setTheStage ? installInitialReal : installReal;
    return run.stepInfo(step).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
            ? Optional.of(error)
            : Optional.empty();
}
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
                                                                                    id.application(),
                                                                                    ImmutableSet.of(active, reserved));
List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
                                                                                      nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
NodeList nodeList = NodeList.of(nodes, parents, services.get());
boolean firstTick = run.convergenceSummary().isEmpty();
if (firstTick) { // Log a full node report on the first poll of this step.
    logger.log(nodeList.asList().stream()
                       .flatMap(node -> nodeDetails(node, true))
                       .collect(toList()));
}
ConvergenceSummary summary = nodeList.summary();
if (summary.converged()) {
    controller.jobController().locked(id, lockedRun -> lockedRun.withSummary(null));
    // Converged: success requires endpoints in DNS and all containers answering health checks.
    if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) {
        if (containersAreUp(id.application(), id.type().zone(controller.system()), logger)) {
            logger.log("Installation succeeded!");
            return Optional.of(running);
        }
    }
    else if (timedOut(id, deployment.get(), endpointTimeout)) {
        logger.log(WARNING, "Endpoints failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
        return Optional.of(error);
    }
}
// Not (fully) converged: check the various failure conditions before polling again.
boolean failed = false;
NodeList suspendedTooLong = nodeList.suspendedSince(controller.clock().instant().minus(installationTimeout));
if ( ! suspendedTooLong.isEmpty()) {
    logger.log(INFO, "Some nodes have been suspended for more than " + installationTimeout.toMinutes() + " minutes.");
    failed = true;
}
if (run.noNodesDownSince()
       .map(since -> since.isBefore(controller.clock().instant().minus(installationTimeout)))
       .orElse(false)) {
    if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0)
        logger.log(INFO, "No nodes allowed to suspend to progress installation for " + installationTimeout.toMinutes() + " minutes.");
    else
        logger.log(INFO, "Nodes not able to start with new application package.");
    failed = true;
}
Duration timeout = JobRunner.jobTimeout.minusHours(1);
if (timedOut(id, deployment.get(), timeout)) {
    // NOTE(review): message is missing a space before "hours!" — renders e.g. "within 11hours!".
    logger.log(INFO, "Installation failed to complete within " + timeout.toHours() + "hours!");
    failed = true;
}
if (failed) {
    logger.log(nodeList.asList().stream()
                       .flatMap(node -> nodeDetails(node, true))
                       .collect(toList()));
    return Optional.of(installationFailed);
}
if ( ! firstTick) // On later polls, only report the nodes currently allowed to be down.
    logger.log(nodeList.allowedDown().asList().stream()
                       .flatMap(node -> nodeDetails(node, false))
                       .collect(toList()));
controller.jobController().locked(id, lockedRun -> {
    // Track the instant since which no nodes have been down; cleared (null) while any node is down.
    Instant noNodesDownSince = summary.down() == 0 ? lockedRun.noNodesDownSince().orElse(controller.clock().instant()) : null;
    return lockedRun.noNodesDownSince(noNodesDownSince).withSummary(summary);
});
return Optional.empty();
} | logger.log(INFO, "Some nodes have been suspended for more than " + installationTimeout.toMinutes() + " minutes."); | private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) {
// Body of installReal(RunId, boolean setTheStage, DualLogger) — duplicate of the copy above, kept byte-identical;
// polls node and service convergence, returning running / installationFailed / error, or empty to poll again.
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
    logger.log(INFO, "Deployment expired before installation was successful.");
    return Optional.of(installationFailed);
}
Versions versions = controller.jobController().run(id).get().versions();
// When setting the stage, install the source platform (falling back to target); otherwise the target platform.
Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform();
Run run = controller.jobController().run(id).get();
Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(id.application(), id.type().zone(controller.system())),
                                                                                                      Optional.of(platform));
if (services.isEmpty()) {
    // No convergence data yet: retry, but give up with error after five minutes in this step.
    logger.log("Config status not currently available -- will retry.");
    Step step = setTheStage ? installInitialReal : installReal;
    return run.stepInfo(step).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
            ? Optional.of(error)
            : Optional.empty();
}
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
                                                                                    id.application(),
                                                                                    ImmutableSet.of(active, reserved));
List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
                                                                                      nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
NodeList nodeList = NodeList.of(nodes, parents, services.get());
boolean firstTick = run.convergenceSummary().isEmpty();
if (firstTick) { // Log a full node report on the first poll of this step.
    logger.log(nodeList.asList().stream()
                       .flatMap(node -> nodeDetails(node, true))
                       .collect(toList()));
}
ConvergenceSummary summary = nodeList.summary();
if (summary.converged()) {
    controller.jobController().locked(id, lockedRun -> lockedRun.withSummary(null));
    // Converged: success requires endpoints in DNS and all containers answering health checks.
    if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) {
        if (containersAreUp(id.application(), id.type().zone(controller.system()), logger)) {
            logger.log("Installation succeeded!");
            return Optional.of(running);
        }
    }
    else if (timedOut(id, deployment.get(), endpointTimeout)) {
        logger.log(WARNING, "Endpoints failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
        return Optional.of(error);
    }
}
// Not (fully) converged: check the various failure conditions before polling again.
boolean failed = false;
NodeList suspendedTooLong = nodeList.suspendedSince(controller.clock().instant().minus(installationTimeout));
if ( ! suspendedTooLong.isEmpty()) {
    logger.log(INFO, "Some nodes have been suspended for more than " + installationTimeout.toMinutes() + " minutes.");
    failed = true;
}
if (run.noNodesDownSince()
       .map(since -> since.isBefore(controller.clock().instant().minus(installationTimeout)))
       .orElse(false)) {
    if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0)
        logger.log(INFO, "No nodes allowed to suspend to progress installation for " + installationTimeout.toMinutes() + " minutes.");
    else
        logger.log(INFO, "Nodes not able to start with new application package.");
    failed = true;
}
Duration timeout = JobRunner.jobTimeout.minusHours(1);
if (timedOut(id, deployment.get(), timeout)) {
    // NOTE(review): message is missing a space before "hours!" — renders e.g. "within 11hours!".
    logger.log(INFO, "Installation failed to complete within " + timeout.toHours() + "hours!");
    failed = true;
}
if (failed) {
    logger.log(nodeList.asList().stream()
                       .flatMap(node -> nodeDetails(node, true))
                       .collect(toList()));
    return Optional.of(installationFailed);
}
if ( ! firstTick) // On later polls, only report the nodes currently allowed to be down.
    logger.log(nodeList.allowedDown().asList().stream()
                       .flatMap(node -> nodeDetails(node, false))
                       .collect(toList()));
controller.jobController().locked(id, lockedRun -> {
    // Track the instant since which no nodes have been down; cleared (null) while any node is down.
    Instant noNodesDownSince = summary.down() == 0 ? lockedRun.noNodesDownSince().orElse(controller.clock().instant()) : null;
    return lockedRun.noNodesDownSince(noNodesDownSince).withSummary(summary);
});
return Optional.empty();
} | class InternalStepRunner implements StepRunner {
private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName());
// Default node resources for the tester container; AWS regions get a larger variant.
private static final NodeResources DEFAULT_TESTER_RESOURCES =
        new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any);
private static final NodeResources DEFAULT_TESTER_RESOURCES_AWS =
        new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any);
static final Duration endpointTimeout = Duration.ofMinutes(15);      // Max wait for endpoints after convergence.
static final Duration testerTimeout = Duration.ofMinutes(30);        // Max wait for tester container installation.
static final Duration installationTimeout = Duration.ofMinutes(60);  // Max node suspension / no-progress time during install.
static final Duration certificateTimeout = Duration.ofMinutes(300);  // Validity period of generated tester certificates.
private final Controller controller;
private final TestConfigSerializer testConfigSerializer;
private final DeploymentFailureMails mails;
/** Creates a step runner backed by the given controller. */
public InternalStepRunner(Controller controller) {
    this.controller = controller;
    this.testConfigSerializer = new TestConfigSerializer(controller.system());
    this.mails = new DeploymentFailureMails(controller.zoneRegistry());
}
/** Dispatches the given step of the given run, returning its new status, or empty if it is not yet complete. */
@Override
public Optional<RunStatus> run(LockedStep step, RunId id) {
    DualLogger logger = new DualLogger(id, step.get());
    try {
        switch (step.get()) {
            case deployTester: return deployTester(id, logger);
            case deployInitialReal: return deployInitialReal(id, logger);
            case installInitialReal: return installInitialReal(id, logger);
            case deployReal: return deployReal(id, logger);
            case installTester: return installTester(id, logger);
            case installReal: return installReal(id, logger);
            case startStagingSetup: return startTests(id, true, logger);
            case endStagingSetup: return endTests(id, logger);
            case startTests: return startTests(id, false, logger);
            case endTests: return endTests(id, logger);
            case copyVespaLogs: return copyVespaLogs(id, logger);
            case deactivateReal: return deactivateReal(id, logger);
            case deactivateTester: return deactivateTester(id, logger);
            case report: return report(id, logger);
            default: throw new AssertionError("Unknown step '" + step + "'!");
        }
    }
    catch (UncheckedIOException e) {
        // IO problems are treated as transient: logged internally (throwable kept out of the job log) and retried.
        logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e);
        return Optional.empty();
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Unexpected exception running " + id, e);
        // Steps in the job profile's always-run set are cleanup steps; keep retrying those rather than failing.
        if (JobProfile.of(id.type()).alwaysRun().contains(step.get())) {
            logger.log("Will keep trying, as this is a cleanup step.");
            return Optional.empty();
        }
        return Optional.of(error);
    }
}
/** Deploys the source platform and application versions (falling back to the targets) to set the stage. */
private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) {
    Versions versions = controller.jobController().run(id).get().versions();
    Version platformToDeploy = versions.sourcePlatform().orElse(versions.targetPlatform());
    ApplicationVersion applicationToDeploy = versions.sourceApplication().orElse(versions.targetApplication());
    logger.log("Deploying platform version " + platformToDeploy +
               " and application version " + applicationToDeploy.id() + " ...");
    return deployReal(id, true, versions, logger);
}
/** Deploys the target platform and application versions of this run. */
private Optional<RunStatus> deployReal(RunId id, DualLogger logger) {
    Versions versions = controller.jobController().run(id).get().versions();
    String message = "Deploying platform version " + versions.targetPlatform() +
                     " and application version " + versions.targetApplication().id() + " ...";
    logger.log(message);
    return deployReal(id, false, versions, logger);
}
/** Deploys the real application for this run; source or target versions depending on whether this sets the stage. */
private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, Versions versions, DualLogger logger) {
    // Manually deployed environments fetch the previously uploaded dev package and pin the platform version;
    // other environments pass neither, letting the deployment resolve them.
    Optional<ApplicationPackage> applicationPackage = id.type().environment().isManuallyDeployed()
            ? Optional.of(new ApplicationPackage(controller.applications().applicationStore()
                                                           .getDev(id.application(), id.type().zone(controller.system()))))
            : Optional.empty();
    Optional<Version> vespaVersion = id.type().environment().isManuallyDeployed()
            ? Optional.of(versions.targetPlatform())
            : Optional.empty();
    return deploy(id.application(),
                  id.type(),
                  () -> controller.applications().deploy(id.application(),
                                                         id.type().zone(controller.system()),
                                                         applicationPackage,
                                                         new DeployOptions(false,
                                                                           vespaVersion,
                                                                           false,
                                                                           setTheStage)),
                  // Step start time is used by deploy() to decide when transient failures become permanent.
                  controller.jobController().run(id).get()
                            .stepInfo(setTheStage ? deployInitialReal : deployReal).get()
                            .startTime().get(),
                  logger);
}
/** Deploys the tester container, built from the generated tester package, on the current system version. */
private Optional<RunStatus> deployTester(RunId id, DualLogger logger) {
    Version platform = controller.systemVersion();
    logger.log("Deploying the tester container on platform " + platform + " ...");
    return deploy(id.tester().id(),
                  id.type(),
                  () -> controller.applications().deployTester(id.tester(),
                                                               testerPackage(id),
                                                               id.type().zone(controller.system()),
                                                               new DeployOptions(true,
                                                                                 Optional.of(platform),
                                                                                 false,
                                                                                 false)),
                  // Step start time is used by deploy() to decide when transient failures become permanent.
                  controller.jobController().run(id).get()
                            .stepInfo(deployTester).get()
                            .startTime().get(),
                  logger);
}
/**
 * Performs the given deployment and interprets its outcome: reports blocking re-feed actions, triggers any
 * required service restarts, and maps config server errors to retries or terminal run statuses.
 */
private Optional<RunStatus> deploy(ApplicationId id, JobType type, Supplier<ActivateResult> deployment,
                                   Instant startTime, DualLogger logger) {
    try {
        PrepareResponse prepareResponse = deployment.get().prepareResponse();
        // Any disallowed re-feed action blocks the deployment; explain the options to the user and fail.
        if ( ! prepareResponse.configChangeActions.refeedActions.stream().allMatch(action -> action.allowed)) {
            List<String> messages = new ArrayList<>();
            messages.add("Deploy failed due to non-compatible changes that require re-feed.");
            messages.add("Your options are:");
            messages.add("1. Revert the incompatible changes.");
            messages.add("2. If you think it is safe in your case, you can override this validation, see");
            // NOTE(review): the next line looks truncated by extraction (URL cut after "http:") — restore from version control.
            messages.add("   http:
            messages.add("3. Deploy as a new application under a different name.");
            messages.add("Illegal actions:");
            prepareResponse.configChangeActions.refeedActions.stream()
                           .filter(action -> ! action.allowed)
                           .flatMap(action -> action.messages.stream())
                           .forEach(messages::add);
            messages.add("Details:");
            prepareResponse.log.stream()
                           .map(entry -> entry.message)
                           .forEach(messages::add);
            logger.log(messages);
            return Optional.of(deploymentFailed);
        }
        // Restart each distinct host named by the config change's restart actions.
        if (prepareResponse.configChangeActions.restartActions.isEmpty())
            logger.log("No services requiring restart.");
        else
            prepareResponse.configChangeActions.restartActions.stream()
                           .flatMap(action -> action.services.stream())
                           .map(service -> service.hostName)
                           .sorted().distinct()
                           .map(Hostname::new)
                           .forEach(hostname -> {
                               controller.applications().restart(new DeploymentId(id, type.zone(controller.system())), Optional.of(hostname));
                               logger.log("Restarting services on host " + hostname.id() + ".");
                           });
        logger.log("Deployment successful.");
        if (prepareResponse.message != null)
            logger.log(prepareResponse.message);
        return Optional.of(running);
    }
    catch (ConfigServerException e) {
        // Transient errors are retried until the step has run for an hour, after which they become deploymentFailed.
        Optional<RunStatus> result = startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
                ? Optional.of(deploymentFailed) : Optional.empty();
        switch (e.getErrorCode()) {
            case ACTIVATION_CONFLICT:
            case APPLICATION_LOCK_FAILURE:
            case CERTIFICATE_NOT_READY:
                logger.log("Deployment failed with possibly transient error " + e.getErrorCode() +
                           ", will retry: " + e.getMessage());
                return result;
            case LOAD_BALANCER_NOT_READY:
            case PARENT_HOST_NOT_READY:
                logger.log(e.getServerMessage());
                return result;
            case OUT_OF_CAPACITY:
                logger.log(e.getServerMessage());
                return Optional.of(outOfCapacity);
            case INVALID_APPLICATION_PACKAGE:
            case BAD_REQUEST:
                logger.log(e.getMessage());
                return Optional.of(deploymentFailed);
        }
        // Unknown error codes propagate to the generic handling in run().
        throw e;
    }
}
/** Waits for the initial (stage-setting) real deployment to be installed. */
private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) {
    return installReal(id, true, logger);
}
/** Waits for the target real deployment to be installed. */
private Optional<RunStatus> installReal(RunId id, DualLogger logger) {
    return installReal(id, false, logger);
}
/** Polls installation of the tester container, returning running on success, error on timeout, or empty to retry. */
private Optional<RunStatus> installTester(RunId id, DualLogger logger) {
    Run run = controller.jobController().run(id).get();
    Version platform = controller.systemVersion();
    ZoneId zone = id.type().zone(controller.system());
    ApplicationId testerId = id.tester().id();
    Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(testerId, zone),
                                                                                                          Optional.of(platform));
    if (services.isEmpty()) {
        // No convergence data yet: retry, but give up with error after five minutes in this step.
        logger.log("Config status not currently available -- will retry.");
        return run.stepInfo(installTester).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
                ? Optional.of(error)
                : Optional.empty();
    }
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone,
                                                                                        testerId,
                                                                                        ImmutableSet.of(active, reserved));
    List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(zone,
                                                                                          nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
    NodeList nodeList = NodeList.of(nodes, parents, services.get());
    logger.log(nodeList.asList().stream()
                       .flatMap(node -> nodeDetails(node, false))
                       .collect(toList()));
    if (nodeList.summary().converged()) {
        // Converged: success requires tester endpoints in DNS and the tester container answering health checks.
        if (endpointsAvailable(testerId, zone, logger)) {
            if (containersAreUp(testerId, zone, logger)) {
                logger.log("Tester container successfully installed!");
                return Optional.of(running);
            }
        }
        else if (run.stepInfo(installTester).get().startTime().get().plus(endpointTimeout).isBefore(controller.clock().instant())) {
            logger.log(WARNING, "Tester failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
            return Optional.of(error);
        }
    }
    // Overall installation deadline for the tester, independent of convergence state.
    if (run.stepInfo(installTester).get().startTime().get().plus(testerTimeout).isBefore(controller.clock().instant())) {
        logger.log(WARNING, "Installation of tester failed to complete within " + testerTimeout.toMinutes() + " minutes!");
        return Optional.of(error);
    }
    return Optional.empty();
}
/** Returns true iff all containers in the deployment give 100 consecutive 200 OK responses on /status.html. */
private boolean containersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
    var zoneEndpoints = controller.applications().clusterEndpoints(Set.of(new DeploymentId(id, zoneId)));
    if ( ! zoneEndpoints.containsKey(zoneId))
        return false;
    boolean isTester = id.instance().isTester();
    for (URI endpoint : zoneEndpoints.get(zoneId).values()) {
        boolean ready;
        if (isTester)
            ready = controller.jobController().cloud().testerReady(endpoint);
        else
            ready = controller.jobController().cloud().ready(endpoint);
        if ( ! ready) {
            logger.log("Failed to get 100 consecutive OKs from " + endpoint);
            return false;
        }
    }
    return true;
}
/** Returns whether all cluster endpoints of the deployment are declared and resolve in DNS, logging them when they do. */
private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) {
    var endpoints = controller.applications().clusterEndpoints(Set.of(new DeploymentId(id, zone)));
    if ( ! endpoints.containsKey(zone)) {
        logger.log("Endpoints not yet ready.");
        return false;
    }
    // Every endpoint must resolve to an IP address before installation is considered complete.
    for (var endpoint : endpoints.get(zone).values())
        if ( ! controller.jobController().cloud().exists(endpoint)) {
            logger.log(INFO, "DNS lookup yielded no IP address for '" + endpoint + "'.");
            return false;
        }
    logEndpoints(endpoints, logger);
    return true;
}
/** Writes all discovered endpoints, grouped per zone, to the job log. */
private void logEndpoints(Map<ZoneId, Map<ClusterSpec.Id, URI>> endpoints, DualLogger logger) {
    List<String> lines = new ArrayList<>();
    lines.add("Found endpoints:");
    for (var zoneEntry : endpoints.entrySet()) {
        lines.add("- " + zoneEntry.getKey());
        for (var clusterEntry : zoneEntry.getValue().entrySet())
            lines.add(" |-- " + clusterEntry.getValue() + " (" + clusterEntry.getKey() + ")");
    }
    logger.log(lines);
}
/**
 * Renders a human-readable status summary for a node: one header line with hostname and
 * orchestration state, one "--- platform ..." line listing pending upgrades/restarts/reboots
 * ("&lt;--" separates wanted from current), then one line per service (filtered unless
 * printAllServices, or the node needs new config).
 */
private Stream<String> nodeDetails(NodeWithServices node, boolean printAllServices) {
return Stream.concat(Stream.of(node.node().hostname() + ": " + humanize(node.node().serviceState()),
"--- platform " + node.node().wantedVersion() + (node.needsPlatformUpgrade()
? " <-- " + (node.node().currentVersion().isEmpty() ? "not booted" : node.node().currentVersion())
: "") +
// OS and firmware upgrades are only reported while the node is allowed to be down.
(node.needsOsUpgrade() && node.isAllowedDown()
? ", upgrading OS (" + node.node().wantedOsVersion() + " <-- " + node.node().currentOsVersion() + ")"
: "") +
(node.needsFirmwareUpgrade() && node.isAllowedDown()
? ", upgrading firmware"
: "") +
(node.needsRestart()
? ", restart pending (" + node.node().wantedRestartGeneration() + " <-- " + node.node().restartGeneration() + ")"
: "") +
(node.needsReboot()
? ", reboot pending (" + node.node().wantedRebootGeneration() + " <-- " + node.node().rebootGeneration() + ")"
: "")),
// NOTE(review): the filter ignores its 'service' argument — when the node needs new config,
// ALL services are printed, not only stale ones. Confirm this is intentional.
node.services().stream()
.filter(service -> printAllServices || node.needsNewConfig())
.map(service -> "--- " + service.type() + " on port " + service.port() + (service.currentGeneration() == -1
? " has not started "
: " has config generation " + service.currentGeneration() + ", wanted is " + node.wantedConfigGeneration())));
}
/** Translates an orchestration service state into readable text; unknown states fall back to the enum name. */
private String humanize(Node.ServiceState state) {
    if (state == Node.ServiceState.allowedDown) return "allowed to be DOWN";
    if (state == Node.ServiceState.expectedUp) return "expected to be UP";
    if (state == Node.ServiceState.unorchestrated) return "unorchestrated";
    return state.name();
}
/**
 * Starts the test suite in the tester container for the given run.
 * Collects endpoints of all production deployments plus the zone under test, verifies the
 * tester endpoint is up, and posts the generated test config to the tester.
 * Returns error if the deployment, its endpoints, or the tester have vanished; running otherwise.
 */
private Optional<RunStatus> startTests(RunId id, boolean isSetup, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
logger.log(INFO, "Deployment expired before tests could start.");
return Optional.of(error);
}
// Tests may probe all production deployments, not just the zone under test.
var deployments = controller.applications().requireInstance(id.application())
.productionDeployments().keySet().stream()
.map(zone -> new DeploymentId(id.application(), zone))
.collect(Collectors.toSet());
deployments.add(new DeploymentId(id.application(), id.type().zone(controller.system())));
logger.log("Attempting to find endpoints ...");
var endpoints = controller.applications().clusterEndpoints(deployments);
if ( ! endpoints.containsKey(id.type().zone(controller.system()))) {
logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!");
return Optional.of(error);
}
logEndpoints(endpoints, logger);
Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id);
if (testerEndpoint.isEmpty()) {
logger.log(WARNING, "Endpoints for the tester container vanished again, while it was still active!");
return Optional.of(error);
}
if ( ! controller.jobController().cloud().testerReady(testerEndpoint.get())) {
logger.log(WARNING, "Tester container went bad!");
return Optional.of(error);
}
logger.log("Starting tests ...");
// isSetup selects the staging-setup suite vs. the real suite for this job type.
controller.jobController().cloud().startTests(testerEndpoint.get(),
TesterCloud.Suite.of(id.type(), isSetup),
testConfigSerializer.configJson(id.application(),
id.type(),
true,
endpoints,
controller.applications().contentClustersByZone(deployments)));
return Optional.of(running);
}
/**
 * Polls the tester for test progress and maps its status to a run status.
 * Aborts if the deployment or the tester certificate expired; returns empty (retry later)
 * while tests are still running or the tester endpoint is temporarily unavailable.
 */
private Optional<RunStatus> endTests(RunId id, DualLogger logger) {
if (deployment(id.application(), id.type()).isEmpty()) {
logger.log(INFO, "Deployment expired before tests could complete.");
return Optional.of(aborted);
}
Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate();
if (testerCertificate.isPresent()) {
try {
testerCertificate.get().checkValidity(Date.from(controller.clock().instant()));
}
catch (CertificateExpiredException | CertificateNotYetValidException e) {
logger.log(INFO, "Tester certificate expired before tests could complete.");
return Optional.of(aborted);
}
}
controller.jobController().updateTestLog(id);
// Feature flag: fetch tester status via the config server instead of talking to the tester directly.
BooleanFlag useConfigServerForTesterAPI = Flags.USE_CONFIG_SERVER_FOR_TESTER_API_CALLS.bindTo(controller.flagSource());
ZoneId zoneId = id.type().zone(controller.system());
TesterCloud.Status testStatus;
boolean useConfigServer = useConfigServerForTesterAPI.with(FetchVector.Dimension.ZONE_ID, zoneId.value()).value();
// NOTE(review): logs to the class logger only, not the run log (DualLogger) — presumably debug output for the flag rollout.
InternalStepRunner.logger.log(LogLevel.INFO, Flags.USE_CONFIG_SERVER_FOR_TESTER_API_CALLS.id().toString() +
" has value " + useConfigServer + " in zone " + zoneId.value());
if (useConfigServer) {
testStatus = controller.serviceRegistry().configServer().getTesterStatus(getTesterDeploymentId(id, zoneId));
} else {
Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id);
if (testerEndpoint.isEmpty()) {
logger.log("Endpoints for tester not found -- trying again later.");
return Optional.empty();
}
testStatus = controller.jobController().cloud().getStatus(testerEndpoint.get());
}
switch (testStatus) {
case NOT_STARTED:
throw new IllegalStateException("Tester reports tests not started, even though they should have!");
case RUNNING:
// Not done yet; keep polling.
return Optional.empty();
case FAILURE:
logger.log("Tests failed.");
return Optional.of(testFailure);
case ERROR:
logger.log(INFO, "Tester failed running its tests!");
return Optional.of(error);
case SUCCESS:
logger.log("Tests completed successfully.");
return Optional.of(running);
default:
throw new IllegalStateException("Unknown status '" + testStatus + "'!");
}
}
/**
 * Copies the Vespa logs of the deployment into the run log, if the deployment still exists.
 * A failure to fetch logs fails the step with error; a missing deployment is not an error.
 */
private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) {
    if (deployment(id.application(), id.type()).isPresent()) {
        try {
            controller.jobController().updateVespaLog(id);
        }
        catch (Exception e) {
            logger.log(INFO, "Failure getting vespa logs for " + id, e);
            return Optional.of(error);
        }
    }
    return Optional.of(running);
}
/**
 * Deactivates the real deployment of this run.
 * Failures are retried (empty) for up to an hour from step start, then the step errors out.
 */
private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) {
    try {
        logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
        controller.applications().deactivate(id.application(), id.type().zone(controller.system()));
        return Optional.of(running);
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Failed deleting application " + id.application(), e);
        Instant stepStart = controller.jobController().run(id).get().stepInfo(deactivateReal).get().startTime().get();
        boolean failedForOverAnHour = stepStart.isBefore(controller.clock().instant().minus(Duration.ofHours(1)));
        return failedForOverAnHour ? Optional.of(error) : Optional.empty();
    }
}
/**
 * Deactivates the tester application of this run.
 * Failures are retried (empty) for up to an hour from step start, then the step errors out.
 */
private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) {
    try {
        logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
        controller.jobController().deactivateTester(id.tester(), id.type());
        return Optional.of(running);
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Failed deleting tester of " + id.application(), e);
        Instant stepStart = controller.jobController().run(id).get().stepInfo(deactivateTester).get().startTime().get();
        boolean failedForOverAnHour = stepStart.isBefore(controller.clock().instant().minus(Duration.ofHours(1)));
        return failedForOverAnHour ? Optional.of(error) : Optional.empty();
    }
}
/**
 * Final reporting step: sends a failure notification if the (still active) run has failed.
 * An IllegalStateException here means the job is no longer supposed to run, which fails the step.
 */
private Optional<RunStatus> report(RunId id, DualLogger logger) {
    try {
        controller.jobController().active(id).ifPresent(run -> {
            if (run.hasFailed())
                sendNotification(run, logger);
        });
        return Optional.of(running);
    }
    catch (IllegalStateException e) {
        logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e);
        return Optional.of(error);
    }
}
/** Sends a mail with a notification of a failed run, if one should be sent. */
private void sendNotification(Run run, DualLogger logger) {
Application application = controller.applications().requireApplication(TenantAndApplicationId.from(run.id().application()));
Notifications notifications = application.deploymentSpec().requireInstance(run.id().application().instance()).notifications();
// A "new commit" failure means the change being deployed is exactly the failing application version.
boolean newCommit = application.require(run.id().application().instance()).change().application()
.map(run.versions().targetApplication()::equals)
.orElse(false);
When when = newCommit ? failingCommit : failing;
List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when));
if (notifications.emailRolesFor(when).contains(author))
run.versions().targetApplication().authorEmail().ifPresent(recipients::add);
if (recipients.isEmpty())
return;
try {
// Out-of-capacity mails are only sent for production jobs; each status maps to its own template.
if (run.status() == outOfCapacity && run.id().type().isProduction())
controller.serviceRegistry().mailer().send(mails.outOfCapacity(run.id(), recipients));
if (run.status() == deploymentFailed)
controller.serviceRegistry().mailer().send(mails.deploymentFailure(run.id(), recipients));
if (run.status() == installationFailed)
controller.serviceRegistry().mailer().send(mails.installationFailure(run.id(), recipients));
if (run.status() == testFailure)
controller.serviceRegistry().mailer().send(mails.testFailure(run.id(), recipients));
if (run.status() == error)
controller.serviceRegistry().mailer().send(mails.systemError(run.id(), recipients));
}
catch (RuntimeException e) {
// Mail delivery is best-effort; a failure to notify must not fail the run.
logger.log(INFO, "Exception trying to send mail for " + run.id(), e);
}
}
/** Returns the deployment of the real application in the zone of the given job, if it exists. */
private Optional<Deployment> deployment(ApplicationId id, JobType type) {
    ZoneId zone = type.zone(controller.system());
    return Optional.ofNullable(application(id).deployments().get(zone));
}
/** Returns the real application with the given id. */
private Instance application(ApplicationId id) {
// Take and immediately release the application lock with a no-op, so the subsequent read
// observes any concurrent update — presumably a memory-visibility fence; confirm intent.
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { });
return controller.applications().requireInstance(id);
}
/**
 * Returns whether the time since deployment exceeds the zone's deployment expiry, or the given timeout.
 *
 * The job is timed out before the deployment itself expires, in zones without persistent
 * deployments, so the Vespa log can still be collected. The effective threshold is the smaller
 * of the zone's deployment TTL and the default timeout, minus one minute of slack.
 */
private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) {
    Run run = controller.jobController().run(id).get();
    // Outside CD systems, a deployment older than this run cannot time the run out.
    if ( ! controller.system().isCd() && run.start().isAfter(deployment.at()))
        return false;

    Duration zoneTtl = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone())
                                 .orElse(defaultTimeout);
    Duration timeout = zoneTtl.compareTo(defaultTimeout) < 0 ? zoneTtl : defaultTimeout;
    Instant deadline = deployment.at().plus(timeout).minus(Duration.ofMinutes(1));
    return deadline.isBefore(controller.clock().instant());
}
/** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */
private ApplicationPackage testerPackage(RunId id) {
ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication();
DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec();
ZoneId zone = id.type().zone(controller.system());
// A tester certificate is only generated for test environments in public systems.
boolean useTesterCertificate = controller.system().isPublic() && id.type().environment().isTest();
byte[] servicesXml = servicesXml(controller.zoneRegistry().accessControlDomain(),
! controller.system().isPublic(),
useTesterCertificate,
testerFlavorFor(id, spec)
.map(NodeResources::fromLegacyName)
.orElse(zone.region().value().contains("aws-") ?
DEFAULT_TESTER_RESOURCES_AWS : DEFAULT_TESTER_RESOURCES));
byte[] testPackage = controller.applications().applicationStore().getTester(id.application().tenant(), id.application().application(), version);
byte[] deploymentXml = deploymentXml(id.tester(),
spec.athenzDomain(),
spec.requireInstance(id.application().instance()).athenzService(zone.environment(), zone.region()));
// Zip up the test fat-jar together with the generated services.xml and deployment.xml;
// the +1000 is presumably headroom for zip metadata — confirm against ZipBuilder's contract.
try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) {
zipBuilder.add(testPackage);
zipBuilder.add("services.xml", servicesXml);
zipBuilder.add("deployment.xml", deploymentXml);
if (useTesterCertificate)
appendAndStoreCertificate(zipBuilder, id);
zipBuilder.close();
return new ApplicationPackage(zipBuilder.toByteArray());
}
}
/**
 * Generates a self-signed RSA certificate for the tester of this run, stores it with the run,
 * and adds the private key and certificate PEMs to the tester package under artifacts/.
 */
private void appendAndStoreCertificate(ZipBuilder zipBuilder, RunId id) {
KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048);
// Subject encodes tester id, job type and run number, making each run's certificate unique.
X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "." + id.number());
X509Certificate certificate = X509CertificateBuilder.fromKeypair(keyPair,
subject,
controller.clock().instant(),
controller.clock().instant().plus(certificateTimeout),
SignatureAlgorithm.SHA512_WITH_RSA,
BigInteger.valueOf(1))
.build();
// Stored with the run so endTests can later check the certificate's validity window.
controller.jobController().storeTesterCertificate(id, certificate);
zipBuilder.add("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8));
zipBuilder.add("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8));
}
/** Returns the deployment id of this run's tester application in the given zone. */
private DeploymentId getTesterDeploymentId(RunId runId, ZoneId zoneId) {
    ApplicationId testerApplication = runId.tester().id();
    return new DeploymentId(testerApplication, zoneId);
}
/** Returns the tester flavor of the first deployment-spec step concerning this job's environment, if any. */
private static Optional<String> testerFlavorFor(RunId id, DeploymentSpec spec) {
    return spec.steps().stream()
               .filter(step -> step.concerns(id.type().environment()))
               .findFirst()
               .flatMap(step -> step.zones().get(0).testerFlavor());
}
/** Returns the generated services.xml content for the tester application. */
// systemUsesAthenz controls both credential use in the test runner and the access-control filter;
// useTesterCertificate enables mutual-TLS material generated per run; resources sizes the tester node.
static byte[] servicesXml(AthenzDomain domain, boolean systemUsesAthenz, boolean useTesterCertificate,
NodeResources resources) {
// Reserve a fixed 2Gb for the jdisc container; half of the remainder goes to surefire test JVMs.
int jdiscMemoryGb = 2;
int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb());
int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2);
String resourceString = String.format(Locale.ENGLISH,
"<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>",
resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name());
// The CD system's Athenz domain maps to the production identity domain for SIA key/cert paths.
AthenzDomain idDomain = ("vespa.vespa.cd".equals(domain.value()) ? AthenzDomain.from("vespa.vespa") : domain);
// NOTE(review): the <binding> URLs below appear truncated in this copy of the file ("http:") —
// verify against the upstream source before relying on this literal.
String servicesXml =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<services xmlns:deploy='vespa' version='1.0'>\n" +
" <container version='1.0' id='tester'>\n" +
"\n" +
" <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" +
" <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" +
" <artifactsPath>artifacts</artifactsPath>\n" +
" <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" +
" <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
" <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" +
" </config>\n" +
" </component>\n" +
"\n" +
" <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" +
" <binding>http:
" </handler>\n" +
"\n" +
" <http>\n" +
" <!-- Make sure 4080 is the first port. This will be used by the config server. -->\n" +
" <server id='default' port='4080'/>\n" +
" <server id='testertls4443' port='4443'>\n" +
" <config name=\"jdisc.http.connector\">\n" +
" <tlsClientAuthEnforcer>\n" +
" <enable>true</enable>\n" +
" <pathWhitelist>\n" +
" <item>/status.html</item>\n" +
" <item>/state/v1/config</item>\n" +
" </pathWhitelist>\n" +
" </tlsClientAuthEnforcer>\n" +
" </config>\n" +
" <ssl>\n" +
" <private-key-file>/var/lib/sia/keys/" + idDomain.value() + ".tenant.key.pem</private-key-file>\n" +
" <certificate-file>/var/lib/sia/certs/" + idDomain.value() + ".tenant.cert.pem</certificate-file>\n" +
" <ca-certificates-file>/opt/yahoo/share/ssl/certs/athenz_certificate_bundle.pem</ca-certificates-file>\n" +
" <client-authentication>want</client-authentication>\n" +
" </ssl>\n" +
" </server>\n" +
" <filtering>\n" +
(systemUsesAthenz ?
" <access-control domain='" + domain.value() + "'>\n" +
" <exclude>\n" +
" <binding>http:
" </exclude>\n" +
" </access-control>\n"
: "") +
" <request-chain id=\"testrunner-api\">\n" +
" <filter id='authz-filter' class='com.yahoo.jdisc.http.filter.security.athenz.AthenzAuthorizationFilter' bundle=\"jdisc-security-filters\">\n" +
" <config name=\"jdisc.http.filter.security.athenz.athenz-authorization-filter\">\n" +
" <credentialsToVerify>TOKEN_ONLY</credentialsToVerify>\n" +
" <roleTokenHeaderName>Yahoo-Role-Auth</roleTokenHeaderName>\n" +
" </config>\n" +
" <component id=\"com.yahoo.jdisc.http.filter.security.athenz.StaticRequestResourceMapper\" bundle=\"jdisc-security-filters\">\n" +
" <config name=\"jdisc.http.filter.security.athenz.static-request-resource-mapper\">\n" +
" <resourceName>" + domain.value() + ":tester-application</resourceName>\n" +
" <action>deploy</action>\n" +
" </config>\n" +
" </component>\n" +
" </filter>\n" +
" </request-chain>\n" +
" </filtering>\n" +
" </http>\n" +
"\n" +
" <nodes count=\"1\" allocated-memory=\"" + jdiscMemoryPct + "%\">\n" +
" " + resourceString + "\n" +
" </nodes>\n" +
" </container>\n" +
"</services>\n";
return servicesXml.getBytes(UTF_8);
}
/** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */
// Athenz domain/service attributes are only emitted when present in the real application's spec.
private static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) {
String deploymentSpec =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<deployment version=\"1.0\" " +
athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") +
athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("") + ">" +
" <instance id=\"" + id.id().instance().value() + "\" />" +
"</deployment>";
return deploymentSpec.getBytes(UTF_8);
}
/** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */
private class DualLogger {
// The run and step every message from this logger is attributed to.
private final RunId id;
private final Step step;
private DualLogger(RunId id, Step step) {
this.id = id;
this.step = step;
}
// Logs one or more INFO messages to the run log only.
private void log(String... messages) {
log(List.of(messages));
}
private void log(List<String> messages) {
controller.jobController().log(id, step, INFO, messages);
}
private void log(Level level, String message) {
log(level, message, null);
}
// Logs the exception to the class logger, but keeps the stack trace OUT of the run log.
private void logWithInternalException(Level level, String message, Throwable thrown) {
logger.log(level, id + " at " + step + ": " + message, thrown);
controller.jobController().log(id, step, level, message);
}
// Logs to both sinks; the stack trace, if any, is appended to the run-log message.
private void log(Level level, String message, Throwable thrown) {
logger.log(level, id + " at " + step + ": " + message, thrown);
if (thrown != null) {
ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream();
thrown.printStackTrace(new PrintStream(traceBuffer));
message += "\n" + traceBuffer;
}
controller.jobController().log(id, step, level, message);
}
}
}
class InternalStepRunner implements StepRunner {
private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName());
// Default tester node resources; AWS regions get a larger default.
private static final NodeResources DEFAULT_TESTER_RESOURCES =
new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any);
private static final NodeResources DEFAULT_TESTER_RESOURCES_AWS =
new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any);
// Step-level timeouts; certificateTimeout bounds the validity of generated tester certificates.
static final Duration endpointTimeout = Duration.ofMinutes(15);
static final Duration testerTimeout = Duration.ofMinutes(30);
static final Duration installationTimeout = Duration.ofMinutes(60);
static final Duration certificateTimeout = Duration.ofMinutes(300);
private final Controller controller;
private final TestConfigSerializer testConfigSerializer;
private final DeploymentFailureMails mails;
public InternalStepRunner(Controller controller) {
this.controller = controller;
this.testConfigSerializer = new TestConfigSerializer(controller.system());
this.mails = new DeploymentFailureMails(controller.zoneRegistry());
}
@Override
// Dispatches the given job step to its handler. IO errors are retried silently; other runtime
// errors fail the run, unless the step is a cleanup step, which keeps retrying.
public Optional<RunStatus> run(LockedStep step, RunId id) {
DualLogger logger = new DualLogger(id, step.get());
try {
switch (step.get()) {
case deployTester: return deployTester(id, logger);
case deployInitialReal: return deployInitialReal(id, logger);
case installInitialReal: return installInitialReal(id, logger);
case deployReal: return deployReal(id, logger);
case installTester: return installTester(id, logger);
case installReal: return installReal(id, logger);
case startStagingSetup: return startTests(id, true, logger);
case endStagingSetup: return endTests(id, logger);
case startTests: return startTests(id, false, logger);
case endTests: return endTests(id, logger);
case copyVespaLogs: return copyVespaLogs(id, logger);
case deactivateReal: return deactivateReal(id, logger);
case deactivateTester: return deactivateTester(id, logger);
case report: return report(id, logger);
default: throw new AssertionError("Unknown step '" + step + "'!");
}
}
catch (UncheckedIOException e) {
// Transient IO problems: log internally (stack trace kept out of the run log) and retry.
logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e);
return Optional.empty();
}
catch (RuntimeException e) {
logger.log(WARNING, "Unexpected exception running " + id, e);
if (JobProfile.of(id.type()).alwaysRun().contains(step.get())) {
logger.log("Will keep trying, as this is a cleanup step.");
return Optional.empty();
}
return Optional.of(error);
}
}
/** Deploys the source (or, if absent, target) platform and application versions, marking this as the staging setup. */
private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) {
    Versions versions = controller.jobController().run(id).get().versions();
    var platformToDeploy = versions.sourcePlatform().orElse(versions.targetPlatform());
    var applicationToDeploy = versions.sourceApplication().orElse(versions.targetApplication());
    logger.log("Deploying platform version " + platformToDeploy +
               " and application version " + applicationToDeploy.id() + " ...");
    return deployReal(id, true, versions, logger);
}
/** Deploys the target platform and application versions of this run. */
private Optional<RunStatus> deployReal(RunId id, DualLogger logger) {
    Versions versions = controller.jobController().run(id).get().versions();
    String message = "Deploying platform version " + versions.targetPlatform() +
                     " and application version " + versions.targetApplication().id() + " ...";
    logger.log(message);
    return deployReal(id, false, versions, logger);
}
/**
 * Performs the actual deployment of the real application.
 * Manual (dev/perf) deployments use the stored dev package and pin the platform version;
 * other deployments let the config server pick package and platform.
 */
private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, Versions versions, DualLogger logger) {
Optional<ApplicationPackage> applicationPackage = id.type().environment().isManuallyDeployed()
? Optional.of(new ApplicationPackage(controller.applications().applicationStore()
.getDev(id.application(), id.type().zone(controller.system()))))
: Optional.empty();
Optional<Version> vespaVersion = id.type().environment().isManuallyDeployed()
? Optional.of(versions.targetPlatform())
: Optional.empty();
return deploy(id.application(),
id.type(),
() -> controller.applications().deploy(id.application(),
id.type().zone(controller.system()),
applicationPackage,
new DeployOptions(false,
vespaVersion,
false,
setTheStage)),
// The retry deadline is measured from this step's recorded start time.
controller.jobController().run(id).get()
.stepInfo(setTheStage ? deployInitialReal : deployReal).get()
.startTime().get(),
logger);
}
/** Deploys the tester application, with the generated tester package, on the current system version. */
private Optional<RunStatus> deployTester(RunId id, DualLogger logger) {
Version platform = controller.systemVersion();
logger.log("Deploying the tester container on platform " + platform + " ...");
return deploy(id.tester().id(),
id.type(),
() -> controller.applications().deployTester(id.tester(),
testerPackage(id),
id.type().zone(controller.system()),
new DeployOptions(true,
Optional.of(platform),
false,
false)),
// The retry deadline is measured from this step's recorded start time.
controller.jobController().run(id).get()
.stepInfo(deployTester).get()
.startTime().get(),
logger);
}
/**
 * Executes the given deployment and interprets its result.
 * Disallowed re-feed actions fail the run with instructions; restart actions are carried out
 * immediately. Config server errors are classified: transient ones retry until an hour past
 * startTime, capacity problems map to outOfCapacity, and invalid packages fail the deployment.
 */
private Optional<RunStatus> deploy(ApplicationId id, JobType type, Supplier<ActivateResult> deployment,
Instant startTime, DualLogger logger) {
try {
PrepareResponse prepareResponse = deployment.get().prepareResponse();
if ( ! prepareResponse.configChangeActions.refeedActions.stream().allMatch(action -> action.allowed)) {
List<String> messages = new ArrayList<>();
messages.add("Deploy failed due to non-compatible changes that require re-feed.");
messages.add("Your options are:");
messages.add("1. Revert the incompatible changes.");
messages.add("2. If you think it is safe in your case, you can override this validation, see");
messages.add(" http:
messages.add("3. Deploy as a new application under a different name.");
messages.add("Illegal actions:");
prepareResponse.configChangeActions.refeedActions.stream()
.filter(action -> ! action.allowed)
.flatMap(action -> action.messages.stream())
.forEach(messages::add);
messages.add("Details:");
prepareResponse.log.stream()
.map(entry -> entry.message)
.forEach(messages::add);
logger.log(messages);
return Optional.of(deploymentFailed);
}
// Required restarts are performed right away, one (deduplicated, sorted) host at a time.
if (prepareResponse.configChangeActions.restartActions.isEmpty())
logger.log("No services requiring restart.");
else
prepareResponse.configChangeActions.restartActions.stream()
.flatMap(action -> action.services.stream())
.map(service -> service.hostName)
.sorted().distinct()
.map(Hostname::new)
.forEach(hostname -> {
controller.applications().restart(new DeploymentId(id, type.zone(controller.system())), Optional.of(hostname));
logger.log("Restarting services on host " + hostname.id() + ".");
});
logger.log("Deployment successful.");
if (prepareResponse.message != null)
logger.log(prepareResponse.message);
return Optional.of(running);
}
catch (ConfigServerException e) {
// Transient errors retry (empty) until an hour after the step started, then fail.
Optional<RunStatus> result = startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(deploymentFailed) : Optional.empty();
switch (e.getErrorCode()) {
case ACTIVATION_CONFLICT:
case APPLICATION_LOCK_FAILURE:
case CERTIFICATE_NOT_READY:
logger.log("Deployment failed with possibly transient error " + e.getErrorCode() +
", will retry: " + e.getMessage());
return result;
case LOAD_BALANCER_NOT_READY:
case PARENT_HOST_NOT_READY:
logger.log(e.getServerMessage());
return result;
case OUT_OF_CAPACITY:
logger.log(e.getServerMessage());
return Optional.of(outOfCapacity);
case INVALID_APPLICATION_PACKAGE:
case BAD_REQUEST:
logger.log(e.getMessage());
return Optional.of(deploymentFailed);
}
// Unclassified config server errors propagate to run(), which fails the run.
throw e;
}
}
/** Awaits installation of the initial (staging-setup) real deployment. */
private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) {
return installReal(id, true, logger);
}
/** Awaits installation of the target real deployment. */
private Optional<RunStatus> installReal(RunId id, DualLogger logger) {
return installReal(id, false, logger);
}
/**
 * Awaits convergence of the tester deployment: config convergence on its nodes, endpoint
 * availability in DNS, and container readiness. Retries (empty) until the relevant timeouts
 * (endpointTimeout, testerTimeout) have elapsed since this step started, then errors out.
 */
private Optional<RunStatus> installTester(RunId id, DualLogger logger) {
Run run = controller.jobController().run(id).get();
Version platform = controller.systemVersion();
ZoneId zone = id.type().zone(controller.system());
ApplicationId testerId = id.tester().id();
Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(testerId, zone),
Optional.of(platform));
if (services.isEmpty()) {
// No convergence info at all within five minutes is treated as a hard failure.
logger.log("Config status not currently available -- will retry.");
return run.stepInfo(installTester).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
? Optional.of(error)
: Optional.empty();
}
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone,
testerId,
ImmutableSet.of(active, reserved));
// Parent hosts are fetched too, as node status rendering depends on them.
List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(zone,
nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
NodeList nodeList = NodeList.of(nodes, parents, services.get());
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, false))
.collect(toList()));
if (nodeList.summary().converged()) {
if (endpointsAvailable(testerId, zone, logger)) {
if (containersAreUp(testerId, zone, logger)) {
logger.log("Tester container successfully installed!");
return Optional.of(running);
}
}
else if (run.stepInfo(installTester).get().startTime().get().plus(endpointTimeout).isBefore(controller.clock().instant())) {
logger.log(WARNING, "Tester failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
return Optional.of(error);
}
}
if (run.stepInfo(installTester).get().startTime().get().plus(testerTimeout).isBefore(controller.clock().instant())) {
logger.log(WARNING, "Installation of tester failed to complete within " + testerTimeout.toMinutes() + " minutes!");
return Optional.of(error);
}
return Optional.empty();
}
/** Returns true iff all containers in the deployment give 100 consecutive 200 OK responses on /status.html. */
private boolean containersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
var endpoints = controller.applications().clusterEndpoints(Set.of(new DeploymentId(id, zoneId)));
if ( ! endpoints.containsKey(zoneId))
return false;
for (URI endpoint : endpoints.get(zoneId).values()) {
// Tester containers use their own readiness probe; real containers use the regular one.
boolean ready = id.instance().isTester() ? controller.jobController().cloud().testerReady(endpoint)
: controller.jobController().cloud().ready(endpoint);
if (!ready) {
logger.log("Failed to get 100 consecutive OKs from " + endpoint);
return false;
}
}
return true;
}
/** Returns whether all endpoints of the given deployment are assigned and resolvable in DNS. */
private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) {
var endpoints = controller.applications().clusterEndpoints(Set.of(new DeploymentId(id, zone)));
if ( ! endpoints.containsKey(zone)) {
logger.log("Endpoints not yet ready.");
return false;
}
// Every endpoint must resolve before the deployment is considered reachable.
for (var endpoint : endpoints.get(zone).values())
if ( ! controller.jobController().cloud().exists(endpoint)) {
logger.log(INFO, "DNS lookup yielded no IP address for '" + endpoint + "'.");
return false;
}
logEndpoints(endpoints, logger);
return true;
}
/** Logs the discovered endpoints, one line per zone and one indented line per cluster endpoint. */
private void logEndpoints(Map<ZoneId, Map<ClusterSpec.Id, URI>> endpoints, DualLogger logger) {
List<String> messages = new ArrayList<>();
messages.add("Found endpoints:");
endpoints.forEach((zone, uris) -> {
messages.add("- " + zone);
uris.forEach((cluster, uri) -> messages.add(" |-- " + uri + " (" + cluster + ")"));
});
logger.log(messages);
}
/**
 * Renders a human-readable status summary for a node: one header line with hostname and
 * orchestration state, one "--- platform ..." line listing pending upgrades/restarts/reboots
 * ("&lt;--" separates wanted from current), then one line per service (filtered unless
 * printAllServices, or the node needs new config).
 */
private Stream<String> nodeDetails(NodeWithServices node, boolean printAllServices) {
return Stream.concat(Stream.of(node.node().hostname() + ": " + humanize(node.node().serviceState()),
"--- platform " + node.node().wantedVersion() + (node.needsPlatformUpgrade()
? " <-- " + (node.node().currentVersion().isEmpty() ? "not booted" : node.node().currentVersion())
: "") +
// OS and firmware upgrades are only reported while the node is allowed to be down.
(node.needsOsUpgrade() && node.isAllowedDown()
? ", upgrading OS (" + node.node().wantedOsVersion() + " <-- " + node.node().currentOsVersion() + ")"
: "") +
(node.needsFirmwareUpgrade() && node.isAllowedDown()
? ", upgrading firmware"
: "") +
(node.needsRestart()
? ", restart pending (" + node.node().wantedRestartGeneration() + " <-- " + node.node().restartGeneration() + ")"
: "") +
(node.needsReboot()
? ", reboot pending (" + node.node().wantedRebootGeneration() + " <-- " + node.node().rebootGeneration() + ")"
: "")),
// NOTE(review): the filter ignores its 'service' argument — when the node needs new config,
// ALL services are printed, not only stale ones. Confirm this is intentional.
node.services().stream()
.filter(service -> printAllServices || node.needsNewConfig())
.map(service -> "--- " + service.type() + " on port " + service.port() + (service.currentGeneration() == -1
? " has not started "
: " has config generation " + service.currentGeneration() + ", wanted is " + node.wantedConfigGeneration())));
}
/** Translates an orchestration service state into readable text; unknown states fall back to the enum name. */
private String humanize(Node.ServiceState state) {
switch (state) {
case allowedDown: return "allowed to be DOWN";
case expectedUp: return "expected to be UP";
case unorchestrated: return "unorchestrated";
default: return state.name();
}
}
/**
 * Starts the test suite in the tester container for the given run.
 * Collects endpoints of all production deployments plus the zone under test, verifies the
 * tester endpoint is up, and posts the generated test config to the tester.
 * Returns error if the deployment, its endpoints, or the tester have vanished; running otherwise.
 */
private Optional<RunStatus> startTests(RunId id, boolean isSetup, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
logger.log(INFO, "Deployment expired before tests could start.");
return Optional.of(error);
}
// Tests may probe all production deployments, not just the zone under test.
var deployments = controller.applications().requireInstance(id.application())
.productionDeployments().keySet().stream()
.map(zone -> new DeploymentId(id.application(), zone))
.collect(Collectors.toSet());
deployments.add(new DeploymentId(id.application(), id.type().zone(controller.system())));
logger.log("Attempting to find endpoints ...");
var endpoints = controller.applications().clusterEndpoints(deployments);
if ( ! endpoints.containsKey(id.type().zone(controller.system()))) {
logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!");
return Optional.of(error);
}
logEndpoints(endpoints, logger);
Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id);
if (testerEndpoint.isEmpty()) {
logger.log(WARNING, "Endpoints for the tester container vanished again, while it was still active!");
return Optional.of(error);
}
if ( ! controller.jobController().cloud().testerReady(testerEndpoint.get())) {
logger.log(WARNING, "Tester container went bad!");
return Optional.of(error);
}
logger.log("Starting tests ...");
// isSetup selects the staging-setup suite vs. the real suite for this job type.
controller.jobController().cloud().startTests(testerEndpoint.get(),
TesterCloud.Suite.of(id.type(), isSetup),
testConfigSerializer.configJson(id.application(),
id.type(),
true,
endpoints,
controller.applications().contentClustersByZone(deployments)));
return Optional.of(running);
}
/**
 * Checks on the test suite for this run and maps the tester's reported status to a run status.
 * Returns {@code aborted} if the deployment or the tester certificate has expired, empty while the
 * tests are still running (so this step is retried later), and a terminal status otherwise.
 */
private Optional<RunStatus> endTests(RunId id, DualLogger logger) {
if (deployment(id.application(), id.type()).isEmpty()) {
logger.log(INFO, "Deployment expired before tests could complete.");
return Optional.of(aborted);
}
// If a tester certificate was issued for this run, tests cannot complete once it has expired.
Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate();
if (testerCertificate.isPresent()) {
try {
testerCertificate.get().checkValidity(Date.from(controller.clock().instant()));
}
catch (CertificateExpiredException | CertificateNotYetValidException e) {
logger.log(INFO, "Tester certificate expired before tests could complete.");
return Optional.of(aborted);
}
}
controller.jobController().updateTestLog(id);
// Feature flag: fetch tester status through the config server instead of directly from the tester container.
BooleanFlag useConfigServerForTesterAPI = Flags.USE_CONFIG_SERVER_FOR_TESTER_API_CALLS.bindTo(controller.flagSource());
ZoneId zoneId = id.type().zone(controller.system());
TesterCloud.Status testStatus;
boolean useConfigServer = useConfigServerForTesterAPI.with(FetchVector.Dimension.ZONE_ID, zoneId.value()).value();
InternalStepRunner.logger.log(LogLevel.INFO, Flags.USE_CONFIG_SERVER_FOR_TESTER_API_CALLS.id().toString() +
" has value " + useConfigServer + " in zone " + zoneId.value());
if (useConfigServer) {
testStatus = controller.serviceRegistry().configServer().getTesterStatus(getTesterDeploymentId(id, zoneId));
} else {
Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id);
if (testerEndpoint.isEmpty()) {
logger.log("Endpoints for tester not found -- trying again later.");
return Optional.empty();
}
testStatus = controller.jobController().cloud().getStatus(testerEndpoint.get());
}
switch (testStatus) {
case NOT_STARTED:
throw new IllegalStateException("Tester reports tests not started, even though they should have!");
case RUNNING:
return Optional.empty(); // Not done yet; this step will be retried.
case FAILURE:
logger.log("Tests failed.");
return Optional.of(testFailure);
case ERROR:
logger.log(INFO, "Tester failed running its tests!");
return Optional.of(error);
case SUCCESS:
logger.log("Tests completed successfully.");
return Optional.of(running);
default:
throw new IllegalStateException("Unknown status '" + testStatus + "'!");
}
}
/**
 * Copies the Vespa log from the deployment, if it still exists.
 * Returns {@code running} unless fetching the log throws, in which case the failure is
 * logged and {@code error} is returned instead.
 */
private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) {
    if (deployment(id.application(), id.type()).isPresent()) {
        try {
            controller.jobController().updateVespaLog(id);
        }
        catch (Exception e) {
            logger.log(INFO, "Failure getting vespa logs for " + id, e);
            return Optional.of(error);
        }
    }
    return Optional.of(running);
}
/**
 * Deactivates the real application's deployment in this job's zone.
 * Failures are retried (empty result) until this step has been failing for an hour, after which
 * the step gives up with {@code error}.
 */
private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) {
try {
logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
controller.applications().deactivate(id.application(), id.type().zone(controller.system()));
return Optional.of(running);
}
catch (RuntimeException e) {
logger.log(WARNING, "Failed deleting application " + id.application(), e);
// Give up only when the step has been attempting deactivation for more than an hour.
Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateReal).get().startTime().get();
return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(error)
: Optional.empty();
}
}
/**
 * Deactivates the tester application for this run.
 * Failures are retried (empty result) until this step has been failing for an hour, after which
 * the step gives up with {@code error} — mirroring {@code deactivateReal}.
 */
private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) {
try {
logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
controller.jobController().deactivateTester(id.tester(), id.type());
return Optional.of(running);
}
catch (RuntimeException e) {
logger.log(WARNING, "Failed deleting tester of " + id.application(), e);
// Give up only when the step has been attempting deactivation for more than an hour.
Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateTester).get().startTime().get();
return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(error)
: Optional.empty();
}
}
/**
 * Reports the outcome of the run: sends failure notifications if the run is still active and has failed.
 * Returns {@code error} if the job is no longer supposed to run (signalled by IllegalStateException).
 */
private Optional<RunStatus> report(RunId id, DualLogger logger) {
try {
controller.jobController().active(id).ifPresent(run -> {
if (run.hasFailed())
sendNotification(run, logger);
});
}
catch (IllegalStateException e) {
logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e);
return Optional.of(error);
}
return Optional.of(running);
}
/**
 * Sends a mail with a notification of a failed run, if one should be sent.
 * Recipients come from the deployment spec's notification settings; the commit author is added
 * when configured and known. The mail template is picked by the run's terminal status.
 */
private void sendNotification(Run run, DualLogger logger) {
Application application = controller.applications().requireApplication(TenantAndApplicationId.from(run.id().application()));
Notifications notifications = application.deploymentSpec().requireInstance(run.id().application().instance()).notifications();
// "New commit" means the change being deployed equals exactly the failing application version.
boolean newCommit = application.require(run.id().application().instance()).change().application()
.map(run.versions().targetApplication()::equals)
.orElse(false);
When when = newCommit ? failingCommit : failing;
List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when));
if (notifications.emailRolesFor(when).contains(author))
run.versions().targetApplication().authorEmail().ifPresent(recipients::add);
if (recipients.isEmpty())
return;
try {
// Out-of-capacity is only mailed for production jobs; the other statuses always are.
if (run.status() == outOfCapacity && run.id().type().isProduction())
controller.serviceRegistry().mailer().send(mails.outOfCapacity(run.id(), recipients));
if (run.status() == deploymentFailed)
controller.serviceRegistry().mailer().send(mails.deploymentFailure(run.id(), recipients));
if (run.status() == installationFailed)
controller.serviceRegistry().mailer().send(mails.installationFailure(run.id(), recipients));
if (run.status() == testFailure)
controller.serviceRegistry().mailer().send(mails.testFailure(run.id(), recipients));
if (run.status() == error)
controller.serviceRegistry().mailer().send(mails.systemError(run.id(), recipients));
}
catch (RuntimeException e) {
// Mailing is best-effort; a failure to send must not fail the job itself.
logger.log(INFO, "Exception trying to send mail for " + run.id(), e);
}
}
/** Returns the deployment of the real application in the zone of the given job, if it exists. */
private Optional<Deployment> deployment(ApplicationId id, JobType type) {
// Map lookup may yield null when the zone has no deployment; wrapped as an empty Optional.
return Optional.ofNullable(application(id).deployments().get(type.zone(controller.system())));
}
/** Returns the real application with the given id. */
private Instance application(ApplicationId id) {
// Takes and immediately releases the application lock before reading —
// presumably to synchronise with concurrent writers; NOTE(review): confirm intent.
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { });
return controller.applications().requireInstance(id);
}
/**
 * Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout.
 *
 * We time out the job before the deployment expires, for zones where deployments are not persistent,
 * to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry,
 * and the given default installation timeout, minus one minute, is used as a timeout threshold.
 */
private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) {
Run run = controller.jobController().run(id).get();
// Outside CD systems, a run which started after the deployment was made is never considered timed out.
if ( ! controller.system().isCd() && run.start().isAfter(deployment.at()))
return false;
// Use the zone's time-to-live only when it is shorter than the default timeout.
Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone())
.filter(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0)
.orElse(defaultTimeout);
// One minute of slack is subtracted so logs can still be collected before expiry.
return deployment.at().isBefore(controller.clock().instant().minus(timeout.minus(Duration.ofMinutes(1))));
}
/** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */
private ApplicationPackage testerPackage(RunId id) {
ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication();
DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec();
ZoneId zone = id.type().zone(controller.system());
// Tester certificates are only used in public systems, for test environments.
boolean useTesterCertificate = controller.system().isPublic() && id.type().environment().isTest();
byte[] servicesXml = servicesXml(controller.zoneRegistry().accessControlDomain(),
! controller.system().isPublic(),
useTesterCertificate,
testerFlavorFor(id, spec)
.map(NodeResources::fromLegacyName)
// Regions with "aws-" in their name get the AWS default tester resources.
.orElse(zone.region().value().contains("aws-") ?
DEFAULT_TESTER_RESOURCES_AWS : DEFAULT_TESTER_RESOURCES));
byte[] testPackage = controller.applications().applicationStore().getTester(id.application().tenant(), id.application().application(), version);
byte[] deploymentXml = deploymentXml(id.tester(),
spec.athenzDomain(),
spec.requireInstance(id.application().instance()).athenzService(zone.environment(), zone.region()));
// Assemble the final zip: the stored test package plus the generated services.xml and deployment.xml.
try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) {
zipBuilder.add(testPackage);
zipBuilder.add("services.xml", servicesXml);
zipBuilder.add("deployment.xml", deploymentXml);
if (useTesterCertificate)
appendAndStoreCertificate(zipBuilder, id);
zipBuilder.close();
return new ApplicationPackage(zipBuilder.toByteArray());
}
}
/**
 * Generates an RSA key pair and certificate for the tester, stores the certificate with the run,
 * and adds the key and certificate PEMs to the application package under artifacts/.
 */
private void appendAndStoreCertificate(ZipBuilder zipBuilder, RunId id) {
KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048);
// Subject encodes the tester id, job type and run number, making the certificate unique per run.
X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "." + id.number());
X509Certificate certificate = X509CertificateBuilder.fromKeypair(keyPair,
subject,
controller.clock().instant(),
controller.clock().instant().plus(certificateTimeout),
SignatureAlgorithm.SHA512_WITH_RSA,
BigInteger.valueOf(1))
.build();
// Stored so endTests can later check the certificate's validity for this run.
controller.jobController().storeTesterCertificate(id, certificate);
zipBuilder.add("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8));
zipBuilder.add("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8));
}
/** Returns the deployment id of this run's tester application in the given zone. */
private DeploymentId getTesterDeploymentId(RunId runId, ZoneId zoneId) {
return new DeploymentId(runId.tester().id(), zoneId);
}
/**
 * Returns the tester flavor declared by the first deployment spec step which concerns this
 * job's environment, if any. Only the first matching step is consulted; if its first zone
 * declares no flavor, the result is empty.
 */
private static Optional<String> testerFlavorFor(RunId id, DeploymentSpec spec) {
    return spec.steps().stream()
               .filter(step -> step.concerns(id.type().environment()))
               .findFirst()
               .flatMap(step -> step.zones().get(0).testerFlavor());
}
/** Returns the generated services.xml content for the tester application. */
static byte[] servicesXml(AthenzDomain domain, boolean systemUsesAthenz, boolean useTesterCertificate,
NodeResources resources) {
// Reserve a fixed 2 Gb for the jdisc container; half of the remaining memory goes to surefire.
int jdiscMemoryGb = 2;
int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb());
int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2);
// Locale.ENGLISH guarantees '.' as decimal separator in the generated XML attributes.
String resourceString = String.format(Locale.ENGLISH,
"<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>",
resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name());
// The CD domain "vespa.vespa.cd" uses the "vespa.vespa" identity for its SIA key and cert paths —
// NOTE(review): presumably these domains share credentials; confirm.
AthenzDomain idDomain = ("vespa.vespa.cd".equals(domain.value()) ? AthenzDomain.from("vespa.vespa") : domain);
String servicesXml =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<services xmlns:deploy='vespa' version='1.0'>\n" +
" <container version='1.0' id='tester'>\n" +
"\n" +
" <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" +
" <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" +
" <artifactsPath>artifacts</artifactsPath>\n" +
" <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" +
" <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
" <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" +
" </config>\n" +
" </component>\n" +
"\n" +
" <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" +
" <binding>http:
" </handler>\n" +
"\n" +
" <http>\n" +
" <!-- Make sure 4080 is the first port. This will be used by the config server. -->\n" +
" <server id='default' port='4080'/>\n" +
" <server id='testertls4443' port='4443'>\n" +
" <config name=\"jdisc.http.connector\">\n" +
" <tlsClientAuthEnforcer>\n" +
" <enable>true</enable>\n" +
" <pathWhitelist>\n" +
" <item>/status.html</item>\n" +
" <item>/state/v1/config</item>\n" +
" </pathWhitelist>\n" +
" </tlsClientAuthEnforcer>\n" +
" </config>\n" +
" <ssl>\n" +
" <private-key-file>/var/lib/sia/keys/" + idDomain.value() + ".tenant.key.pem</private-key-file>\n" +
" <certificate-file>/var/lib/sia/certs/" + idDomain.value() + ".tenant.cert.pem</certificate-file>\n" +
" <ca-certificates-file>/opt/yahoo/share/ssl/certs/athenz_certificate_bundle.pem</ca-certificates-file>\n" +
" <client-authentication>want</client-authentication>\n" +
" </ssl>\n" +
" </server>\n" +
" <filtering>\n" +
// Access control is only emitted for systems which use Athenz.
(systemUsesAthenz ?
" <access-control domain='" + domain.value() + "'>\n" +
" <exclude>\n" +
" <binding>http:
" </exclude>\n" +
" </access-control>\n"
: "") +
" <request-chain id=\"testrunner-api\">\n" +
" <filter id='authz-filter' class='com.yahoo.jdisc.http.filter.security.athenz.AthenzAuthorizationFilter' bundle=\"jdisc-security-filters\">\n" +
" <config name=\"jdisc.http.filter.security.athenz.athenz-authorization-filter\">\n" +
" <credentialsToVerify>TOKEN_ONLY</credentialsToVerify>\n" +
" <roleTokenHeaderName>Yahoo-Role-Auth</roleTokenHeaderName>\n" +
" </config>\n" +
" <component id=\"com.yahoo.jdisc.http.filter.security.athenz.StaticRequestResourceMapper\" bundle=\"jdisc-security-filters\">\n" +
" <config name=\"jdisc.http.filter.security.athenz.static-request-resource-mapper\">\n" +
" <resourceName>" + domain.value() + ":tester-application</resourceName>\n" +
" <action>deploy</action>\n" +
" </config>\n" +
" </component>\n" +
" </filter>\n" +
" </request-chain>\n" +
" </filtering>\n" +
" </http>\n" +
"\n" +
" <nodes count=\"1\" allocated-memory=\"" + jdiscMemoryPct + "%\">\n" +
" " + resourceString + "\n" +
" </nodes>\n" +
" </container>\n" +
"</services>\n";
return servicesXml.getBytes(UTF_8);
}
/** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */
private static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) {
// Domain and service attributes are only emitted when present.
String deploymentSpec =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<deployment version=\"1.0\" " +
athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") +
athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("") + ">" +
" <instance id=\"" + id.id().instance().value() + "\" />" +
"</deployment>";
return deploymentSpec.getBytes(UTF_8);
}
/** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */
private class DualLogger {
private final RunId id;
private final Step step;
private DualLogger(RunId id, Step step) {
this.id = id;
this.step = step;
}
// Logs the given messages at INFO, to the job log only.
private void log(String... messages) {
log(List.of(messages));
}
// Logs the given messages at INFO, to the job log only.
private void log(List<String> messages) {
controller.jobController().log(id, step, INFO, messages);
}
private void log(Level level, String message) {
log(level, message, null);
}
// Logs the exception to the internal logger only; the job log gets the message without a stack trace.
private void logWithInternalException(Level level, String message, Throwable thrown) {
logger.log(level, id + " at " + step + ": " + message, thrown);
controller.jobController().log(id, step, level, message);
}
// Logs to both the internal logger and the job log; the job log message has the stack trace appended, if any.
private void log(Level level, String message, Throwable thrown) {
logger.log(level, id + " at " + step + ": " + message, thrown);
if (thrown != null) {
ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream();
thrown.printStackTrace(new PrintStream(traceBuffer));
message += "\n" + traceBuffer;
}
controller.jobController().log(id, step, level, message);
}
}
} |
Remove comment for the removed argument as well | protected void setUp(boolean dontInitializeNode2) throws Exception {
Distribution distribution = new Distribution(Distribution.getSimpleGroupConfig(2, 10));
jsonWriter.setDefaultPathPrefix("/cluster/v2");
{
Set<ConfiguredNode> nodes = FleetControllerTest.toNodes(0, 1, 2, 3);
ContentCluster cluster = new ContentCluster(
"books", nodes, distribution, 6 /* minStorageNodesUp*/, 0.9 /* minRatioOfStorageNodesUp */
/* determineBucketsFromBucketSpaceMetric */);
initializeCluster(cluster, nodes);
AnnotatedClusterState baselineState = AnnotatedClusterState.withoutAnnotations(ClusterState.stateFromString("distributor:4 storage:4"));
Map<String, AnnotatedClusterState> bucketSpaceStates = new HashMap<>();
bucketSpaceStates.put("default", AnnotatedClusterState.withoutAnnotations(ClusterState.stateFromString("distributor:4 storage:4 .3.s:m")));
bucketSpaceStates.put("global", baselineState);
books = new ClusterControllerMock(cluster, baselineState.getClusterState(),
ClusterStateBundle.of(baselineState, bucketSpaceStates), 0, 0);
}
{
Set<ConfiguredNode> nodes = FleetControllerTest.toNodes(1, 2, 3, 5, 7);
Set<ConfiguredNode> nodesInSlobrok = FleetControllerTest.toNodes(1, 3, 5, 7);
ContentCluster cluster = new ContentCluster(
"music", nodes, distribution, 4 /* minStorageNodesUp*/, 0.0 /* minRatioOfStorageNodesUp */
/* determineBucketsFromBucketSpaceMetric */);
if (dontInitializeNode2) {
initializeCluster(cluster, nodesInSlobrok);
}
else {
initializeCluster(cluster, nodes);
}
AnnotatedClusterState baselineState = AnnotatedClusterState.withoutAnnotations(ClusterState.stateFromString("distributor:8 .0.s:d .2.s:d .4.s:d .6.s:d "
+ "storage:8 .0.s:d .2.s:d .4.s:d .6.s:d"));
music = new ClusterControllerMock(cluster, baselineState.getClusterState(),
ClusterStateBundle.ofBaselineOnly(baselineState), 0, 0);
}
ccSockets = new TreeMap<>();
ccSockets.put(0, new ClusterControllerStateRestAPI.Socket("localhost", 80));
restAPI = new ClusterControllerStateRestAPI(new ClusterControllerStateRestAPI.FleetControllerResolver() {
@Override
public Map<String, RemoteClusterControllerTaskScheduler> getFleetControllers() {
Map<String, RemoteClusterControllerTaskScheduler> fleetControllers = new LinkedHashMap<>();
fleetControllers.put(books.context.cluster.getName(), books);
fleetControllers.put(music.context.cluster.getName(), music);
return fleetControllers;
}
}, ccSockets);
} | /* determineBucketsFromBucketSpaceMetric */); | protected void setUp(boolean dontInitializeNode2) throws Exception {
Distribution distribution = new Distribution(Distribution.getSimpleGroupConfig(2, 10));
jsonWriter.setDefaultPathPrefix("/cluster/v2");
{
Set<ConfiguredNode> nodes = FleetControllerTest.toNodes(0, 1, 2, 3);
ContentCluster cluster = new ContentCluster(
"books", nodes, distribution, 6 /* minStorageNodesUp*/, 0.9 /* minRatioOfStorageNodesUp */);
initializeCluster(cluster, nodes);
AnnotatedClusterState baselineState = AnnotatedClusterState.withoutAnnotations(ClusterState.stateFromString("distributor:4 storage:4"));
Map<String, AnnotatedClusterState> bucketSpaceStates = new HashMap<>();
bucketSpaceStates.put("default", AnnotatedClusterState.withoutAnnotations(ClusterState.stateFromString("distributor:4 storage:4 .3.s:m")));
bucketSpaceStates.put("global", baselineState);
books = new ClusterControllerMock(cluster, baselineState.getClusterState(),
ClusterStateBundle.of(baselineState, bucketSpaceStates), 0, 0);
}
{
Set<ConfiguredNode> nodes = FleetControllerTest.toNodes(1, 2, 3, 5, 7);
Set<ConfiguredNode> nodesInSlobrok = FleetControllerTest.toNodes(1, 3, 5, 7);
ContentCluster cluster = new ContentCluster(
"music", nodes, distribution, 4 /* minStorageNodesUp*/, 0.0 /* minRatioOfStorageNodesUp */);
if (dontInitializeNode2) {
initializeCluster(cluster, nodesInSlobrok);
}
else {
initializeCluster(cluster, nodes);
}
AnnotatedClusterState baselineState = AnnotatedClusterState.withoutAnnotations(ClusterState.stateFromString("distributor:8 .0.s:d .2.s:d .4.s:d .6.s:d "
+ "storage:8 .0.s:d .2.s:d .4.s:d .6.s:d"));
music = new ClusterControllerMock(cluster, baselineState.getClusterState(),
ClusterStateBundle.ofBaselineOnly(baselineState), 0, 0);
}
ccSockets = new TreeMap<>();
ccSockets.put(0, new ClusterControllerStateRestAPI.Socket("localhost", 80));
restAPI = new ClusterControllerStateRestAPI(new ClusterControllerStateRestAPI.FleetControllerResolver() {
@Override
public Map<String, RemoteClusterControllerTaskScheduler> getFleetControllers() {
Map<String, RemoteClusterControllerTaskScheduler> fleetControllers = new LinkedHashMap<>();
fleetControllers.put(books.context.cluster.getName(), books);
fleetControllers.put(music.context.cluster.getName(), music);
return fleetControllers;
}
}, ccSockets);
} | class StateRequest implements UnitStateRequest {
private String[] path;
private int recursive;
StateRequest(String req, int recursive) {
path = req.isEmpty() ? new String[0] : req.split("/");
this.recursive = recursive;
}
@Override
public int getRecursiveLevels() { return recursive;
}
@Override
public String[] getUnitPath() { return path; }
} | class StateRequest implements UnitStateRequest {
private String[] path;
private int recursive;
StateRequest(String req, int recursive) {
path = req.isEmpty() ? new String[0] : req.split("/");
this.recursive = recursive;
}
@Override
public int getRecursiveLevels() { return recursive;
}
@Override
public String[] getUnitPath() { return path; }
} |
Remove comment for the removed argument as well | protected void setUp(boolean dontInitializeNode2) throws Exception {
Distribution distribution = new Distribution(Distribution.getSimpleGroupConfig(2, 10));
jsonWriter.setDefaultPathPrefix("/cluster/v2");
{
Set<ConfiguredNode> nodes = FleetControllerTest.toNodes(0, 1, 2, 3);
ContentCluster cluster = new ContentCluster(
"books", nodes, distribution, 6 /* minStorageNodesUp*/, 0.9 /* minRatioOfStorageNodesUp */
/* determineBucketsFromBucketSpaceMetric */);
initializeCluster(cluster, nodes);
AnnotatedClusterState baselineState = AnnotatedClusterState.withoutAnnotations(ClusterState.stateFromString("distributor:4 storage:4"));
Map<String, AnnotatedClusterState> bucketSpaceStates = new HashMap<>();
bucketSpaceStates.put("default", AnnotatedClusterState.withoutAnnotations(ClusterState.stateFromString("distributor:4 storage:4 .3.s:m")));
bucketSpaceStates.put("global", baselineState);
books = new ClusterControllerMock(cluster, baselineState.getClusterState(),
ClusterStateBundle.of(baselineState, bucketSpaceStates), 0, 0);
}
{
Set<ConfiguredNode> nodes = FleetControllerTest.toNodes(1, 2, 3, 5, 7);
Set<ConfiguredNode> nodesInSlobrok = FleetControllerTest.toNodes(1, 3, 5, 7);
ContentCluster cluster = new ContentCluster(
"music", nodes, distribution, 4 /* minStorageNodesUp*/, 0.0 /* minRatioOfStorageNodesUp */
/* determineBucketsFromBucketSpaceMetric */);
if (dontInitializeNode2) {
initializeCluster(cluster, nodesInSlobrok);
}
else {
initializeCluster(cluster, nodes);
}
AnnotatedClusterState baselineState = AnnotatedClusterState.withoutAnnotations(ClusterState.stateFromString("distributor:8 .0.s:d .2.s:d .4.s:d .6.s:d "
+ "storage:8 .0.s:d .2.s:d .4.s:d .6.s:d"));
music = new ClusterControllerMock(cluster, baselineState.getClusterState(),
ClusterStateBundle.ofBaselineOnly(baselineState), 0, 0);
}
ccSockets = new TreeMap<>();
ccSockets.put(0, new ClusterControllerStateRestAPI.Socket("localhost", 80));
restAPI = new ClusterControllerStateRestAPI(new ClusterControllerStateRestAPI.FleetControllerResolver() {
@Override
public Map<String, RemoteClusterControllerTaskScheduler> getFleetControllers() {
Map<String, RemoteClusterControllerTaskScheduler> fleetControllers = new LinkedHashMap<>();
fleetControllers.put(books.context.cluster.getName(), books);
fleetControllers.put(music.context.cluster.getName(), music);
return fleetControllers;
}
}, ccSockets);
} | /* determineBucketsFromBucketSpaceMetric */); | protected void setUp(boolean dontInitializeNode2) throws Exception {
Distribution distribution = new Distribution(Distribution.getSimpleGroupConfig(2, 10));
jsonWriter.setDefaultPathPrefix("/cluster/v2");
{
Set<ConfiguredNode> nodes = FleetControllerTest.toNodes(0, 1, 2, 3);
ContentCluster cluster = new ContentCluster(
"books", nodes, distribution, 6 /* minStorageNodesUp*/, 0.9 /* minRatioOfStorageNodesUp */);
initializeCluster(cluster, nodes);
AnnotatedClusterState baselineState = AnnotatedClusterState.withoutAnnotations(ClusterState.stateFromString("distributor:4 storage:4"));
Map<String, AnnotatedClusterState> bucketSpaceStates = new HashMap<>();
bucketSpaceStates.put("default", AnnotatedClusterState.withoutAnnotations(ClusterState.stateFromString("distributor:4 storage:4 .3.s:m")));
bucketSpaceStates.put("global", baselineState);
books = new ClusterControllerMock(cluster, baselineState.getClusterState(),
ClusterStateBundle.of(baselineState, bucketSpaceStates), 0, 0);
}
{
Set<ConfiguredNode> nodes = FleetControllerTest.toNodes(1, 2, 3, 5, 7);
Set<ConfiguredNode> nodesInSlobrok = FleetControllerTest.toNodes(1, 3, 5, 7);
ContentCluster cluster = new ContentCluster(
"music", nodes, distribution, 4 /* minStorageNodesUp*/, 0.0 /* minRatioOfStorageNodesUp */);
if (dontInitializeNode2) {
initializeCluster(cluster, nodesInSlobrok);
}
else {
initializeCluster(cluster, nodes);
}
AnnotatedClusterState baselineState = AnnotatedClusterState.withoutAnnotations(ClusterState.stateFromString("distributor:8 .0.s:d .2.s:d .4.s:d .6.s:d "
+ "storage:8 .0.s:d .2.s:d .4.s:d .6.s:d"));
music = new ClusterControllerMock(cluster, baselineState.getClusterState(),
ClusterStateBundle.ofBaselineOnly(baselineState), 0, 0);
}
ccSockets = new TreeMap<>();
ccSockets.put(0, new ClusterControllerStateRestAPI.Socket("localhost", 80));
restAPI = new ClusterControllerStateRestAPI(new ClusterControllerStateRestAPI.FleetControllerResolver() {
@Override
public Map<String, RemoteClusterControllerTaskScheduler> getFleetControllers() {
Map<String, RemoteClusterControllerTaskScheduler> fleetControllers = new LinkedHashMap<>();
fleetControllers.put(books.context.cluster.getName(), books);
fleetControllers.put(music.context.cluster.getName(), music);
return fleetControllers;
}
}, ccSockets);
} | class StateRequest implements UnitStateRequest {
private String[] path;
private int recursive;
StateRequest(String req, int recursive) {
path = req.isEmpty() ? new String[0] : req.split("/");
this.recursive = recursive;
}
@Override
public int getRecursiveLevels() { return recursive;
}
@Override
public String[] getUnitPath() { return path; }
} | class StateRequest implements UnitStateRequest {
private String[] path;
private int recursive;
StateRequest(String req, int recursive) {
path = req.isEmpty() ? new String[0] : req.split("/");
this.recursive = recursive;
}
@Override
public int getRecursiveLevels() { return recursive;
}
@Override
public String[] getUnitPath() { return path; }
} |
Fixed | protected void setUp(boolean dontInitializeNode2) throws Exception {
Distribution distribution = new Distribution(Distribution.getSimpleGroupConfig(2, 10));
jsonWriter.setDefaultPathPrefix("/cluster/v2");
{
Set<ConfiguredNode> nodes = FleetControllerTest.toNodes(0, 1, 2, 3);
ContentCluster cluster = new ContentCluster(
"books", nodes, distribution, 6 /* minStorageNodesUp*/, 0.9 /* minRatioOfStorageNodesUp */
/* determineBucketsFromBucketSpaceMetric */);
initializeCluster(cluster, nodes);
AnnotatedClusterState baselineState = AnnotatedClusterState.withoutAnnotations(ClusterState.stateFromString("distributor:4 storage:4"));
Map<String, AnnotatedClusterState> bucketSpaceStates = new HashMap<>();
bucketSpaceStates.put("default", AnnotatedClusterState.withoutAnnotations(ClusterState.stateFromString("distributor:4 storage:4 .3.s:m")));
bucketSpaceStates.put("global", baselineState);
books = new ClusterControllerMock(cluster, baselineState.getClusterState(),
ClusterStateBundle.of(baselineState, bucketSpaceStates), 0, 0);
}
{
Set<ConfiguredNode> nodes = FleetControllerTest.toNodes(1, 2, 3, 5, 7);
Set<ConfiguredNode> nodesInSlobrok = FleetControllerTest.toNodes(1, 3, 5, 7);
ContentCluster cluster = new ContentCluster(
"music", nodes, distribution, 4 /* minStorageNodesUp*/, 0.0 /* minRatioOfStorageNodesUp */
/* determineBucketsFromBucketSpaceMetric */);
if (dontInitializeNode2) {
initializeCluster(cluster, nodesInSlobrok);
}
else {
initializeCluster(cluster, nodes);
}
AnnotatedClusterState baselineState = AnnotatedClusterState.withoutAnnotations(ClusterState.stateFromString("distributor:8 .0.s:d .2.s:d .4.s:d .6.s:d "
+ "storage:8 .0.s:d .2.s:d .4.s:d .6.s:d"));
music = new ClusterControllerMock(cluster, baselineState.getClusterState(),
ClusterStateBundle.ofBaselineOnly(baselineState), 0, 0);
}
ccSockets = new TreeMap<>();
ccSockets.put(0, new ClusterControllerStateRestAPI.Socket("localhost", 80));
restAPI = new ClusterControllerStateRestAPI(new ClusterControllerStateRestAPI.FleetControllerResolver() {
@Override
public Map<String, RemoteClusterControllerTaskScheduler> getFleetControllers() {
Map<String, RemoteClusterControllerTaskScheduler> fleetControllers = new LinkedHashMap<>();
fleetControllers.put(books.context.cluster.getName(), books);
fleetControllers.put(music.context.cluster.getName(), music);
return fleetControllers;
}
}, ccSockets);
} | /* determineBucketsFromBucketSpaceMetric */); | protected void setUp(boolean dontInitializeNode2) throws Exception {
Distribution distribution = new Distribution(Distribution.getSimpleGroupConfig(2, 10));
jsonWriter.setDefaultPathPrefix("/cluster/v2");
{
Set<ConfiguredNode> nodes = FleetControllerTest.toNodes(0, 1, 2, 3);
ContentCluster cluster = new ContentCluster(
"books", nodes, distribution, 6 /* minStorageNodesUp*/, 0.9 /* minRatioOfStorageNodesUp */);
initializeCluster(cluster, nodes);
AnnotatedClusterState baselineState = AnnotatedClusterState.withoutAnnotations(ClusterState.stateFromString("distributor:4 storage:4"));
Map<String, AnnotatedClusterState> bucketSpaceStates = new HashMap<>();
bucketSpaceStates.put("default", AnnotatedClusterState.withoutAnnotations(ClusterState.stateFromString("distributor:4 storage:4 .3.s:m")));
bucketSpaceStates.put("global", baselineState);
books = new ClusterControllerMock(cluster, baselineState.getClusterState(),
ClusterStateBundle.of(baselineState, bucketSpaceStates), 0, 0);
}
{
Set<ConfiguredNode> nodes = FleetControllerTest.toNodes(1, 2, 3, 5, 7);
Set<ConfiguredNode> nodesInSlobrok = FleetControllerTest.toNodes(1, 3, 5, 7);
ContentCluster cluster = new ContentCluster(
"music", nodes, distribution, 4 /* minStorageNodesUp*/, 0.0 /* minRatioOfStorageNodesUp */);
if (dontInitializeNode2) {
initializeCluster(cluster, nodesInSlobrok);
}
else {
initializeCluster(cluster, nodes);
}
AnnotatedClusterState baselineState = AnnotatedClusterState.withoutAnnotations(ClusterState.stateFromString("distributor:8 .0.s:d .2.s:d .4.s:d .6.s:d "
+ "storage:8 .0.s:d .2.s:d .4.s:d .6.s:d"));
music = new ClusterControllerMock(cluster, baselineState.getClusterState(),
ClusterStateBundle.ofBaselineOnly(baselineState), 0, 0);
}
ccSockets = new TreeMap<>();
ccSockets.put(0, new ClusterControllerStateRestAPI.Socket("localhost", 80));
restAPI = new ClusterControllerStateRestAPI(new ClusterControllerStateRestAPI.FleetControllerResolver() {
@Override
public Map<String, RemoteClusterControllerTaskScheduler> getFleetControllers() {
Map<String, RemoteClusterControllerTaskScheduler> fleetControllers = new LinkedHashMap<>();
fleetControllers.put(books.context.cluster.getName(), books);
fleetControllers.put(music.context.cluster.getName(), music);
return fleetControllers;
}
}, ccSockets);
} | class StateRequest implements UnitStateRequest {
private String[] path;
private int recursive;
StateRequest(String req, int recursive) {
path = req.isEmpty() ? new String[0] : req.split("/");
this.recursive = recursive;
}
@Override
public int getRecursiveLevels() { return recursive;
}
@Override
public String[] getUnitPath() { return path; }
} | class StateRequest implements UnitStateRequest {
private String[] path;
private int recursive;
StateRequest(String req, int recursive) {
path = req.isEmpty() ? new String[0] : req.split("/");
this.recursive = recursive;
}
@Override
public int getRecursiveLevels() { return recursive;
}
@Override
public String[] getUnitPath() { return path; }
} |
Fixed | protected void setUp(boolean dontInitializeNode2) throws Exception {
Distribution distribution = new Distribution(Distribution.getSimpleGroupConfig(2, 10));
jsonWriter.setDefaultPathPrefix("/cluster/v2");
{
Set<ConfiguredNode> nodes = FleetControllerTest.toNodes(0, 1, 2, 3);
ContentCluster cluster = new ContentCluster(
"books", nodes, distribution, 6 /* minStorageNodesUp*/, 0.9 /* minRatioOfStorageNodesUp */
/* determineBucketsFromBucketSpaceMetric */);
initializeCluster(cluster, nodes);
AnnotatedClusterState baselineState = AnnotatedClusterState.withoutAnnotations(ClusterState.stateFromString("distributor:4 storage:4"));
Map<String, AnnotatedClusterState> bucketSpaceStates = new HashMap<>();
bucketSpaceStates.put("default", AnnotatedClusterState.withoutAnnotations(ClusterState.stateFromString("distributor:4 storage:4 .3.s:m")));
bucketSpaceStates.put("global", baselineState);
books = new ClusterControllerMock(cluster, baselineState.getClusterState(),
ClusterStateBundle.of(baselineState, bucketSpaceStates), 0, 0);
}
{
Set<ConfiguredNode> nodes = FleetControllerTest.toNodes(1, 2, 3, 5, 7);
Set<ConfiguredNode> nodesInSlobrok = FleetControllerTest.toNodes(1, 3, 5, 7);
ContentCluster cluster = new ContentCluster(
"music", nodes, distribution, 4 /* minStorageNodesUp*/, 0.0 /* minRatioOfStorageNodesUp */
/* determineBucketsFromBucketSpaceMetric */);
if (dontInitializeNode2) {
initializeCluster(cluster, nodesInSlobrok);
}
else {
initializeCluster(cluster, nodes);
}
AnnotatedClusterState baselineState = AnnotatedClusterState.withoutAnnotations(ClusterState.stateFromString("distributor:8 .0.s:d .2.s:d .4.s:d .6.s:d "
+ "storage:8 .0.s:d .2.s:d .4.s:d .6.s:d"));
music = new ClusterControllerMock(cluster, baselineState.getClusterState(),
ClusterStateBundle.ofBaselineOnly(baselineState), 0, 0);
}
ccSockets = new TreeMap<>();
ccSockets.put(0, new ClusterControllerStateRestAPI.Socket("localhost", 80));
restAPI = new ClusterControllerStateRestAPI(new ClusterControllerStateRestAPI.FleetControllerResolver() {
@Override
public Map<String, RemoteClusterControllerTaskScheduler> getFleetControllers() {
Map<String, RemoteClusterControllerTaskScheduler> fleetControllers = new LinkedHashMap<>();
fleetControllers.put(books.context.cluster.getName(), books);
fleetControllers.put(music.context.cluster.getName(), music);
return fleetControllers;
}
}, ccSockets);
} | /* determineBucketsFromBucketSpaceMetric */); | protected void setUp(boolean dontInitializeNode2) throws Exception {
Distribution distribution = new Distribution(Distribution.getSimpleGroupConfig(2, 10));
jsonWriter.setDefaultPathPrefix("/cluster/v2");
{
Set<ConfiguredNode> nodes = FleetControllerTest.toNodes(0, 1, 2, 3);
ContentCluster cluster = new ContentCluster(
"books", nodes, distribution, 6 /* minStorageNodesUp*/, 0.9 /* minRatioOfStorageNodesUp */);
initializeCluster(cluster, nodes);
AnnotatedClusterState baselineState = AnnotatedClusterState.withoutAnnotations(ClusterState.stateFromString("distributor:4 storage:4"));
Map<String, AnnotatedClusterState> bucketSpaceStates = new HashMap<>();
bucketSpaceStates.put("default", AnnotatedClusterState.withoutAnnotations(ClusterState.stateFromString("distributor:4 storage:4 .3.s:m")));
bucketSpaceStates.put("global", baselineState);
books = new ClusterControllerMock(cluster, baselineState.getClusterState(),
ClusterStateBundle.of(baselineState, bucketSpaceStates), 0, 0);
}
{
Set<ConfiguredNode> nodes = FleetControllerTest.toNodes(1, 2, 3, 5, 7);
Set<ConfiguredNode> nodesInSlobrok = FleetControllerTest.toNodes(1, 3, 5, 7);
ContentCluster cluster = new ContentCluster(
"music", nodes, distribution, 4 /* minStorageNodesUp*/, 0.0 /* minRatioOfStorageNodesUp */);
if (dontInitializeNode2) {
initializeCluster(cluster, nodesInSlobrok);
}
else {
initializeCluster(cluster, nodes);
}
AnnotatedClusterState baselineState = AnnotatedClusterState.withoutAnnotations(ClusterState.stateFromString("distributor:8 .0.s:d .2.s:d .4.s:d .6.s:d "
+ "storage:8 .0.s:d .2.s:d .4.s:d .6.s:d"));
music = new ClusterControllerMock(cluster, baselineState.getClusterState(),
ClusterStateBundle.ofBaselineOnly(baselineState), 0, 0);
}
ccSockets = new TreeMap<>();
ccSockets.put(0, new ClusterControllerStateRestAPI.Socket("localhost", 80));
restAPI = new ClusterControllerStateRestAPI(new ClusterControllerStateRestAPI.FleetControllerResolver() {
@Override
public Map<String, RemoteClusterControllerTaskScheduler> getFleetControllers() {
Map<String, RemoteClusterControllerTaskScheduler> fleetControllers = new LinkedHashMap<>();
fleetControllers.put(books.context.cluster.getName(), books);
fleetControllers.put(music.context.cluster.getName(), music);
return fleetControllers;
}
}, ccSockets);
} | class StateRequest implements UnitStateRequest {
private String[] path;
private int recursive;
StateRequest(String req, int recursive) {
path = req.isEmpty() ? new String[0] : req.split("/");
this.recursive = recursive;
}
@Override
public int getRecursiveLevels() { return recursive;
}
@Override
public String[] getUnitPath() { return path; }
} | class StateRequest implements UnitStateRequest {
private String[] path;
private int recursive;
StateRequest(String req, int recursive) {
path = req.isEmpty() ? new String[0] : req.split("/");
this.recursive = recursive;
}
@Override
public int getRecursiveLevels() { return recursive;
}
@Override
public String[] getUnitPath() { return path; }
} |
Should this use the config server way as well? | private boolean testerContainersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
if (useConfigServerForTesterAPI(zoneId)) {
DeploymentId deploymentId = new DeploymentId(id, zoneId);
if (controller.jobController().cloud().testerReady(deploymentId)) {
return true;
} else {
logger.log("Failed to get 100 consecutive OKs from tester container for " + deploymentId);
return false;
}
} else {
return containersAreUp(id, zoneId, logger);
}
} | if (controller.jobController().cloud().testerReady(deploymentId)) { | private boolean testerContainersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
if (useConfigServerForTesterAPI(zoneId)) {
DeploymentId deploymentId = new DeploymentId(id, zoneId);
if (controller.jobController().cloud().testerReady(deploymentId)) {
return true;
} else {
logger.log("Failed to get 100 consecutive OKs from tester container for " + deploymentId);
return false;
}
} else {
return containersAreUp(id, zoneId, logger);
}
} | class InternalStepRunner implements StepRunner {
private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName());
private static final NodeResources DEFAULT_TESTER_RESOURCES =
new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any);
private static final NodeResources DEFAULT_TESTER_RESOURCES_AWS =
new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any);
static final Duration endpointTimeout = Duration.ofMinutes(15);
static final Duration testerTimeout = Duration.ofMinutes(30);
static final Duration installationTimeout = Duration.ofMinutes(60);
static final Duration certificateTimeout = Duration.ofMinutes(300);
private final Controller controller;
private final TestConfigSerializer testConfigSerializer;
private final DeploymentFailureMails mails;
public InternalStepRunner(Controller controller) {
this.controller = controller;
this.testConfigSerializer = new TestConfigSerializer(controller.system());
this.mails = new DeploymentFailureMails(controller.zoneRegistry());
}
@Override
public Optional<RunStatus> run(LockedStep step, RunId id) {
DualLogger logger = new DualLogger(id, step.get());
try {
switch (step.get()) {
case deployTester: return deployTester(id, logger);
case deployInitialReal: return deployInitialReal(id, logger);
case installInitialReal: return installInitialReal(id, logger);
case deployReal: return deployReal(id, logger);
case installTester: return installTester(id, logger);
case installReal: return installReal(id, logger);
case startStagingSetup: return startTests(id, true, logger);
case endStagingSetup: return endTests(id, logger);
case startTests: return startTests(id, false, logger);
case endTests: return endTests(id, logger);
case copyVespaLogs: return copyVespaLogs(id, logger);
case deactivateReal: return deactivateReal(id, logger);
case deactivateTester: return deactivateTester(id, logger);
case report: return report(id, logger);
default: throw new AssertionError("Unknown step '" + step + "'!");
}
}
catch (UncheckedIOException e) {
logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e);
return Optional.empty();
}
catch (RuntimeException e) {
logger.log(WARNING, "Unexpected exception running " + id, e);
if (JobProfile.of(id.type()).alwaysRun().contains(step.get())) {
logger.log("Will keep trying, as this is a cleanup step.");
return Optional.empty();
}
return Optional.of(error);
}
}
private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) {
Versions versions = controller.jobController().run(id).get().versions();
logger.log("Deploying platform version " +
versions.sourcePlatform().orElse(versions.targetPlatform()) +
" and application version " +
versions.sourceApplication().orElse(versions.targetApplication()).id() + " ...");
return deployReal(id, true, versions, logger);
}
private Optional<RunStatus> deployReal(RunId id, DualLogger logger) {
Versions versions = controller.jobController().run(id).get().versions();
logger.log("Deploying platform version " + versions.targetPlatform() +
" and application version " + versions.targetApplication().id() + " ...");
return deployReal(id, false, versions, logger);
}
private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, Versions versions, DualLogger logger) {
Optional<ApplicationPackage> applicationPackage = id.type().environment().isManuallyDeployed()
? Optional.of(new ApplicationPackage(controller.applications().applicationStore()
.getDev(id.application(), id.type().zone(controller.system()))))
: Optional.empty();
Optional<Version> vespaVersion = id.type().environment().isManuallyDeployed()
? Optional.of(versions.targetPlatform())
: Optional.empty();
return deploy(id.application(),
id.type(),
() -> controller.applications().deploy(id.application(),
id.type().zone(controller.system()),
applicationPackage,
new DeployOptions(false,
vespaVersion,
false,
setTheStage)),
controller.jobController().run(id).get()
.stepInfo(setTheStage ? deployInitialReal : deployReal).get()
.startTime().get(),
logger);
}
private Optional<RunStatus> deployTester(RunId id, DualLogger logger) {
Version platform = controller.systemVersion();
logger.log("Deploying the tester container on platform " + platform + " ...");
return deploy(id.tester().id(),
id.type(),
() -> controller.applications().deployTester(id.tester(),
testerPackage(id),
id.type().zone(controller.system()),
new DeployOptions(true,
Optional.of(platform),
false,
false)),
controller.jobController().run(id).get()
.stepInfo(deployTester).get()
.startTime().get(),
logger);
}
private Optional<RunStatus> deploy(ApplicationId id, JobType type, Supplier<ActivateResult> deployment,
Instant startTime, DualLogger logger) {
try {
PrepareResponse prepareResponse = deployment.get().prepareResponse();
if ( ! prepareResponse.configChangeActions.refeedActions.stream().allMatch(action -> action.allowed)) {
List<String> messages = new ArrayList<>();
messages.add("Deploy failed due to non-compatible changes that require re-feed.");
messages.add("Your options are:");
messages.add("1. Revert the incompatible changes.");
messages.add("2. If you think it is safe in your case, you can override this validation, see");
messages.add(" http:
messages.add("3. Deploy as a new application under a different name.");
messages.add("Illegal actions:");
prepareResponse.configChangeActions.refeedActions.stream()
.filter(action -> ! action.allowed)
.flatMap(action -> action.messages.stream())
.forEach(messages::add);
messages.add("Details:");
prepareResponse.log.stream()
.map(entry -> entry.message)
.forEach(messages::add);
logger.log(messages);
return Optional.of(deploymentFailed);
}
if (prepareResponse.configChangeActions.restartActions.isEmpty())
logger.log("No services requiring restart.");
else
prepareResponse.configChangeActions.restartActions.stream()
.flatMap(action -> action.services.stream())
.map(service -> service.hostName)
.sorted().distinct()
.map(Hostname::new)
.forEach(hostname -> {
controller.applications().restart(new DeploymentId(id, type.zone(controller.system())), Optional.of(hostname));
logger.log("Restarting services on host " + hostname.id() + ".");
});
logger.log("Deployment successful.");
if (prepareResponse.message != null)
logger.log(prepareResponse.message);
return Optional.of(running);
}
catch (ConfigServerException e) {
Optional<RunStatus> result = startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(deploymentFailed) : Optional.empty();
switch (e.getErrorCode()) {
case ACTIVATION_CONFLICT:
case APPLICATION_LOCK_FAILURE:
case CERTIFICATE_NOT_READY:
logger.log("Deployment failed with possibly transient error " + e.getErrorCode() +
", will retry: " + e.getMessage());
return result;
case LOAD_BALANCER_NOT_READY:
case PARENT_HOST_NOT_READY:
logger.log(e.getServerMessage());
return result;
case OUT_OF_CAPACITY:
logger.log(e.getServerMessage());
return Optional.of(outOfCapacity);
case INVALID_APPLICATION_PACKAGE:
case BAD_REQUEST:
logger.log(e.getMessage());
return Optional.of(deploymentFailed);
}
throw e;
}
}
private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) {
return installReal(id, true, logger);
}
private Optional<RunStatus> installReal(RunId id, DualLogger logger) {
return installReal(id, false, logger);
}
private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
logger.log(INFO, "Deployment expired before installation was successful.");
return Optional.of(installationFailed);
}
Versions versions = controller.jobController().run(id).get().versions();
Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform();
Run run = controller.jobController().run(id).get();
Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(id.application(), id.type().zone(controller.system())),
Optional.of(platform));
if (services.isEmpty()) {
logger.log("Config status not currently available -- will retry.");
Step step = setTheStage ? installInitialReal : installReal;
return run.stepInfo(step).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
? Optional.of(error)
: Optional.empty();
}
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
id.application(),
ImmutableSet.of(active, reserved));
List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
NodeList nodeList = NodeList.of(nodes, parents, services.get());
boolean firstTick = run.convergenceSummary().isEmpty();
if (firstTick) {
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
}
ConvergenceSummary summary = nodeList.summary();
if (summary.converged()) {
controller.jobController().locked(id, lockedRun -> lockedRun.withSummary(null));
if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) {
if (containersAreUp(id.application(), id.type().zone(controller.system()), logger)) {
logger.log("Installation succeeded!");
return Optional.of(running);
}
}
else if (timedOut(id, deployment.get(), endpointTimeout)) {
logger.log(WARNING, "Endpoints failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
return Optional.of(error);
}
}
boolean failed = false;
NodeList suspendedTooLong = nodeList.suspendedSince(controller.clock().instant().minus(installationTimeout));
if ( ! suspendedTooLong.isEmpty()) {
logger.log(INFO, "Some nodes have been suspended for more than " + installationTimeout.toMinutes() + " minutes.");
failed = true;
}
if (run.noNodesDownSince()
.map(since -> since.isBefore(controller.clock().instant().minus(installationTimeout)))
.orElse(false)) {
if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0)
logger.log(INFO, "No nodes allowed to suspend to progress installation for " + installationTimeout.toMinutes() + " minutes.");
else
logger.log(INFO, "Nodes not able to start with new application package.");
failed = true;
}
Duration timeout = JobRunner.jobTimeout.minusHours(1);
if (timedOut(id, deployment.get(), timeout)) {
logger.log(INFO, "Installation failed to complete within " + timeout.toHours() + "hours!");
failed = true;
}
if (failed) {
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
return Optional.of(installationFailed);
}
if ( ! firstTick)
logger.log(nodeList.allowedDown().asList().stream()
.flatMap(node -> nodeDetails(node, false))
.collect(toList()));
controller.jobController().locked(id, lockedRun -> {
Instant noNodesDownSince = summary.down() == 0 ? lockedRun.noNodesDownSince().orElse(controller.clock().instant()) : null;
return lockedRun.noNodesDownSince(noNodesDownSince).withSummary(summary);
});
return Optional.empty();
}
private Optional<RunStatus> installTester(RunId id, DualLogger logger) {
Run run = controller.jobController().run(id).get();
Version platform = controller.systemVersion();
ZoneId zone = id.type().zone(controller.system());
ApplicationId testerId = id.tester().id();
Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(testerId, zone),
Optional.of(platform));
if (services.isEmpty()) {
logger.log("Config status not currently available -- will retry.");
return run.stepInfo(installTester).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
? Optional.of(error)
: Optional.empty();
}
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone,
testerId,
ImmutableSet.of(active, reserved));
List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(zone,
nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
NodeList nodeList = NodeList.of(nodes, parents, services.get());
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, false))
.collect(toList()));
if (nodeList.summary().converged()) {
if (endpointsAvailable(testerId, zone, logger)) {
if (testerContainersAreUp(testerId, zone, logger)) {
logger.log("Tester container successfully installed!");
return Optional.of(running);
}
}
else if (run.stepInfo(installTester).get().startTime().get().plus(endpointTimeout).isBefore(controller.clock().instant())) {
logger.log(WARNING, "Tester failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
return Optional.of(error);
}
}
if (run.stepInfo(installTester).get().startTime().get().plus(testerTimeout).isBefore(controller.clock().instant())) {
logger.log(WARNING, "Installation of tester failed to complete within " + testerTimeout.toMinutes() + " minutes!");
return Optional.of(error);
}
return Optional.empty();
}
/** Returns true iff all containers in the deployment give 100 consecutive 200 OK responses on /status.html. */
private boolean containersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
var endpoints = controller.applications().clusterEndpoints(Set.of(new DeploymentId(id, zoneId)));
if ( ! endpoints.containsKey(zoneId))
return false;
for (URI endpoint : endpoints.get(zoneId).values()) {
boolean ready = id.instance().isTester() ? controller.jobController().cloud().testerReady(endpoint)
: controller.jobController().cloud().ready(endpoint);
if (!ready) {
logger.log("Failed to get 100 consecutive OKs from " + endpoint);
return false;
}
}
return true;
}
/** Returns true iff all containers in the tester deployment give 100 consecutive 200 OK responses on /status.html. */
private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) {
var endpoints = controller.applications().clusterEndpoints(Set.of(new DeploymentId(id, zone)));
if ( ! endpoints.containsKey(zone)) {
logger.log("Endpoints not yet ready.");
return false;
}
for (var endpoint : endpoints.get(zone).values())
if ( ! controller.jobController().cloud().exists(endpoint)) {
logger.log(INFO, "DNS lookup yielded no IP address for '" + endpoint + "'.");
return false;
}
logEndpoints(endpoints, logger);
return true;
}
private void logEndpoints(Map<ZoneId, Map<ClusterSpec.Id, URI>> endpoints, DualLogger logger) {
List<String> messages = new ArrayList<>();
messages.add("Found endpoints:");
endpoints.forEach((zone, uris) -> {
messages.add("- " + zone);
uris.forEach((cluster, uri) -> messages.add(" |-- " + uri + " (" + cluster + ")"));
});
logger.log(messages);
}
private Stream<String> nodeDetails(NodeWithServices node, boolean printAllServices) {
return Stream.concat(Stream.of(node.node().hostname() + ": " + humanize(node.node().serviceState()),
"--- platform " + node.node().wantedVersion() + (node.needsPlatformUpgrade()
? " <-- " + (node.node().currentVersion().isEmpty() ? "not booted" : node.node().currentVersion())
: "") +
(node.needsOsUpgrade() && node.isAllowedDown()
? ", upgrading OS (" + node.node().wantedOsVersion() + " <-- " + node.node().currentOsVersion() + ")"
: "") +
(node.needsFirmwareUpgrade() && node.isAllowedDown()
? ", upgrading firmware"
: "") +
(node.needsRestart()
? ", restart pending (" + node.node().wantedRestartGeneration() + " <-- " + node.node().restartGeneration() + ")"
: "") +
(node.needsReboot()
? ", reboot pending (" + node.node().wantedRebootGeneration() + " <-- " + node.node().rebootGeneration() + ")"
: "")),
node.services().stream()
.filter(service -> printAllServices || node.needsNewConfig())
.map(service -> "--- " + service.type() + " on port " + service.port() + (service.currentGeneration() == -1
? " has not started "
: " has config generation " + service.currentGeneration() + ", wanted is " + node.wantedConfigGeneration())));
}
private String humanize(Node.ServiceState state) {
switch (state) {
case allowedDown: return "allowed to be DOWN";
case expectedUp: return "expected to be UP";
case unorchestrated: return "unorchestrated";
default: return state.name();
}
}
private Optional<RunStatus> startTests(RunId id, boolean isSetup, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
logger.log(INFO, "Deployment expired before tests could start.");
return Optional.of(error);
}
var deployments = controller.applications().requireInstance(id.application())
.productionDeployments().keySet().stream()
.map(zone -> new DeploymentId(id.application(), zone))
.collect(Collectors.toSet());
ZoneId zoneId = id.type().zone(controller.system());
deployments.add(new DeploymentId(id.application(), zoneId));
logger.log("Attempting to find endpoints ...");
var endpoints = controller.applications().clusterEndpoints(deployments);
if ( ! endpoints.containsKey(zoneId)) {
logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!");
return Optional.of(error);
}
logEndpoints(endpoints, logger);
Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id);
if (useConfigServerForTesterAPI(zoneId)) {
if ( ! controller.serviceRegistry().configServer().isTesterReady(getTesterDeploymentId(id))) {
logger.log(WARNING, "Tester container went bad!");
return Optional.of(error);
}
} else {
if (testerEndpoint.isEmpty()) {
logger.log(WARNING, "Endpoints for the tester container vanished again, while it was still active!");
return Optional.of(error);
}
if ( ! controller.jobController().cloud().testerReady(testerEndpoint.get())) {
logger.log(WARNING, "Tester container went bad!");
return Optional.of(error);
}
}
logger.log("Starting tests ...");
TesterCloud.Suite suite = TesterCloud.Suite.of(id.type(), isSetup);
byte[] config = testConfigSerializer.configJson(id.application(),
id.type(),
true,
endpoints,
controller.applications().contentClustersByZone(deployments));
if (useConfigServerForTesterAPI(zoneId)) {
controller.serviceRegistry().configServer().startTests(getTesterDeploymentId(id), suite, config);
} else {
controller.jobController().cloud().startTests(testerEndpoint.get(), suite, config);
}
return Optional.of(running);
}
private boolean testerReady(RunId id, URI testerEndpoint) {
if (useConfigServerForTesterAPI(id.type().zone(controller.system()))) {
return controller.serviceRegistry().configServer().isTesterReady(getTesterDeploymentId(id));
} else {
return controller.jobController().cloud().testerReady(testerEndpoint);
}
}
private Optional<RunStatus> endTests(RunId id, DualLogger logger) {
if (deployment(id.application(), id.type()).isEmpty()) {
logger.log(INFO, "Deployment expired before tests could complete.");
return Optional.of(aborted);
}
Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate();
if (testerCertificate.isPresent()) {
try {
testerCertificate.get().checkValidity(Date.from(controller.clock().instant()));
}
catch (CertificateExpiredException | CertificateNotYetValidException e) {
logger.log(INFO, "Tester certificate expired before tests could complete.");
return Optional.of(aborted);
}
}
controller.jobController().updateTestLog(id);
TesterCloud.Status testStatus;
if (useConfigServerForTesterAPI(id.type().zone(controller.system()))) {
testStatus = controller.serviceRegistry().configServer().getTesterStatus(getTesterDeploymentId(id));
} else {
Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id);
if (testerEndpoint.isEmpty()) {
logger.log("Endpoints for tester not found -- trying again later.");
return Optional.empty();
}
testStatus = controller.jobController().cloud().getStatus(testerEndpoint.get());
}
switch (testStatus) {
case NOT_STARTED:
throw new IllegalStateException("Tester reports tests not started, even though they should have!");
case RUNNING:
return Optional.empty();
case FAILURE:
logger.log("Tests failed.");
return Optional.of(testFailure);
case ERROR:
logger.log(INFO, "Tester failed running its tests!");
return Optional.of(error);
case SUCCESS:
logger.log("Tests completed successfully.");
return Optional.of(running);
default:
throw new IllegalStateException("Unknown status '" + testStatus + "'!");
}
}
private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) {
if (deployment(id.application(), id.type()).isPresent())
try {
controller.jobController().updateVespaLog(id);
}
catch (Exception e) {
logger.log(INFO, "Failure getting vespa logs for " + id, e);
return Optional.of(error);
}
return Optional.of(running);
}
private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) {
try {
logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
controller.applications().deactivate(id.application(), id.type().zone(controller.system()));
return Optional.of(running);
}
catch (RuntimeException e) {
logger.log(WARNING, "Failed deleting application " + id.application(), e);
Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateReal).get().startTime().get();
return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(error)
: Optional.empty();
}
}
private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) {
try {
logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
controller.jobController().deactivateTester(id.tester(), id.type());
return Optional.of(running);
}
catch (RuntimeException e) {
logger.log(WARNING, "Failed deleting tester of " + id.application(), e);
Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateTester).get().startTime().get();
return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(error)
: Optional.empty();
}
}
private Optional<RunStatus> report(RunId id, DualLogger logger) {
try {
controller.jobController().active(id).ifPresent(run -> {
if (run.hasFailed())
sendNotification(run, logger);
});
}
catch (IllegalStateException e) {
logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e);
return Optional.of(error);
}
return Optional.of(running);
}
/** Sends a mail with a notification of a failed run, if one should be sent. */
private void sendNotification(Run run, DualLogger logger) {
Application application = controller.applications().requireApplication(TenantAndApplicationId.from(run.id().application()));
Notifications notifications = application.deploymentSpec().requireInstance(run.id().application().instance()).notifications();
boolean newCommit = application.require(run.id().application().instance()).change().application()
.map(run.versions().targetApplication()::equals)
.orElse(false);
When when = newCommit ? failingCommit : failing;
List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when));
if (notifications.emailRolesFor(when).contains(author))
run.versions().targetApplication().authorEmail().ifPresent(recipients::add);
if (recipients.isEmpty())
return;
try {
if (run.status() == outOfCapacity && run.id().type().isProduction())
controller.serviceRegistry().mailer().send(mails.outOfCapacity(run.id(), recipients));
if (run.status() == deploymentFailed)
controller.serviceRegistry().mailer().send(mails.deploymentFailure(run.id(), recipients));
if (run.status() == installationFailed)
controller.serviceRegistry().mailer().send(mails.installationFailure(run.id(), recipients));
if (run.status() == testFailure)
controller.serviceRegistry().mailer().send(mails.testFailure(run.id(), recipients));
if (run.status() == error)
controller.serviceRegistry().mailer().send(mails.systemError(run.id(), recipients));
}
catch (RuntimeException e) {
logger.log(INFO, "Exception trying to send mail for " + run.id(), e);
}
}
/** Returns the deployment of the real application in the zone of the given job, if it exists. */
private Optional<Deployment> deployment(ApplicationId id, JobType type) {
return Optional.ofNullable(application(id).deployments().get(type.zone(controller.system())));
}
/** Returns the real application with the given id. */
private Instance application(ApplicationId id) {
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { });
return controller.applications().requireInstance(id);
}
/**
* Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout.
*
* We time out the job before the deployment expires, for zones where deployments are not persistent,
* to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry,
* and the given default installation timeout, minus one minute, is used as a timeout threshold.
*/
private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) {
Run run = controller.jobController().run(id).get();
if ( ! controller.system().isCd() && run.start().isAfter(deployment.at()))
return false;
Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone())
.filter(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0)
.orElse(defaultTimeout);
return deployment.at().isBefore(controller.clock().instant().minus(timeout.minus(Duration.ofMinutes(1))));
}
/** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */
private ApplicationPackage testerPackage(RunId id) {
    ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication();
    DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec();
    ZoneId zone = id.type().zone(controller.system());
    // Tester certificates are only generated in public systems, and only for test jobs.
    boolean useTesterCertificate = controller.system().isPublic() && id.type().environment().isTest();
    // Generate the services.xml, using the tester flavor from the deployment spec if given,
    // falling back to larger default resources in AWS regions.
    byte[] servicesXml = servicesXml(controller.zoneRegistry().accessControlDomain(),
                                     ! controller.system().isPublic(),
                                     useTesterCertificate,
                                     testerFlavorFor(id, spec)
                                             .map(NodeResources::fromLegacyName)
                                             .orElse(zone.region().value().contains("aws-") ?
                                                     DEFAULT_TESTER_RESOURCES_AWS : DEFAULT_TESTER_RESOURCES));
    byte[] testPackage = controller.applications().applicationStore().getTester(id.application().tenant(), id.application().application(), version);
    byte[] deploymentXml = deploymentXml(id.tester(),
                                         spec.athenzDomain(),
                                         spec.requireInstance(id.application().instance()).athenzService(zone.environment(), zone.region()));
    // Assemble the zip: test artifacts first, then the generated config files, then (optionally) the certificate.
    try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) {
        zipBuilder.add(testPackage);
        zipBuilder.add("services.xml", servicesXml);
        zipBuilder.add("deployment.xml", deploymentXml);
        if (useTesterCertificate)
            appendAndStoreCertificate(zipBuilder, id);
        // Explicit close before reading the bytes, so the zip stream is flushed and finished.
        zipBuilder.close();
        return new ApplicationPackage(zipBuilder.toByteArray());
    }
}
/**
 * Generates a self-signed certificate for this run's tester, stores it with the job,
 * and adds the key and certificate PEMs to the tester package under artifacts/.
 */
private void appendAndStoreCertificate(ZipBuilder zipBuilder, RunId id) {
    KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048);
    // Subject encodes tester id, job type and run number, making the certificate traceable to this run.
    X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "." + id.number());
    X509Certificate certificate = X509CertificateBuilder.fromKeypair(keyPair,
                                                                     subject,
                                                                     controller.clock().instant(),
                                                                     controller.clock().instant().plus(certificateTimeout),
                                                                     SignatureAlgorithm.SHA512_WITH_RSA,
                                                                     BigInteger.valueOf(1))
                                                        .build();
    // Stored with the run so endTests can verify the certificate has not expired mid-run.
    controller.jobController().storeTesterCertificate(id, certificate);
    zipBuilder.add("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8));
    zipBuilder.add("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8));
}
/** Returns the deployment id of this run's tester: its application id in the zone of the run's job type. */
private DeploymentId getTesterDeploymentId(RunId runId) {
    return new DeploymentId(runId.tester().id(), runId.type().zone(controller.system()));
}
/** Returns whether tester API calls for the given zone should go through the config server, per feature flag. */
private boolean useConfigServerForTesterAPI(ZoneId zoneId) {
    // Resolve the feature flag with the zone as dimension.
    boolean viaConfigServer = Flags.USE_CONFIG_SERVER_FOR_TESTER_API_CALLS.bindTo(controller.flagSource())
                                   .with(FetchVector.Dimension.ZONE_ID, zoneId.value())
                                   .value();
    InternalStepRunner.logger.log(LogLevel.INFO, Flags.USE_CONFIG_SERVER_FOR_TESTER_API_CALLS.id().toString() +
                                                 " has value " + viaConfigServer + " in zone " + zoneId.value());
    return viaConfigServer;
}
/** Returns the tester flavor of the first zone of the first deployment-spec step concerning this job's environment, if any. */
private static Optional<String> testerFlavorFor(RunId id, DeploymentSpec spec) {
    return spec.steps().stream()
               .filter(step -> step.concerns(id.type().environment()))
               .findFirst()
               .flatMap(step -> step.zones().get(0).testerFlavor());
}
/** Returns the generated services.xml content for the tester application. */
static byte[] servicesXml(AthenzDomain domain, boolean systemUsesAthenz, boolean useTesterCertificate,
                          NodeResources resources) {
    // Reserve a fixed amount of memory for the jdisc container; the surefire test JVM
    // gets half of what remains (in Mb).
    int jdiscMemoryGb = 2;
    int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb());
    int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2);
    String resourceString = String.format(Locale.ENGLISH,
                                          "<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>",
                                          resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name());
    // Special case: the vespa.vespa.cd domain uses vespa.vespa identities for TLS key/cert paths.
    AthenzDomain idDomain = ("vespa.vespa.cd".equals(domain.value()) ? AthenzDomain.from("vespa.vespa") : domain);
    String servicesXml =
            "<?xml version='1.0' encoding='UTF-8'?>\n" +
            "<services xmlns:deploy='vespa' version='1.0'>\n" +
            "    <container version='1.0' id='tester'>\n" +
            "\n" +
            "        <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" +
            "            <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" +
            "                <artifactsPath>artifacts</artifactsPath>\n" +
            "                <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" +
            "                <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
            "                <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" +
            "            </config>\n" +
            "        </component>\n" +
            "\n" +
            "        <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" +
            // NOTE(review): this literal appears truncated at "http:" in this copy of the file
            // (the binding URI and closing tag are missing) — restore from version control.
            "            <binding>http:
            "        </handler>\n" +
            "\n" +
            "        <http>\n" +
            "            <!-- Make sure 4080 is the first port. This will be used by the config server. -->\n" +
            "            <server id='default' port='4080'/>\n" +
            "            <server id='testertls4443' port='4443'>\n" +
            "                <config name=\"jdisc.http.connector\">\n" +
            "                    <tlsClientAuthEnforcer>\n" +
            "                        <enable>true</enable>\n" +
            "                        <pathWhitelist>\n" +
            "                            <item>/status.html</item>\n" +
            "                            <item>/state/v1/config</item>\n" +
            "                        </pathWhitelist>\n" +
            "                    </tlsClientAuthEnforcer>\n" +
            "                </config>\n" +
            "                <ssl>\n" +
            "                    <private-key-file>/var/lib/sia/keys/" + idDomain.value() + ".tenant.key.pem</private-key-file>\n" +
            "                    <certificate-file>/var/lib/sia/certs/" + idDomain.value() + ".tenant.cert.pem</certificate-file>\n" +
            "                    <ca-certificates-file>/opt/yahoo/share/ssl/certs/athenz_certificate_bundle.pem</ca-certificates-file>\n" +
            "                    <client-authentication>want</client-authentication>\n" +
            "                </ssl>\n" +
            "            </server>\n" +
            "            <filtering>\n" +
            // Access control is only emitted when the system uses Athenz.
            (systemUsesAthenz ?
            "                <access-control domain='" + domain.value() + "'>\n" +
            "                    <exclude>\n" +
            // NOTE(review): this literal also appears truncated at "http:" — restore from version control.
            "                        <binding>http:
            "                    </exclude>\n" +
            "                </access-control>\n"
            : "") +
            "                <request-chain id=\"testrunner-api\">\n" +
            "                    <filter id='authz-filter' class='com.yahoo.jdisc.http.filter.security.athenz.AthenzAuthorizationFilter' bundle=\"jdisc-security-filters\">\n" +
            "                        <config name=\"jdisc.http.filter.security.athenz.athenz-authorization-filter\">\n" +
            "                            <credentialsToVerify>TOKEN_ONLY</credentialsToVerify>\n" +
            "                            <roleTokenHeaderName>Yahoo-Role-Auth</roleTokenHeaderName>\n" +
            "                        </config>\n" +
            "                        <component id=\"com.yahoo.jdisc.http.filter.security.athenz.StaticRequestResourceMapper\" bundle=\"jdisc-security-filters\">\n" +
            "                            <config name=\"jdisc.http.filter.security.athenz.static-request-resource-mapper\">\n" +
            "                                <resourceName>" + domain.value() + ":tester-application</resourceName>\n" +
            "                                <action>deploy</action>\n" +
            "                            </config>\n" +
            "                        </component>\n" +
            "                    </filter>\n" +
            "                </request-chain>\n" +
            "            </filtering>\n" +
            "        </http>\n" +
            "\n" +
            "        <nodes count=\"1\" allocated-memory=\"" + jdiscMemoryPct + "%\">\n" +
            "            " + resourceString + "\n" +
            "        </nodes>\n" +
            "    </container>\n" +
            "</services>\n";
    return servicesXml.getBytes(UTF_8);
}
/** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */
private static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) {
    StringBuilder xml = new StringBuilder();
    xml.append("<?xml version='1.0' encoding='UTF-8'?>\n")
       .append("<deployment version=\"1.0\" ");
    // Athenz attributes are only emitted when present.
    athenzDomain.ifPresent(domain -> xml.append("athenz-domain=\"").append(domain.value()).append("\" "));
    athenzService.ifPresent(service -> xml.append("athenz-service=\"").append(service.value()).append("\" "));
    xml.append(">")
       .append("  <instance id=\"").append(id.id().instance().value()).append("\" />")
       .append("</deployment>");
    return xml.toString().getBytes(UTF_8);
}
/** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */
private class DualLogger {

    private final RunId id;
    private final Step step;

    private DualLogger(RunId id, Step step) {
        this.id = id;
        this.step = step;
    }

    // Logs messages to the job log only (not the class logger), at INFO.
    private void log(String... messages) {
        log(List.of(messages));
    }

    private void log(List<String> messages) {
        controller.jobController().log(id, step, INFO, messages);
    }

    private void log(Level level, String message) {
        log(level, message, null);
    }

    // Use this when the exception is not to be shown to the user: the throwable goes only
    // to the internal class logger, while the bare message goes to the job log.
    private void logWithInternalException(Level level, String message, Throwable thrown) {
        logger.log(level, id + " at " + step + ": " + message, thrown);
        controller.jobController().log(id, step, level, message);
    }

    // Logs to both sinks; the stack trace, if any, is appended to the job log message.
    private void log(Level level, String message, Throwable thrown) {
        logger.log(level, id + " at " + step + ": " + message, thrown);

        if (thrown != null) {
            ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream();
            thrown.printStackTrace(new PrintStream(traceBuffer));
            message += "\n" + traceBuffer;
        }
        controller.jobController().log(id, step, level, message);
    }

}
} | class InternalStepRunner implements StepRunner {
private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName());

// Default resources for the tester container when the deployment spec gives no tester flavor.
private static final NodeResources DEFAULT_TESTER_RESOURCES =
        new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any);
// Must match the bundled test resources, which are compiled with a larger heap -- hence more memory in AWS regions.
private static final NodeResources DEFAULT_TESTER_RESOURCES_AWS =
        new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any);

// Maximum time to wait for endpoints to show up after convergence.
static final Duration endpointTimeout = Duration.ofMinutes(15);
// Maximum time to wait for the tester container installation to complete.
static final Duration testerTimeout = Duration.ofMinutes(30);
// Maximum time nodes may be suspended, or not allowed to suspend, during installation.
static final Duration installationTimeout = Duration.ofMinutes(60);
// Validity period of the generated tester certificate.
static final Duration certificateTimeout = Duration.ofMinutes(300);

private final Controller controller;
private final TestConfigSerializer testConfigSerializer;
private final DeploymentFailureMails mails;
/** Creates a step runner backed by the given controller, using its system for test config and its zone registry for mails. */
public InternalStepRunner(Controller controller) {
    this.controller = controller;
    this.testConfigSerializer = new TestConfigSerializer(controller.system());
    this.mails = new DeploymentFailureMails(controller.zoneRegistry());
}
/**
 * Dispatches the given locked step of the given run to its handler.
 *
 * Returns the resulting run status, or empty when the step made no progress and should be retried.
 */
@Override
public Optional<RunStatus> run(LockedStep step, RunId id) {
    DualLogger logger = new DualLogger(id, step.get());
    try {
        switch (step.get()) {
            case deployTester: return deployTester(id, logger);
            case deployInitialReal: return deployInitialReal(id, logger);
            case installInitialReal: return installInitialReal(id, logger);
            case deployReal: return deployReal(id, logger);
            case installTester: return installTester(id, logger);
            case installReal: return installReal(id, logger);
            case startStagingSetup: return startTests(id, true, logger);
            case endStagingSetup: return endTests(id, logger);
            case startTests: return startTests(id, false, logger);
            case endTests: return endTests(id, logger);
            case copyVespaLogs: return copyVespaLogs(id, logger);
            case deactivateReal: return deactivateReal(id, logger);
            case deactivateTester: return deactivateTester(id, logger);
            case report: return report(id, logger);
            default: throw new AssertionError("Unknown step '" + step + "'!");
        }
    }
    catch (UncheckedIOException e) {
        // IO problems are considered transient: log internally and retry the step.
        logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e);
        return Optional.empty();
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Unexpected exception running " + id, e);
        // Cleanup steps must always complete, so keep retrying those; otherwise fail the run.
        if (JobProfile.of(id.type()).alwaysRun().contains(step.get())) {
            logger.log("Will keep trying, as this is a cleanup step.");
            return Optional.empty();
        }
        return Optional.of(error);
    }
}
/** Deploys the source versions of this run (falling back to the targets), setting the stage for staging tests. */
private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) {
    Versions versions = controller.jobController().run(id).get().versions();
    Version platform = versions.sourcePlatform().orElse(versions.targetPlatform());
    ApplicationVersion application = versions.sourceApplication().orElse(versions.targetApplication());
    logger.log("Deploying platform version " + platform + " and application version " + application.id() + " ...");
    return deployReal(id, true, versions, logger);
}
/** Deploys the target versions of this run. */
private Optional<RunStatus> deployReal(RunId id, DualLogger logger) {
    Versions versions = controller.jobController().run(id).get().versions();
    String message = "Deploying platform version " + versions.targetPlatform() +
                     " and application version " + versions.targetApplication().id() + " ...";
    logger.log(message);
    return deployReal(id, false, versions, logger);
}
/**
 * Deploys the real application for this run.
 *
 * For manually deployed (dev/perf) environments the stored dev package and an explicit platform
 * version are used; for pipeline jobs the config server resolves both from the job context.
 */
private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, Versions versions, DualLogger logger) {
    Optional<ApplicationPackage> applicationPackage = id.type().environment().isManuallyDeployed()
            ? Optional.of(new ApplicationPackage(controller.applications().applicationStore()
                                                           .getDev(id.application(), id.type().zone(controller.system()))))
            : Optional.empty();
    Optional<Version> vespaVersion = id.type().environment().isManuallyDeployed()
            ? Optional.of(versions.targetPlatform())
            : Optional.empty();
    // Delegate the shared deploy/retry logic, timing out from this step's start time.
    return deploy(id.application(),
                  id.type(),
                  () -> controller.applications().deploy(id.application(),
                                                         id.type().zone(controller.system()),
                                                         applicationPackage,
                                                         new DeployOptions(false,
                                                                           vespaVersion,
                                                                           false,
                                                                           setTheStage)),
                  controller.jobController().run(id).get()
                            .stepInfo(setTheStage ? deployInitialReal : deployReal).get()
                            .startTime().get(),
                  logger);
}
/** Deploys the tester container for this run, on the current system version. */
private Optional<RunStatus> deployTester(RunId id, DualLogger logger) {
    Version platform = controller.systemVersion();
    logger.log("Deploying the tester container on platform " + platform + " ...");
    return deploy(id.tester().id(),
                  id.type(),
                  () -> controller.applications().deployTester(id.tester(),
                                                               testerPackage(id),
                                                               id.type().zone(controller.system()),
                                                               // First flag pins the platform version; see DeployOptions.
                                                               new DeployOptions(true,
                                                                                 Optional.of(platform),
                                                                                 false,
                                                                                 false)),
                  controller.jobController().run(id).get()
                            .stepInfo(deployTester).get()
                            .startTime().get(),
                  logger);
}
/**
 * Runs the given deployment, interpreting its prepare response and any config server errors.
 *
 * Returns running on success, a failure status for permanent errors, or empty to retry
 * transient errors (until an hour has passed since the step started).
 */
private Optional<RunStatus> deploy(ApplicationId id, JobType type, Supplier<ActivateResult> deployment,
                                   Instant startTime, DualLogger logger) {
    try {
        PrepareResponse prepareResponse = deployment.get().prepareResponse();
        // Disallowed re-feed actions mean the package is incompatible with the deployed data: fail the deployment.
        if ( ! prepareResponse.configChangeActions.refeedActions.stream().allMatch(action -> action.allowed)) {
            List<String> messages = new ArrayList<>();
            messages.add("Deploy failed due to non-compatible changes that require re-feed.");
            messages.add("Your options are:");
            messages.add("1. Revert the incompatible changes.");
            messages.add("2. If you think it is safe in your case, you can override this validation, see");
            // NOTE(review): this literal appears truncated at "http:" in this copy of the file
            // (the documentation URL and closing quote are missing) -- restore from version control.
            messages.add("   http:
            messages.add("3. Deploy as a new application under a different name.");
            messages.add("Illegal actions:");
            prepareResponse.configChangeActions.refeedActions.stream()
                          .filter(action -> ! action.allowed)
                          .flatMap(action -> action.messages.stream())
                          .forEach(messages::add);
            messages.add("Details:");
            prepareResponse.log.stream()
                          .map(entry -> entry.message)
                          .forEach(messages::add);
            logger.log(messages);
            return Optional.of(deploymentFailed);
        }

        // Restart actions are carried out immediately, one host at a time.
        if (prepareResponse.configChangeActions.restartActions.isEmpty())
            logger.log("No services requiring restart.");
        else
            prepareResponse.configChangeActions.restartActions.stream()
                          .flatMap(action -> action.services.stream())
                          .map(service -> service.hostName)
                          .sorted().distinct()
                          .map(Hostname::new)
                          .forEach(hostname -> {
                              controller.applications().restart(new DeploymentId(id, type.zone(controller.system())), Optional.of(hostname));
                              logger.log("Restarting services on host " + hostname.id() + ".");
                          });
        logger.log("Deployment successful.");
        if (prepareResponse.message != null)
            logger.log(prepareResponse.message);

        return Optional.of(running);
    }
    catch (ConfigServerException e) {
        // Transient errors are retried until an hour after the step started; then they fail the deployment.
        Optional<RunStatus> result = startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
                                     ? Optional.of(deploymentFailed) : Optional.empty();
        switch (e.getErrorCode()) {
            case ACTIVATION_CONFLICT:
            case APPLICATION_LOCK_FAILURE:
            case CERTIFICATE_NOT_READY:
                logger.log("Deployment failed with possibly transient error " + e.getErrorCode() +
                           ", will retry: " + e.getMessage());
                return result;
            case LOAD_BALANCER_NOT_READY:
            case PARENT_HOST_NOT_READY:
                logger.log(e.getServerMessage());
                return result;
            case OUT_OF_CAPACITY:
                logger.log(e.getServerMessage());
                return Optional.of(outOfCapacity);
            case INVALID_APPLICATION_PACKAGE:
            case BAD_REQUEST:
                logger.log(e.getMessage());
                return Optional.of(deploymentFailed);
        }
        // Unrecognised config server errors propagate to the generic handler in run().
        throw e;
    }
}
/** Awaits installation of the initial (source) version of the real application, for staging setup. */
private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) {
    return installReal(id, true, logger);
}

/** Awaits installation of the target version of the real application. */
private Optional<RunStatus> installReal(RunId id, DualLogger logger) {
    return installReal(id, false, logger);
}
/**
 * Polls convergence of the real application's deployment: service config generations,
 * node states, and finally endpoint visibility and container readiness.
 *
 * Returns running when fully installed, a failure status on timeout or expiry,
 * or empty to poll again later.
 */
private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) {
    Optional<Deployment> deployment = deployment(id.application(), id.type());
    if (deployment.isEmpty()) {
        logger.log(INFO, "Deployment expired before installation was successful.");
        return Optional.of(installationFailed);
    }

    Versions versions = controller.jobController().run(id).get().versions();
    // During staging setup we await the source platform, otherwise the target.
    Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform();

    Run run = controller.jobController().run(id).get();
    Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(id.application(), id.type().zone(controller.system())),
                                                                                                           Optional.of(platform));
    if (services.isEmpty()) {
        // No convergence info yet: retry, but give up if this step has been stuck for five minutes.
        logger.log("Config status not currently available -- will retry.");
        Step step = setTheStage ? installInitialReal : installReal;
        return run.stepInfo(step).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
               ? Optional.of(error)
               : Optional.empty();
    }
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
                                                                                        id.application(),
                                                                                        ImmutableSet.of(active, reserved));
    List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
                                                                                          nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
    NodeList nodeList = NodeList.of(nodes, parents, services.get());
    // On the first tick, log the full node details once.
    boolean firstTick = run.convergenceSummary().isEmpty();
    if (firstTick) {
        logger.log(nodeList.asList().stream()
                           .flatMap(node -> nodeDetails(node, true))
                           .collect(toList()));
    }
    ConvergenceSummary summary = nodeList.summary();
    if (summary.converged()) {
        // Converged: clear the stored summary, then wait for endpoints and container readiness.
        controller.jobController().locked(id, lockedRun -> lockedRun.withSummary(null));
        if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) {
            if (containersAreUp(id.application(), id.type().zone(controller.system()), logger)) {
                logger.log("Installation succeeded!");
                return Optional.of(running);
            }
        }
        else if (timedOut(id, deployment.get(), endpointTimeout)) {
            logger.log(WARNING, "Endpoints failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
            return Optional.of(error);
        }
    }

    // Not converged (or endpoints pending): check the various failure conditions.
    boolean failed = false;
    NodeList suspendedTooLong = nodeList.suspendedSince(controller.clock().instant().minus(installationTimeout));
    if ( ! suspendedTooLong.isEmpty()) {
        logger.log(INFO, "Some nodes have been suspended for more than " + installationTimeout.toMinutes() + " minutes.");
        failed = true;
    }

    if (run.noNodesDownSince()
           .map(since -> since.isBefore(controller.clock().instant().minus(installationTimeout)))
           .orElse(false)) {
        if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0)
            logger.log(INFO, "No nodes allowed to suspend to progress installation for " + installationTimeout.toMinutes() + " minutes.");
        else
            logger.log(INFO, "Nodes not able to start with new application package.");
        failed = true;
    }

    Duration timeout = JobRunner.jobTimeout.minusHours(1);
    if (timedOut(id, deployment.get(), timeout)) {
        logger.log(INFO, "Installation failed to complete within " + timeout.toHours() + "hours!");
        failed = true;
    }

    if (failed) {
        // Log full node details so the failure can be diagnosed from the job log.
        logger.log(nodeList.asList().stream()
                           .flatMap(node -> nodeDetails(node, true))
                           .collect(toList()));
        return Optional.of(installationFailed);
    }

    if ( ! firstTick)
        logger.log(nodeList.allowedDown().asList().stream()
                           .flatMap(node -> nodeDetails(node, false))
                           .collect(toList()));

    // Record progress: reset the no-nodes-down clock whenever any node is down.
    controller.jobController().locked(id, lockedRun -> {
        Instant noNodesDownSince = summary.down() == 0 ? lockedRun.noNodesDownSince().orElse(controller.clock().instant()) : null;
        return lockedRun.noNodesDownSince(noNodesDownSince).withSummary(summary);
    });

    return Optional.empty();
}
/**
 * Polls convergence of the tester deployment, then endpoint visibility and tester readiness.
 *
 * Returns running when the tester is up, error on timeout, or empty to poll again later.
 */
private Optional<RunStatus> installTester(RunId id, DualLogger logger) {
    Run run = controller.jobController().run(id).get();
    Version platform = controller.systemVersion();
    ZoneId zone = id.type().zone(controller.system());
    ApplicationId testerId = id.tester().id();

    Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(testerId, zone),
                                                                                                           Optional.of(platform));
    if (services.isEmpty()) {
        // No convergence info yet: retry, but give up after five minutes in this step.
        logger.log("Config status not currently available -- will retry.");
        return run.stepInfo(installTester).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
               ? Optional.of(error)
               : Optional.empty();
    }
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone,
                                                                                        testerId,
                                                                                        ImmutableSet.of(active, reserved));
    List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(zone,
                                                                                          nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
    NodeList nodeList = NodeList.of(nodes, parents, services.get());
    logger.log(nodeList.asList().stream()
                       .flatMap(node -> nodeDetails(node, false))
                       .collect(toList()));

    if (nodeList.summary().converged()) {
        if (endpointsAvailable(testerId, zone, logger)) {
            if (testerContainersAreUp(testerId, zone, logger)) {
                logger.log("Tester container successfully installed!");
                return Optional.of(running);
            }
        }
        else if (run.stepInfo(installTester).get().startTime().get().plus(endpointTimeout).isBefore(controller.clock().instant())) {
            logger.log(WARNING, "Tester failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
            return Optional.of(error);
        }
    }

    if (run.stepInfo(installTester).get().startTime().get().plus(testerTimeout).isBefore(controller.clock().instant())) {
        logger.log(WARNING, "Installation of tester failed to complete within " + testerTimeout.toMinutes() + " minutes!");
        return Optional.of(error);
    }

    return Optional.empty();
}
/** Returns true iff all containers in the deployment give 100 consecutive 200 OK responses on /status.html. */
private boolean containersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
    var endpoints = controller.applications().clusterEndpoints(Set.of(new DeploymentId(id, zoneId)));
    if ( ! endpoints.containsKey(zoneId))
        return false;

    // Tester instances are polled through the tester readiness check; real instances through the regular one.
    boolean isTester = id.instance().isTester();
    for (URI endpoint : endpoints.get(zoneId).values()) {
        boolean ready = isTester ? controller.jobController().cloud().testerReady(endpoint)
                                 : controller.jobController().cloud().ready(endpoint);
        if ( ! ready) {
            logger.log("Failed to get 100 consecutive OKs from " + endpoint);
            return false;
        }
    }
    return true;
}
/** Returns true iff endpoints are defined for the deployment in the given zone, and they all resolve in DNS. */
private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) {
    var endpoints = controller.applications().clusterEndpoints(Set.of(new DeploymentId(id, zone)));
    if ( ! endpoints.containsKey(zone)) {
        logger.log("Endpoints not yet ready.");
        return false;
    }
    // Every endpoint must have a visible DNS entry before callers may use them.
    for (var endpoint : endpoints.get(zone).values())
        if ( ! controller.jobController().cloud().exists(endpoint)) {
            logger.log(INFO, "DNS lookup yielded no IP address for '" + endpoint + "'.");
            return false;
        }

    logEndpoints(endpoints, logger);
    return true;
}
/** Logs all known endpoints, one line per zone and one indented line per cluster endpoint, as a single job-log entry. */
private void logEndpoints(Map<ZoneId, Map<ClusterSpec.Id, URI>> endpoints, DualLogger logger) {
    List<String> lines = new ArrayList<>();
    lines.add("Found endpoints:");
    for (var zoneEntry : endpoints.entrySet()) {
        lines.add("- " + zoneEntry.getKey());
        for (var clusterEntry : zoneEntry.getValue().entrySet())
            lines.add(" |-- " + clusterEntry.getValue() + " (" + clusterEntry.getKey() + ")");
    }
    logger.log(lines);
}
/**
 * Renders a human-readable status for the given node and, optionally, each of its services:
 * first a line with orchestration state, then a line with pending platform/OS/firmware/restart/reboot
 * work, then one line per service (all of them when printAllServices, otherwise only when the node
 * still needs new config).
 */
private Stream<String> nodeDetails(NodeWithServices node, boolean printAllServices) {
    return Stream.concat(Stream.of(node.node().hostname() + ": " + humanize(node.node().serviceState()),
                                   "--- platform " + node.node().wantedVersion() + (node.needsPlatformUpgrade()
                                           ? " <-- " + (node.node().currentVersion().isEmpty() ? "not booted" : node.node().currentVersion())
                                           : "") +
                                   (node.needsOsUpgrade() && node.isAllowedDown()
                                           ? ", upgrading OS (" + node.node().wantedOsVersion() + " <-- " + node.node().currentOsVersion() + ")"
                                           : "") +
                                   (node.needsFirmwareUpgrade() && node.isAllowedDown()
                                           ? ", upgrading firmware"
                                           : "") +
                                   (node.needsRestart()
                                           ? ", restart pending (" + node.node().wantedRestartGeneration() + " <-- " + node.node().restartGeneration() + ")"
                                           : "") +
                                   (node.needsReboot()
                                           ? ", reboot pending (" + node.node().wantedRebootGeneration() + " <-- " + node.node().rebootGeneration() + ")"
                                           : "")),
                         node.services().stream()
                             .filter(service -> printAllServices || node.needsNewConfig())
                             // currentGeneration() == -1 indicates the service has not yet reported any config generation.
                             .map(service -> "--- " + service.type() + " on port " + service.port() + (service.currentGeneration() == -1
                                     ? " has not started "
                                     : " has config generation " + service.currentGeneration() + ", wanted is " + node.wantedConfigGeneration())));
}
/** Translates an orchestration service state to a human-readable phrase; unknown states fall back to their enum name. */
private String humanize(Node.ServiceState state) {
    if (state == Node.ServiceState.allowedDown) return "allowed to be DOWN";
    if (state == Node.ServiceState.expectedUp) return "expected to be UP";
    if (state == Node.ServiceState.unorchestrated) return "unorchestrated";
    return state.name();
}
/**
 * Starts the test suite for this run (staging setup when isSetup), feeding the tester the
 * endpoints of the deployment under test and all production deployments.
 *
 * Returns running when tests were started, error when prerequisites vanished.
 */
private Optional<RunStatus> startTests(RunId id, boolean isSetup, DualLogger logger) {
    Optional<Deployment> deployment = deployment(id.application(), id.type());
    if (deployment.isEmpty()) {
        logger.log(INFO, "Deployment expired before tests could start.");
        return Optional.of(error);
    }

    // Collect all production deployments, plus the deployment in this job's zone.
    var deployments = controller.applications().requireInstance(id.application())
                                .productionDeployments().keySet().stream()
                                .map(zone -> new DeploymentId(id.application(), zone))
                                .collect(Collectors.toSet());
    ZoneId zoneId = id.type().zone(controller.system());
    deployments.add(new DeploymentId(id.application(), zoneId));

    logger.log("Attempting to find endpoints ...");
    var endpoints = controller.applications().clusterEndpoints(deployments);
    if ( ! endpoints.containsKey(zoneId)) {
        logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!");
        return Optional.of(error);
    }
    logEndpoints(endpoints, logger);

    // Verify the tester is still reachable, via config server or directly, per feature flag.
    Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id);
    if (useConfigServerForTesterAPI(zoneId)) {
        if ( ! controller.serviceRegistry().configServer().isTesterReady(getTesterDeploymentId(id))) {
            logger.log(WARNING, "Tester container went bad!");
            return Optional.of(error);
        }
    } else {
        if (testerEndpoint.isEmpty()) {
            logger.log(WARNING, "Endpoints for the tester container vanished again, while it was still active!");
            return Optional.of(error);
        }

        if ( ! controller.jobController().cloud().testerReady(testerEndpoint.get())) {
            logger.log(WARNING, "Tester container went bad!");
            return Optional.of(error);
        }
    }

    logger.log("Starting tests ...");
    TesterCloud.Suite suite = TesterCloud.Suite.of(id.type(), isSetup);
    byte[] config = testConfigSerializer.configJson(id.application(),
                                                    id.type(),
                                                    true,
                                                    endpoints,
                                                    controller.applications().contentClustersByZone(deployments));
    if (useConfigServerForTesterAPI(zoneId)) {
        controller.serviceRegistry().configServer().startTests(getTesterDeploymentId(id), suite, config);
    } else {
        controller.jobController().cloud().startTests(testerEndpoint.get(), suite, config);
    }
    return Optional.of(running);
}
/** Returns whether the tester for this run is ready, asking the config server or the tester endpoint per feature flag. */
private boolean testerReady(RunId id, URI testerEndpoint) {
    return useConfigServerForTesterAPI(id.type().zone(controller.system()))
           ? controller.serviceRegistry().configServer().isTesterReady(getTesterDeploymentId(id))
           : controller.jobController().cloud().testerReady(testerEndpoint);
}
/**
 * Polls the tester for test progress, mapping its status to a run status.
 *
 * Returns empty while tests are still running (or the tester is unreachable), aborted if the
 * deployment or tester certificate expired, and otherwise the terminal status of the tests.
 */
private Optional<RunStatus> endTests(RunId id, DualLogger logger) {
    if (deployment(id.application(), id.type()).isEmpty()) {
        logger.log(INFO, "Deployment expired before tests could complete.");
        return Optional.of(aborted);
    }

    // The generated tester certificate must still be valid for results to be trusted.
    Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate();
    if (testerCertificate.isPresent()) {
        try {
            testerCertificate.get().checkValidity(Date.from(controller.clock().instant()));
        }
        catch (CertificateExpiredException | CertificateNotYetValidException e) {
            logger.log(INFO, "Tester certificate expired before tests could complete.");
            return Optional.of(aborted);
        }
    }

    controller.jobController().updateTestLog(id);

    // Fetch the status via config server or directly from the tester, per feature flag.
    TesterCloud.Status testStatus;
    if (useConfigServerForTesterAPI(id.type().zone(controller.system()))) {
        testStatus = controller.serviceRegistry().configServer().getTesterStatus(getTesterDeploymentId(id));
    } else {
        Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id);
        if (testerEndpoint.isEmpty()) {
            logger.log("Endpoints for tester not found -- trying again later.");
            return Optional.empty();
        }
        testStatus = controller.jobController().cloud().getStatus(testerEndpoint.get());
    }

    switch (testStatus) {
        case NOT_STARTED:
            throw new IllegalStateException("Tester reports tests not started, even though they should have!");
        case RUNNING:
            return Optional.empty();
        case FAILURE:
            logger.log("Tests failed.");
            return Optional.of(testFailure);
        case ERROR:
            logger.log(INFO, "Tester failed running its tests!");
            return Optional.of(error);
        case SUCCESS:
            logger.log("Tests completed successfully.");
            return Optional.of(running);
        default:
            throw new IllegalStateException("Unknown status '" + testStatus + "'!");
    }
}
/** Copies the Vespa log from the deployment into the job log, when the deployment still exists. */
private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) {
    if (deployment(id.application(), id.type()).isPresent()) {
        try {
            controller.jobController().updateVespaLog(id);
        }
        catch (Exception e) {
            logger.log(INFO, "Failure getting vespa logs for " + id, e);
            return Optional.of(error);
        }
    }
    return Optional.of(running);
}
/** Deactivates the real deployment of this run; failures are retried for an hour before the step errors out. */
private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) {
    try {
        logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
        controller.applications().deactivate(id.application(), id.type().zone(controller.system()));
        return Optional.of(running);
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Failed deleting application " + id.application(), e);
        Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateReal).get().startTime().get();
        if (startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1))))
            return Optional.of(error);
        return Optional.empty();
    }
}
/** Deactivates the tester of this run; failures are retried for an hour before the step errors out. */
private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) {
    try {
        logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
        controller.jobController().deactivateTester(id.tester(), id.type());
        return Optional.of(running);
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Failed deleting tester of " + id.application(), e);
        Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateTester).get().startTime().get();
        if (startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1))))
            return Optional.of(error);
        return Optional.empty();
    }
}
/**
 * Final reporting step: sends failure notifications for the run, if it failed.
 *
 * Returns running on success, error if the run is unexpectedly no longer active.
 */
private Optional<RunStatus> report(RunId id, DualLogger logger) {
    try {
        controller.jobController().active(id).ifPresent(run -> {
            if (run.hasFailed())
                sendNotification(run, logger);
        });
    }
    catch (IllegalStateException e) {
        logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e);
        return Optional.of(error);
    }
    return Optional.of(running);
}
/** Sends a mail with a notification of a failed run, if one should be sent. */
private void sendNotification(Run run, DualLogger logger) {
    Application application = controller.applications().requireApplication(TenantAndApplicationId.from(run.id().application()));
    Notifications notifications = application.deploymentSpec().requireInstance(run.id().application().instance()).notifications();
    // A "new commit" failure is one where the change currently rolling out matches this run's target.
    boolean newCommit = application.require(run.id().application().instance()).change().application()
                                   .map(run.versions().targetApplication()::equals)
                                   .orElse(false);
    When when = newCommit ? failingCommit : failing;

    // Recipients: configured addresses, plus the commit author when the author role is subscribed.
    List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when));
    if (notifications.emailRolesFor(when).contains(author))
        run.versions().targetApplication().authorEmail().ifPresent(recipients::add);
    if (recipients.isEmpty())
        return;

    try {
        // Run statuses are mutually exclusive, so at most one mail is sent.
        if (run.status() == outOfCapacity && run.id().type().isProduction())
            controller.serviceRegistry().mailer().send(mails.outOfCapacity(run.id(), recipients));
        if (run.status() == deploymentFailed)
            controller.serviceRegistry().mailer().send(mails.deploymentFailure(run.id(), recipients));
        if (run.status() == installationFailed)
            controller.serviceRegistry().mailer().send(mails.installationFailure(run.id(), recipients));
        if (run.status() == testFailure)
            controller.serviceRegistry().mailer().send(mails.testFailure(run.id(), recipients));
        if (run.status() == error)
            controller.serviceRegistry().mailer().send(mails.systemError(run.id(), recipients));
    }
    catch (RuntimeException e) {
        // Mail delivery is best-effort; never fail the report step over it.
        logger.log(INFO, "Exception trying to send mail for " + run.id(), e);
    }
}
/** Returns the deployment of the real application in the zone of the given job, if it exists. */
private Optional<Deployment> deployment(ApplicationId id, JobType type) {
    // Look up the zone this job type maps to in the current system, then the deployment there, if any.
    ZoneId zone = type.zone(controller.system());
    return Optional.ofNullable(application(id).deployments().get(zone));
}
/** Returns the real application with the given id. */
private Instance application(ApplicationId id) {
    // Takes and immediately releases the application lock with a no-op action — presumably to
    // serialize with any in-flight modification so the read below is current. NOTE(review): confirm intent.
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { });
    return controller.applications().requireInstance(id);
}
/**
 * Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout.
 *
 * We time out the job before the deployment expires, for zones where deployments are not persistent,
 * to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry,
 * and the given default installation timeout, minus one minute, is used as a timeout threshold.
 */
private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) {
    Run run = controller.jobController().run(id).get();
    // Outside CD systems, a deployment made after this run started belongs to a newer change; never time out on it.
    if ( ! controller.system().isCd() && run.start().isAfter(deployment.at()))
        return false;
    // Use the zone's deployment TTL when it is shorter than the given default timeout.
    Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone())
    .filter(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0)
    .orElse(defaultTimeout);
    // Subtract one minute to leave room for log collection before the deployment actually expires.
    return deployment.at().isBefore(controller.clock().instant().minus(timeout.minus(Duration.ofMinutes(1))));
}
/** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */
private ApplicationPackage testerPackage(RunId id) {
    ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication();
    DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec();
    ZoneId zone = id.type().zone(controller.system());
    // Tester certificates are only generated for test environments in public systems.
    boolean useTesterCertificate = controller.system().isPublic() && id.type().environment().isTest();
    // Generate services.xml; Athenz credentials are used in non-public systems only.
    byte[] servicesXml = servicesXml(controller.zoneRegistry().accessControlDomain(),
    ! controller.system().isPublic(),
    useTesterCertificate,
    testerFlavorFor(id, spec)
    .map(NodeResources::fromLegacyName)
    .orElse(zone.region().value().contains("aws-") ?
    DEFAULT_TESTER_RESOURCES_AWS : DEFAULT_TESTER_RESOURCES));
    // The test fat-jar built for this application version.
    byte[] testPackage = controller.applications().applicationStore().getTester(id.application().tenant(), id.application().application(), version);
    byte[] deploymentXml = deploymentXml(id.tester(),
    spec.athenzDomain(),
    spec.requireInstance(id.application().instance()).athenzService(zone.environment(), zone.region()));
    // Assemble the final zip: test package + generated services.xml and deployment.xml (+ optional certificate).
    try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) {
        zipBuilder.add(testPackage);
        zipBuilder.add("services.xml", servicesXml);
        zipBuilder.add("deployment.xml", deploymentXml);
        if (useTesterCertificate)
            appendAndStoreCertificate(zipBuilder, id);
        zipBuilder.close();
        return new ApplicationPackage(zipBuilder.toByteArray());
    }
}
/**
 * Generates a self-signed key pair and certificate for the tester, stores the certificate with the run,
 * and adds both key and certificate to the tester package under artifacts/.
 */
private void appendAndStoreCertificate(ZipBuilder zipBuilder, RunId id) {
    KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048);
    // Subject encodes tester id, job type and run number, so the certificate identifies this exact run.
    X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "." + id.number());
    X509Certificate certificate = X509CertificateBuilder.fromKeypair(keyPair,
    subject,
    controller.clock().instant(),
    controller.clock().instant().plus(certificateTimeout),
    SignatureAlgorithm.SHA512_WITH_RSA,
    BigInteger.valueOf(1))
    .build();
    // Store the certificate with the run, so endTests can verify it has not expired.
    controller.jobController().storeTesterCertificate(id, certificate);
    zipBuilder.add("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8));
    zipBuilder.add("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8));
}
/** Resolves the deployment id of the tester application for the given run. */
private DeploymentId getTesterDeploymentId(RunId runId) {
    return new DeploymentId(runId.tester().id(),
                            runId.type().zone(controller.system()));
}
/**
 * Returns whether tester API calls for the given zone should go through the config server,
 * as decided by a per-zone feature flag; logs the decision for debugging.
 */
private boolean useConfigServerForTesterAPI(ZoneId zoneId) {
    BooleanFlag useConfigServerForTesterAPI = Flags.USE_CONFIG_SERVER_FOR_TESTER_API_CALLS.bindTo(controller.flagSource());
    boolean useConfigServer = useConfigServerForTesterAPI.with(FetchVector.Dimension.ZONE_ID, zoneId.value()).value();
    InternalStepRunner.logger.log(LogLevel.INFO, Flags.USE_CONFIG_SERVER_FOR_TESTER_API_CALLS.id().toString() +
    " has value " + useConfigServer + " in zone " + zoneId.value());
    return useConfigServer;
}
/**
 * Returns the tester flavor configured for the first deployment spec step which concerns
 * the environment of the given job, taken from that step's first zone; empty if none is set.
 */
private static Optional<String> testerFlavorFor(RunId id, DeploymentSpec spec) {
    return spec.steps().stream()
               .filter(step -> step.concerns(id.type().environment()))
               .findFirst()
               .flatMap(step -> step.zones().get(0).testerFlavor());
}
/** Returns the generated services.xml content for the tester application. */
// NOTE(review): several string literals below appear truncated at "http:" — looks like an
// extraction artifact in this copy of the source; verify against the upstream file.
static byte[] servicesXml(AthenzDomain domain, boolean systemUsesAthenz, boolean useTesterCertificate,
NodeResources resources) {
    // Reserve 2 Gb for the jdisc container; the rest (halved) goes to the surefire test JVM.
    int jdiscMemoryGb = 2;
    int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb());
    int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2);
    String resourceString = String.format(Locale.ENGLISH,
    "<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>",
    resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name());
    // Special-case the CD domain: its service identity lives in the parent "vespa.vespa" domain.
    AthenzDomain idDomain = ("vespa.vespa.cd".equals(domain.value()) ? AthenzDomain.from("vespa.vespa") : domain);
    String servicesXml =
    "<?xml version='1.0' encoding='UTF-8'?>\n" +
    "<services xmlns:deploy='vespa' version='1.0'>\n" +
    " <container version='1.0' id='tester'>\n" +
    "\n" +
    " <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" +
    " <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" +
    " <artifactsPath>artifacts</artifactsPath>\n" +
    " <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" +
    " <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
    " <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" +
    " </config>\n" +
    " </component>\n" +
    "\n" +
    " <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" +
    " <binding>http:
    " </handler>\n" +
    "\n" +
    " <http>\n" +
    " <!-- Make sure 4080 is the first port. This will be used by the config server. -->\n" +
    " <server id='default' port='4080'/>\n" +
    " <server id='testertls4443' port='4443'>\n" +
    " <config name=\"jdisc.http.connector\">\n" +
    " <tlsClientAuthEnforcer>\n" +
    " <enable>true</enable>\n" +
    " <pathWhitelist>\n" +
    " <item>/status.html</item>\n" +
    " <item>/state/v1/config</item>\n" +
    " </pathWhitelist>\n" +
    " </tlsClientAuthEnforcer>\n" +
    " </config>\n" +
    " <ssl>\n" +
    " <private-key-file>/var/lib/sia/keys/" + idDomain.value() + ".tenant.key.pem</private-key-file>\n" +
    " <certificate-file>/var/lib/sia/certs/" + idDomain.value() + ".tenant.cert.pem</certificate-file>\n" +
    " <ca-certificates-file>/opt/yahoo/share/ssl/certs/athenz_certificate_bundle.pem</ca-certificates-file>\n" +
    " <client-authentication>want</client-authentication>\n" +
    " </ssl>\n" +
    " </server>\n" +
    " <filtering>\n" +
    (systemUsesAthenz ?
    " <access-control domain='" + domain.value() + "'>\n" +
    " <exclude>\n" +
    " <binding>http:
    " </exclude>\n" +
    " </access-control>\n"
    : "") +
    " <request-chain id=\"testrunner-api\">\n" +
    " <filter id='authz-filter' class='com.yahoo.jdisc.http.filter.security.athenz.AthenzAuthorizationFilter' bundle=\"jdisc-security-filters\">\n" +
    " <config name=\"jdisc.http.filter.security.athenz.athenz-authorization-filter\">\n" +
    " <credentialsToVerify>TOKEN_ONLY</credentialsToVerify>\n" +
    " <roleTokenHeaderName>Yahoo-Role-Auth</roleTokenHeaderName>\n" +
    " </config>\n" +
    " <component id=\"com.yahoo.jdisc.http.filter.security.athenz.StaticRequestResourceMapper\" bundle=\"jdisc-security-filters\">\n" +
    " <config name=\"jdisc.http.filter.security.athenz.static-request-resource-mapper\">\n" +
    " <resourceName>" + domain.value() + ":tester-application</resourceName>\n" +
    " <action>deploy</action>\n" +
    " </config>\n" +
    " </component>\n" +
    " </filter>\n" +
    " </request-chain>\n" +
    " </filtering>\n" +
    " </http>\n" +
    "\n" +
    " <nodes count=\"1\" allocated-memory=\"" + jdiscMemoryPct + "%\">\n" +
    " " + resourceString + "\n" +
    " </nodes>\n" +
    " </container>\n" +
    "</services>\n";
    return servicesXml.getBytes(UTF_8);
}
/** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */
private static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) {
    // Athenz attributes are emitted only when present, so the xml stays valid without them.
    String deploymentSpec =
    "<?xml version='1.0' encoding='UTF-8'?>\n" +
    "<deployment version=\"1.0\" " +
    athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") +
    athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("") + ">" +
    " <instance id=\"" + id.id().instance().value() + "\" />" +
    "</deployment>";
    return deploymentSpec.getBytes(UTF_8);
}
/** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */
private class DualLogger {
    private final RunId id;
    private final Step step;
    private DualLogger(RunId id, Step step) {
        this.id = id;
        this.step = step;
    }
    // Logs the given messages to the job's run log only, at INFO level.
    private void log(String... messages) {
        log(List.of(messages));
    }
    private void log(List<String> messages) {
        controller.jobController().log(id, step, INFO, messages);
    }
    private void log(Level level, String message) {
        log(level, message, null);
    }
    // Sends the throwable to the internal logger only, keeping it out of the user-visible run log.
    private void logWithInternalException(Level level, String message, Throwable thrown) {
        logger.log(level, id + " at " + step + ": " + message, thrown);
        controller.jobController().log(id, step, level, message);
    }
    // Logs to both sinks; the stack trace is appended to the message shown in the run log.
    private void log(Level level, String message, Throwable thrown) {
        logger.log(level, id + " at " + step + ": " + message, thrown);
        if (thrown != null) {
            ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream();
            thrown.printStackTrace(new PrintStream(traceBuffer));
            message += "\n" + traceBuffer;
        }
        controller.jobController().log(id, step, level, message);
    }
}
} |
It does (if argument is deployment id the config server will be used) | private boolean testerContainersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
if (useConfigServerForTesterAPI(zoneId)) {
DeploymentId deploymentId = new DeploymentId(id, zoneId);
if (controller.jobController().cloud().testerReady(deploymentId)) {
return true;
} else {
logger.log("Failed to get 100 consecutive OKs from tester container for " + deploymentId);
return false;
}
} else {
return containersAreUp(id, zoneId, logger);
}
} | if (controller.jobController().cloud().testerReady(deploymentId)) { | private boolean testerContainersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
if (useConfigServerForTesterAPI(zoneId)) {
DeploymentId deploymentId = new DeploymentId(id, zoneId);
if (controller.jobController().cloud().testerReady(deploymentId)) {
return true;
} else {
logger.log("Failed to get 100 consecutive OKs from tester container for " + deploymentId);
return false;
}
} else {
return containersAreUp(id, zoneId, logger);
}
} | class InternalStepRunner implements StepRunner {
private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName());
private static final NodeResources DEFAULT_TESTER_RESOURCES =
new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any);
private static final NodeResources DEFAULT_TESTER_RESOURCES_AWS =
new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any);
static final Duration endpointTimeout = Duration.ofMinutes(15);
static final Duration testerTimeout = Duration.ofMinutes(30);
static final Duration installationTimeout = Duration.ofMinutes(60);
static final Duration certificateTimeout = Duration.ofMinutes(300);
private final Controller controller;
private final TestConfigSerializer testConfigSerializer;
private final DeploymentFailureMails mails;
/** Creates an internal step runner backed by the given controller. */
public InternalStepRunner(Controller controller) {
    this.controller = controller;
    this.testConfigSerializer = new TestConfigSerializer(controller.system());
    this.mails = new DeploymentFailureMails(controller.zoneRegistry());
}
@Override
public Optional<RunStatus> run(LockedStep step, RunId id) {
    DualLogger logger = new DualLogger(id, step.get());
    try {
        // Dispatch to the handler for this step; each returns a status when done, or empty to retry later.
        switch (step.get()) {
            case deployTester: return deployTester(id, logger);
            case deployInitialReal: return deployInitialReal(id, logger);
            case installInitialReal: return installInitialReal(id, logger);
            case deployReal: return deployReal(id, logger);
            case installTester: return installTester(id, logger);
            case installReal: return installReal(id, logger);
            case startStagingSetup: return startTests(id, true, logger);
            case endStagingSetup: return endTests(id, logger);
            case startTests: return startTests(id, false, logger);
            case endTests: return endTests(id, logger);
            case copyVespaLogs: return copyVespaLogs(id, logger);
            case deactivateReal: return deactivateReal(id, logger);
            case deactivateTester: return deactivateTester(id, logger);
            case report: return report(id, logger);
            default: throw new AssertionError("Unknown step '" + step + "'!");
        }
    }
    catch (UncheckedIOException e) {
        // IO problems are assumed transient: keep the exception out of the run log and retry the step.
        logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e);
        return Optional.empty();
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Unexpected exception running " + id, e);
        // Cleanup steps must eventually complete, so retry those instead of failing the run.
        if (JobProfile.of(id.type()).alwaysRun().contains(step.get())) {
            logger.log("Will keep trying, as this is a cleanup step.");
            return Optional.empty();
        }
        return Optional.of(error);
    }
}
/** Deploys the source (or target, if no source) versions of the real application, for staging setup. */
private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) {
    Versions versions = controller.jobController().run(id).get().versions();
    logger.log("Deploying platform version " +
    versions.sourcePlatform().orElse(versions.targetPlatform()) +
    " and application version " +
    versions.sourceApplication().orElse(versions.targetApplication()).id() + " ...");
    return deployReal(id, true, versions, logger);
}
/** Deploys the target versions of the real application. */
private Optional<RunStatus> deployReal(RunId id, DualLogger logger) {
    Versions versions = controller.jobController().run(id).get().versions();
    logger.log("Deploying platform version " + versions.targetPlatform() +
    " and application version " + versions.targetApplication().id() + " ...");
    return deployReal(id, false, versions, logger);
}
/**
 * Deploys the real application; {@code setTheStage} marks this as the initial (staging setup) deployment.
 * Manually deployed environments read the package and platform from the dev store instead of the run.
 */
private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, Versions versions, DualLogger logger) {
    Optional<ApplicationPackage> applicationPackage = id.type().environment().isManuallyDeployed()
    ? Optional.of(new ApplicationPackage(controller.applications().applicationStore()
    .getDev(id.application(), id.type().zone(controller.system()))))
    : Optional.empty();
    Optional<Version> vespaVersion = id.type().environment().isManuallyDeployed()
    ? Optional.of(versions.targetPlatform())
    : Optional.empty();
    return deploy(id.application(),
    id.type(),
    () -> controller.applications().deploy(id.application(),
    id.type().zone(controller.system()),
    applicationPackage,
    new DeployOptions(false,
    vespaVersion,
    false,
    setTheStage)),
    controller.jobController().run(id).get()
    .stepInfo(setTheStage ? deployInitialReal : deployReal).get()
    .startTime().get(),
    logger);
}
/** Deploys the tester container application, on the current system version. */
private Optional<RunStatus> deployTester(RunId id, DualLogger logger) {
    Version platform = controller.systemVersion();
    logger.log("Deploying the tester container on platform " + platform + " ...");
    return deploy(id.tester().id(),
    id.type(),
    () -> controller.applications().deployTester(id.tester(),
    testerPackage(id),
    id.type().zone(controller.system()),
    new DeployOptions(true,
    Optional.of(platform),
    false,
    false)),
    controller.jobController().run(id).get()
    .stepInfo(deployTester).get()
    .startTime().get(),
    logger);
}
/**
 * Performs the given deployment and interprets its prepare response: refeed blockers fail the run,
 * restart actions are carried out, and config server errors are mapped to retry or terminal statuses.
 * Returns empty to signal a retry, or the resulting run status.
 */
// NOTE(review): the literal on the options line below appears truncated at "http:" — extraction artifact; verify upstream.
private Optional<RunStatus> deploy(ApplicationId id, JobType type, Supplier<ActivateResult> deployment,
Instant startTime, DualLogger logger) {
    try {
        PrepareResponse prepareResponse = deployment.get().prepareResponse();
        // Disallowed refeed actions cannot be applied automatically; fail with guidance for the user.
        if ( ! prepareResponse.configChangeActions.refeedActions.stream().allMatch(action -> action.allowed)) {
            List<String> messages = new ArrayList<>();
            messages.add("Deploy failed due to non-compatible changes that require re-feed.");
            messages.add("Your options are:");
            messages.add("1. Revert the incompatible changes.");
            messages.add("2. If you think it is safe in your case, you can override this validation, see");
            messages.add(" http:
            messages.add("3. Deploy as a new application under a different name.");
            messages.add("Illegal actions:");
            prepareResponse.configChangeActions.refeedActions.stream()
            .filter(action -> ! action.allowed)
            .flatMap(action -> action.messages.stream())
            .forEach(messages::add);
            messages.add("Details:");
            prepareResponse.log.stream()
            .map(entry -> entry.message)
            .forEach(messages::add);
            logger.log(messages);
            return Optional.of(deploymentFailed);
        }
        // Restart each affected host exactly once, in stable order.
        if (prepareResponse.configChangeActions.restartActions.isEmpty())
            logger.log("No services requiring restart.");
        else
            prepareResponse.configChangeActions.restartActions.stream()
            .flatMap(action -> action.services.stream())
            .map(service -> service.hostName)
            .sorted().distinct()
            .map(Hostname::new)
            .forEach(hostname -> {
                controller.applications().restart(new DeploymentId(id, type.zone(controller.system())), Optional.of(hostname));
                logger.log("Restarting services on host " + hostname.id() + ".");
            });
        logger.log("Deployment successful.");
        if (prepareResponse.message != null)
            logger.log(prepareResponse.message);
        return Optional.of(running);
    }
    catch (ConfigServerException e) {
        // Transient errors become terminal once deployment has been failing for over an hour.
        Optional<RunStatus> result = startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
        ? Optional.of(deploymentFailed) : Optional.empty();
        switch (e.getErrorCode()) {
            case ACTIVATION_CONFLICT:
            case APPLICATION_LOCK_FAILURE:
            case CERTIFICATE_NOT_READY:
                logger.log("Deployment failed with possibly transient error " + e.getErrorCode() +
                ", will retry: " + e.getMessage());
                return result;
            case LOAD_BALANCER_NOT_READY:
            case PARENT_HOST_NOT_READY:
                logger.log(e.getServerMessage());
                return result;
            case OUT_OF_CAPACITY:
                logger.log(e.getServerMessage());
                return Optional.of(outOfCapacity);
            case INVALID_APPLICATION_PACKAGE:
            case BAD_REQUEST:
                logger.log(e.getMessage());
                return Optional.of(deploymentFailed);
        }
        // Unrecognized error codes propagate to the generic handler in run().
        throw e;
    }
}
/** Awaits installation of the initial (staging setup) real deployment. */
private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) {
    return installReal(id, true, logger);
}
/** Awaits installation of the target real deployment. */
private Optional<RunStatus> installReal(RunId id, DualLogger logger) {
    return installReal(id, false, logger);
}
/**
 * Polls convergence of the real deployment: succeeds when all services have the wanted config and
 * endpoints and containers are up; fails on expiry, prolonged suspension, or timeout.
 * Returns empty to be called again on the next tick.
 */
private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) {
    Optional<Deployment> deployment = deployment(id.application(), id.type());
    if (deployment.isEmpty()) {
        logger.log(INFO, "Deployment expired before installation was successful.");
        return Optional.of(installationFailed);
    }
    Versions versions = controller.jobController().run(id).get().versions();
    Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform();
    Run run = controller.jobController().run(id).get();
    Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(id.application(), id.type().zone(controller.system())),
    Optional.of(platform));
    // No convergence data yet: retry, but give up after five minutes of silence.
    if (services.isEmpty()) {
        logger.log("Config status not currently available -- will retry.");
        Step step = setTheStage ? installInitialReal : installReal;
        return run.stepInfo(step).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
        ? Optional.of(error)
        : Optional.empty();
    }
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
    id.application(),
    ImmutableSet.of(active, reserved));
    List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
    nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
    NodeList nodeList = NodeList.of(nodes, parents, services.get());
    // On the first tick (no summary stored yet), log full details for every node.
    boolean firstTick = run.convergenceSummary().isEmpty();
    if (firstTick) {
        logger.log(nodeList.asList().stream()
        .flatMap(node -> nodeDetails(node, true))
        .collect(toList()));
    }
    ConvergenceSummary summary = nodeList.summary();
    if (summary.converged()) {
        controller.jobController().locked(id, lockedRun -> lockedRun.withSummary(null));
        if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) {
            if (containersAreUp(id.application(), id.type().zone(controller.system()), logger)) {
                logger.log("Installation succeeded!");
                return Optional.of(running);
            }
        }
        else if (timedOut(id, deployment.get(), endpointTimeout)) {
            logger.log(WARNING, "Endpoints failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
            return Optional.of(error);
        }
    }
    // Not yet converged (or containers not up): check the various failure conditions.
    boolean failed = false;
    NodeList suspendedTooLong = nodeList.suspendedSince(controller.clock().instant().minus(installationTimeout));
    if ( ! suspendedTooLong.isEmpty()) {
        logger.log(INFO, "Some nodes have been suspended for more than " + installationTimeout.toMinutes() + " minutes.");
        failed = true;
    }
    // No progress for the whole installation timeout while nodes wait to suspend is also a failure.
    if (run.noNodesDownSince()
    .map(since -> since.isBefore(controller.clock().instant().minus(installationTimeout)))
    .orElse(false)) {
        if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0)
            logger.log(INFO, "No nodes allowed to suspend to progress installation for " + installationTimeout.toMinutes() + " minutes.");
        else
            logger.log(INFO, "Nodes not able to start with new application package.");
        failed = true;
    }
    Duration timeout = JobRunner.jobTimeout.minusHours(1);
    if (timedOut(id, deployment.get(), timeout)) {
        logger.log(INFO, "Installation failed to complete within " + timeout.toHours() + "hours!");
        failed = true;
    }
    if (failed) {
        logger.log(nodeList.asList().stream()
        .flatMap(node -> nodeDetails(node, true))
        .collect(toList()));
        return Optional.of(installationFailed);
    }
    // Still in progress: log only nodes currently allowed down, and persist the summary for next tick.
    if ( ! firstTick)
        logger.log(nodeList.allowedDown().asList().stream()
        .flatMap(node -> nodeDetails(node, false))
        .collect(toList()));
    controller.jobController().locked(id, lockedRun -> {
        Instant noNodesDownSince = summary.down() == 0 ? lockedRun.noNodesDownSince().orElse(controller.clock().instant()) : null;
        return lockedRun.noNodesDownSince(noNodesDownSince).withSummary(summary);
    });
    return Optional.empty();
}
/**
 * Polls convergence of the tester deployment; succeeds when its endpoints and container are up,
 * and fails on the endpoint or tester timeouts. Returns empty to be called again on the next tick.
 */
private Optional<RunStatus> installTester(RunId id, DualLogger logger) {
    Run run = controller.jobController().run(id).get();
    Version platform = controller.systemVersion();
    ZoneId zone = id.type().zone(controller.system());
    ApplicationId testerId = id.tester().id();
    Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(testerId, zone),
    Optional.of(platform));
    // No convergence data yet: retry, but give up after five minutes of silence.
    if (services.isEmpty()) {
        logger.log("Config status not currently available -- will retry.");
        return run.stepInfo(installTester).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
        ? Optional.of(error)
        : Optional.empty();
    }
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone,
    testerId,
    ImmutableSet.of(active, reserved));
    List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(zone,
    nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
    NodeList nodeList = NodeList.of(nodes, parents, services.get());
    logger.log(nodeList.asList().stream()
    .flatMap(node -> nodeDetails(node, false))
    .collect(toList()));
    if (nodeList.summary().converged()) {
        if (endpointsAvailable(testerId, zone, logger)) {
            if (testerContainersAreUp(testerId, zone, logger)) {
                logger.log("Tester container successfully installed!");
                return Optional.of(running);
            }
        }
        else if (run.stepInfo(installTester).get().startTime().get().plus(endpointTimeout).isBefore(controller.clock().instant())) {
            logger.log(WARNING, "Tester failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
            return Optional.of(error);
        }
    }
    // Overall timeout for the whole tester installation.
    if (run.stepInfo(installTester).get().startTime().get().plus(testerTimeout).isBefore(controller.clock().instant())) {
        logger.log(WARNING, "Installation of tester failed to complete within " + testerTimeout.toMinutes() + " minutes!");
        return Optional.of(error);
    }
    return Optional.empty();
}
/** Returns true iff all containers in the deployment give 100 consecutive 200 OK responses on /status.html. */
private boolean containersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
    var endpoints = controller.applications().clusterEndpoints(Set.of(new DeploymentId(id, zoneId)));
    if ( ! endpoints.containsKey(zoneId))
        return false;
    for (URI endpoint : endpoints.get(zoneId).values()) {
        // Tester instances use the dedicated tester readiness check; real instances the plain one.
        boolean ready = id.instance().isTester() ? controller.jobController().cloud().testerReady(endpoint)
        : controller.jobController().cloud().ready(endpoint);
        if (!ready) {
            logger.log("Failed to get 100 consecutive OKs from " + endpoint);
            return false;
        }
    }
    return true;
}
/**
 * Returns true iff cluster endpoints for the deployment are known and all resolve in DNS.
 * (The previous comment here was a copy-paste of {@code containersAreUp}'s; this method checks
 * endpoint availability, not container health.)
 */
private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) {
    var endpoints = controller.applications().clusterEndpoints(Set.of(new DeploymentId(id, zone)));
    if ( ! endpoints.containsKey(zone)) {
        logger.log("Endpoints not yet ready.");
        return false;
    }
    // Every endpoint must resolve before the deployment is considered reachable.
    for (var endpoint : endpoints.get(zone).values())
        if ( ! controller.jobController().cloud().exists(endpoint)) {
            logger.log(INFO, "DNS lookup yielded no IP address for '" + endpoint + "'.");
            return false;
        }
    logEndpoints(endpoints, logger);
    return true;
}
/** Logs all discovered endpoints, grouped by zone, to the run log. */
private void logEndpoints(Map<ZoneId, Map<ClusterSpec.Id, URI>> endpoints, DualLogger logger) {
    List<String> lines = new ArrayList<>();
    lines.add("Found endpoints:");
    for (var zoneEntry : endpoints.entrySet()) {
        lines.add("- " + zoneEntry.getKey());
        for (var clusterEntry : zoneEntry.getValue().entrySet())
            lines.add(" |-- " + clusterEntry.getValue() + " (" + clusterEntry.getKey() + ")");
    }
    logger.log(lines);
}
/**
 * Renders a human-readable status report for one node: its orchestration state, pending platform/OS/
 * firmware/restart/reboot work, and — when {@code printAllServices} is set or config is stale — each
 * service's config generation versus the wanted one.
 */
private Stream<String> nodeDetails(NodeWithServices node, boolean printAllServices) {
    return Stream.concat(Stream.of(node.node().hostname() + ": " + humanize(node.node().serviceState()),
    "--- platform " + node.node().wantedVersion() + (node.needsPlatformUpgrade()
    ? " <-- " + (node.node().currentVersion().isEmpty() ? "not booted" : node.node().currentVersion())
    : "") +
    (node.needsOsUpgrade() && node.isAllowedDown()
    ? ", upgrading OS (" + node.node().wantedOsVersion() + " <-- " + node.node().currentOsVersion() + ")"
    : "") +
    (node.needsFirmwareUpgrade() && node.isAllowedDown()
    ? ", upgrading firmware"
    : "") +
    (node.needsRestart()
    ? ", restart pending (" + node.node().wantedRestartGeneration() + " <-- " + node.node().restartGeneration() + ")"
    : "") +
    (node.needsReboot()
    ? ", reboot pending (" + node.node().wantedRebootGeneration() + " <-- " + node.node().rebootGeneration() + ")"
    : "")),
    node.services().stream()
    .filter(service -> printAllServices || node.needsNewConfig())
    .map(service -> "--- " + service.type() + " on port " + service.port() + (service.currentGeneration() == -1
    ? " has not started "
    : " has config generation " + service.currentGeneration() + ", wanted is " + node.wantedConfigGeneration())));
}
/** Translates a node's orchestration state to a human-readable phrase for the run log. */
private String humanize(Node.ServiceState state) {
    if (state == Node.ServiceState.allowedDown)
        return "allowed to be DOWN";
    if (state == Node.ServiceState.expectedUp)
        return "expected to be UP";
    if (state == Node.ServiceState.unorchestrated)
        return "unorchestrated";
    return state.name(); // Fall back to the raw enum name for any other state.
}
/**
 * Starts the test suite for this run: gathers all relevant deployment endpoints, verifies the tester
 * is still reachable (via config server or directly, per feature flag), and ships the generated test
 * config to it. {@code isSetup} selects the staging-setup suite.
 */
private Optional<RunStatus> startTests(RunId id, boolean isSetup, DualLogger logger) {
    Optional<Deployment> deployment = deployment(id.application(), id.type());
    if (deployment.isEmpty()) {
        logger.log(INFO, "Deployment expired before tests could start.");
        return Optional.of(error);
    }
    // Tests may reach all production deployments of the application, plus the zone under test.
    var deployments = controller.applications().requireInstance(id.application())
    .productionDeployments().keySet().stream()
    .map(zone -> new DeploymentId(id.application(), zone))
    .collect(Collectors.toSet());
    ZoneId zoneId = id.type().zone(controller.system());
    deployments.add(new DeploymentId(id.application(), zoneId));
    logger.log("Attempting to find endpoints ...");
    var endpoints = controller.applications().clusterEndpoints(deployments);
    if ( ! endpoints.containsKey(zoneId)) {
        logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!");
        return Optional.of(error);
    }
    logEndpoints(endpoints, logger);
    Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id);
    // Check tester liveness through whichever channel the feature flag selects for this zone.
    if (useConfigServerForTesterAPI(zoneId)) {
        if ( ! controller.serviceRegistry().configServer().isTesterReady(getTesterDeploymentId(id))) {
            logger.log(WARNING, "Tester container went bad!");
            return Optional.of(error);
        }
    } else {
        if (testerEndpoint.isEmpty()) {
            logger.log(WARNING, "Endpoints for the tester container vanished again, while it was still active!");
            return Optional.of(error);
        }
        if ( ! controller.jobController().cloud().testerReady(testerEndpoint.get())) {
            logger.log(WARNING, "Tester container went bad!");
            return Optional.of(error);
        }
    }
    logger.log("Starting tests ...");
    TesterCloud.Suite suite = TesterCloud.Suite.of(id.type(), isSetup);
    byte[] config = testConfigSerializer.configJson(id.application(),
    id.type(),
    true,
    endpoints,
    controller.applications().contentClustersByZone(deployments));
    if (useConfigServerForTesterAPI(zoneId)) {
        controller.serviceRegistry().configServer().startTests(getTesterDeploymentId(id), suite, config);
    } else {
        controller.jobController().cloud().startTests(testerEndpoint.get(), suite, config);
    }
    return Optional.of(running);
}
/**
 * Returns whether the tester is ready, asking the config server when the per-zone feature flag
 * routes tester API calls through it, and the tester endpoint directly otherwise.
 */
private boolean testerReady(RunId id, URI testerEndpoint) {
    return useConfigServerForTesterAPI(id.type().zone(controller.system()))
           ? controller.serviceRegistry().configServer().isTesterReady(getTesterDeploymentId(id))
           : controller.jobController().cloud().testerReady(testerEndpoint);
}
/**
 * Polls the tester for test completion and maps its status to a run status.
 * Aborts if the deployment or the tester certificate expired mid-run; returns empty while tests run.
 */
private Optional<RunStatus> endTests(RunId id, DualLogger logger) {
    if (deployment(id.application(), id.type()).isEmpty()) {
        logger.log(INFO, "Deployment expired before tests could complete.");
        return Optional.of(aborted);
    }
    // A tester certificate was only stored when one was generated (public test environments).
    Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate();
    if (testerCertificate.isPresent()) {
        try {
            testerCertificate.get().checkValidity(Date.from(controller.clock().instant()));
        }
        catch (CertificateExpiredException | CertificateNotYetValidException e) {
            logger.log(INFO, "Tester certificate expired before tests could complete.");
            return Optional.of(aborted);
        }
    }
    controller.jobController().updateTestLog(id);
    // Fetch tester status through whichever channel the feature flag selects.
    TesterCloud.Status testStatus;
    if (useConfigServerForTesterAPI(id.type().zone(controller.system()))) {
        testStatus = controller.serviceRegistry().configServer().getTesterStatus(getTesterDeploymentId(id));
    } else {
        Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id);
        if (testerEndpoint.isEmpty()) {
            logger.log("Endpoints for tester not found -- trying again later.");
            return Optional.empty();
        }
        testStatus = controller.jobController().cloud().getStatus(testerEndpoint.get());
    }
    switch (testStatus) {
        case NOT_STARTED:
            throw new IllegalStateException("Tester reports tests not started, even though they should have!");
        case RUNNING:
            return Optional.empty();
        case FAILURE:
            logger.log("Tests failed.");
            return Optional.of(testFailure);
        case ERROR:
            logger.log(INFO, "Tester failed running its tests!");
            return Optional.of(error);
        case SUCCESS:
            logger.log("Tests completed successfully.");
            return Optional.of(running);
        default:
            throw new IllegalStateException("Unknown status '" + testStatus + "'!");
    }
}
/** Copies the Vespa log from the deployment, if it still exists; a failure to fetch logs errors the run. */
private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) {
    if (deployment(id.application(), id.type()).isEmpty())
        return Optional.of(running);
    try {
        controller.jobController().updateVespaLog(id);
        return Optional.of(running);
    }
    catch (Exception e) {
        logger.log(INFO, "Failure getting vespa logs for " + id, e);
        return Optional.of(error);
    }
}
/** Deactivates the real deployment; keeps retrying for up to an hour before failing. */
private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) {
    try {
        logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
        controller.applications().deactivate(id.application(), id.type().zone(controller.system()));
        return Optional.of(running);
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Failed deleting application " + id.application(), e);
        Instant startedAt = controller.jobController().run(id).get().stepInfo(deactivateReal).get().startTime().get();
        boolean deadlinePassed = startedAt.isBefore(controller.clock().instant().minus(Duration.ofHours(1)));
        return deadlinePassed ? Optional.of(error) : Optional.empty();
    }
}
/** Deactivates the tester application; keeps retrying for up to an hour before failing. */
private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) {
    try {
        logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
        controller.jobController().deactivateTester(id.tester(), id.type());
        return Optional.of(running);
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Failed deleting tester of " + id.application(), e);
        Instant startedAt = controller.jobController().run(id).get().stepInfo(deactivateTester).get().startTime().get();
        boolean deadlinePassed = startedAt.isBefore(controller.clock().instant().minus(Duration.ofHours(1)));
        return deadlinePassed ? Optional.of(error) : Optional.empty();
    }
}
/** Reports the outcome of the run: sends a failure notification if the run has failed. */
private Optional<RunStatus> report(RunId id, DualLogger logger) {
    try {
        var activeRun = controller.jobController().active(id);
        if (activeRun.isPresent() && activeRun.get().hasFailed())
            sendNotification(activeRun.get(), logger);
    }
    catch (IllegalStateException e) {
        logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e);
        return Optional.of(error);
    }
    return Optional.of(running);
}
/** Sends a mail with a notification of a failed run, if one should be sent. */
private void sendNotification(Run run, DualLogger logger) {
Application application = controller.applications().requireApplication(TenantAndApplicationId.from(run.id().application()));
Notifications notifications = application.deploymentSpec().requireInstance(run.id().application().instance()).notifications();
// "New commit" means the change being rolled out is exactly the application package
// this run targeted; this selects the failingCommit notification trigger.
boolean newCommit = application.require(run.id().application().instance()).change().application()
.map(run.versions().targetApplication()::equals)
.orElse(false);
When when = newCommit ? failingCommit : failing;
// Recipients: addresses configured for this trigger, plus the commit author when roles include it.
List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when));
if (notifications.emailRolesFor(when).contains(author))
run.versions().targetApplication().authorEmail().ifPresent(recipients::add);
if (recipients.isEmpty())
return;
try {
// Out-of-capacity only triggers mail for production jobs.
if (run.status() == outOfCapacity && run.id().type().isProduction())
controller.serviceRegistry().mailer().send(mails.outOfCapacity(run.id(), recipients));
if (run.status() == deploymentFailed)
controller.serviceRegistry().mailer().send(mails.deploymentFailure(run.id(), recipients));
if (run.status() == installationFailed)
controller.serviceRegistry().mailer().send(mails.installationFailure(run.id(), recipients));
if (run.status() == testFailure)
controller.serviceRegistry().mailer().send(mails.testFailure(run.id(), recipients));
if (run.status() == error)
controller.serviceRegistry().mailer().send(mails.systemError(run.id(), recipients));
}
catch (RuntimeException e) {
// Mail is best-effort: never fail the run because a notification could not be sent.
logger.log(INFO, "Exception trying to send mail for " + run.id(), e);
}
}
/** Returns the deployment of the real application in the zone of the given job, if it exists. */
private Optional<Deployment> deployment(ApplicationId id, JobType type) {
    ZoneId zone = type.zone(controller.system());
    return Optional.ofNullable(application(id).deployments().get(zone));
}
/** Returns the real application with the given id. */
private Instance application(ApplicationId id) {
// Take and immediately release the application lock before reading, so any in-flight
// write completes first — a cheap read-after-write barrier.
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { });
return controller.applications().requireInstance(id);
}
/**
 * Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout.
 *
 * We time out the job before the deployment expires, for zones where deployments are not persistent,
 * to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry,
 * and the given default installation timeout, minus one minute, is used as a timeout threshold.
 */
private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) {
Run run = controller.jobController().run(id).get();
// NOTE(review): outside CD systems, a run that started after the deployment was made
// never times out here — presumably because the deployment predates this run. Confirm.
if ( ! controller.system().isCd() && run.start().isAfter(deployment.at()))
return false;
// Use the zone's time-to-live only when it is stricter than the given default.
Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone())
.filter(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0)
.orElse(defaultTimeout);
return deployment.at().isBefore(controller.clock().instant().minus(timeout.minus(Duration.ofMinutes(1))));
}
/** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */
private ApplicationPackage testerPackage(RunId id) {
ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication();
DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec();
ZoneId zone = id.type().zone(controller.system());
// A tester certificate is generated only for test jobs in public systems.
boolean useTesterCertificate = controller.system().isPublic() && id.type().environment().isTest();
// Tester node resources: explicit tester flavor from the spec if set, otherwise a
// per-cloud default (AWS regions get the larger default).
byte[] servicesXml = servicesXml(controller.zoneRegistry().accessControlDomain(),
! controller.system().isPublic(),
useTesterCertificate,
testerFlavorFor(id, spec)
.map(NodeResources::fromLegacyName)
.orElse(zone.region().value().contains("aws-") ?
DEFAULT_TESTER_RESOURCES_AWS : DEFAULT_TESTER_RESOURCES));
byte[] testPackage = controller.applications().applicationStore().getTester(id.application().tenant(), id.application().application(), version);
byte[] deploymentXml = deploymentXml(id.tester(),
spec.athenzDomain(),
spec.requireInstance(id.application().instance()).athenzService(zone.environment(), zone.region()));
try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) {
zipBuilder.add(testPackage);
zipBuilder.add("services.xml", servicesXml);
zipBuilder.add("deployment.xml", deploymentXml);
if (useTesterCertificate)
appendAndStoreCertificate(zipBuilder, id);
// NOTE(review): close() is called explicitly before toByteArray(), and again by
// try-with-resources — presumably ZipBuilder.close() is idempotent; verify.
zipBuilder.close();
return new ApplicationPackage(zipBuilder.toByteArray());
}
}
/**
 * Generates a key pair and a short-lived self-signed certificate for the tester, stores the
 * certificate for later validity checks, and adds key and certificate to the package.
 */
private void appendAndStoreCertificate(ZipBuilder zipBuilder, RunId id) {
KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048);
// Subject encodes tester id, job type and run number, making the certificate unique per run.
X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "." + id.number());
X509Certificate certificate = X509CertificateBuilder.fromKeypair(keyPair,
subject,
controller.clock().instant(),
controller.clock().instant().plus(certificateTimeout),
SignatureAlgorithm.SHA512_WITH_RSA,
BigInteger.valueOf(1))
.build();
// Stored so endTests can abort the run once the certificate has expired.
controller.jobController().storeTesterCertificate(id, certificate);
zipBuilder.add("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8));
zipBuilder.add("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8));
}
/** Returns the deployment id of the tester application for the given run. */
private DeploymentId getTesterDeploymentId(RunId runId) {
    return new DeploymentId(runId.tester().id(), runId.type().zone(controller.system()));
}
/** Returns whether tester API calls should go via the config server in this zone, per feature flag. */
private boolean useConfigServerForTesterAPI(ZoneId zoneId) {
BooleanFlag useConfigServerForTesterAPI = Flags.USE_CONFIG_SERVER_FOR_TESTER_API_CALLS.bindTo(controller.flagSource());
boolean useConfigServer = useConfigServerForTesterAPI.with(FetchVector.Dimension.ZONE_ID, zoneId.value()).value();
// Logged on every evaluation — useful while the flag is being rolled out, but noisy;
// consider demoting to FINE once the rollout is done.
InternalStepRunner.logger.log(LogLevel.INFO, Flags.USE_CONFIG_SERVER_FOR_TESTER_API_CALLS.id().toString() +
" has value " + useConfigServer + " in zone " + zoneId.value());
return useConfigServer;
}
/** Returns the tester flavor of the first deployment-spec step concerning the job's environment, if any. */
private static Optional<String> testerFlavorFor(RunId id, DeploymentSpec spec) {
    return spec.steps().stream()
               .filter(step -> step.concerns(id.type().environment()))
               .findFirst()
               .flatMap(step -> step.zones().get(0).testerFlavor());
}
/** Returns the generated services.xml content for the tester application. */
static byte[] servicesXml(AthenzDomain domain, boolean systemUsesAthenz, boolean useTesterCertificate,
NodeResources resources) {
// Reserve a fixed 2 Gb for the jdisc container; the test runner gets half the rest.
int jdiscMemoryGb = 2;
int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb());
int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2);
String resourceString = String.format(Locale.ENGLISH,
"<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>",
resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name());
// The CD system's Athenz domain maps to the production identity domain for SIA key paths.
AthenzDomain idDomain = ("vespa.vespa.cd".equals(domain.value()) ? AthenzDomain.from("vespa.vespa") : domain);
String servicesXml =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<services xmlns:deploy='vespa' version='1.0'>\n" +
" <container version='1.0' id='tester'>\n" +
"\n" +
" <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" +
" <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" +
" <artifactsPath>artifacts</artifactsPath>\n" +
" <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" +
" <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
" <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" +
" </config>\n" +
" </component>\n" +
"\n" +
" <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" +
// NOTE(review): the line below appears truncated (unterminated string literal) —
// likely a "http://…" URL was cut off at "//"; restore the binding from VCS.
" <binding>http:
" </handler>\n" +
"\n" +
" <http>\n" +
" <!-- Make sure 4080 is the first port. This will be used by the config server. -->\n" +
" <server id='default' port='4080'/>\n" +
" <server id='testertls4443' port='4443'>\n" +
" <config name=\"jdisc.http.connector\">\n" +
" <tlsClientAuthEnforcer>\n" +
" <enable>true</enable>\n" +
" <pathWhitelist>\n" +
" <item>/status.html</item>\n" +
" <item>/state/v1/config</item>\n" +
" </pathWhitelist>\n" +
" </tlsClientAuthEnforcer>\n" +
" </config>\n" +
" <ssl>\n" +
" <private-key-file>/var/lib/sia/keys/" + idDomain.value() + ".tenant.key.pem</private-key-file>\n" +
" <certificate-file>/var/lib/sia/certs/" + idDomain.value() + ".tenant.cert.pem</certificate-file>\n" +
" <ca-certificates-file>/opt/yahoo/share/ssl/certs/athenz_certificate_bundle.pem</ca-certificates-file>\n" +
" <client-authentication>want</client-authentication>\n" +
" </ssl>\n" +
" </server>\n" +
" <filtering>\n" +
// Athenz-based access control is only emitted for systems that use Athenz.
(systemUsesAthenz ?
" <access-control domain='" + domain.value() + "'>\n" +
" <exclude>\n" +
// NOTE(review): the line below also appears truncated — restore from VCS.
" <binding>http:
" </exclude>\n" +
" </access-control>\n"
: "") +
" <request-chain id=\"testrunner-api\">\n" +
" <filter id='authz-filter' class='com.yahoo.jdisc.http.filter.security.athenz.AthenzAuthorizationFilter' bundle=\"jdisc-security-filters\">\n" +
" <config name=\"jdisc.http.filter.security.athenz.athenz-authorization-filter\">\n" +
" <credentialsToVerify>TOKEN_ONLY</credentialsToVerify>\n" +
" <roleTokenHeaderName>Yahoo-Role-Auth</roleTokenHeaderName>\n" +
" </config>\n" +
" <component id=\"com.yahoo.jdisc.http.filter.security.athenz.StaticRequestResourceMapper\" bundle=\"jdisc-security-filters\">\n" +
" <config name=\"jdisc.http.filter.security.athenz.static-request-resource-mapper\">\n" +
" <resourceName>" + domain.value() + ":tester-application</resourceName>\n" +
" <action>deploy</action>\n" +
" </config>\n" +
" </component>\n" +
" </filter>\n" +
" </request-chain>\n" +
" </filtering>\n" +
" </http>\n" +
"\n" +
" <nodes count=\"1\" allocated-memory=\"" + jdiscMemoryPct + "%\">\n" +
" " + resourceString + "\n" +
" </nodes>\n" +
" </container>\n" +
"</services>\n";
return servicesXml.getBytes(UTF_8);
}
/** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */
private static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) {
    StringBuilder xml = new StringBuilder();
    xml.append("<?xml version='1.0' encoding='UTF-8'?>\n")
       .append("<deployment version=\"1.0\" ");
    athenzDomain.ifPresent(domain -> xml.append("athenz-domain=\"").append(domain.value()).append("\" "));
    athenzService.ifPresent(service -> xml.append("athenz-service=\"").append(service.value()).append("\" "));
    xml.append(">")
       .append(" <instance id=\"").append(id.id().instance().value()).append("\" />")
       .append("</deployment>");
    return xml.toString().getBytes(UTF_8);
}
/** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */
private class DualLogger {
// The run and step all log entries are attached to.
private final RunId id;
private final Step step;
private DualLogger(RunId id, Step step) {
this.id = id;
this.step = step;
}
// Logs the given messages at INFO, to the job log only.
private void log(String... messages) {
log(List.of(messages));
}
private void log(List<String> messages) {
controller.jobController().log(id, step, INFO, messages);
}
private void log(Level level, String message) {
log(level, message, null);
}
// Sends the exception to the internal logger only, keeping the stack trace out of the job log.
private void logWithInternalException(Level level, String message, Throwable thrown) {
logger.log(level, id + " at " + step + ": " + message, thrown);
controller.jobController().log(id, step, level, message);
}
// Logs to both sinks; the stack trace, if any, is appended to the job log message as well.
private void log(Level level, String message, Throwable thrown) {
logger.log(level, id + " at " + step + ": " + message, thrown);
if (thrown != null) {
ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream();
thrown.printStackTrace(new PrintStream(traceBuffer));
message += "\n" + traceBuffer;
}
controller.jobController().log(id, step, level, message);
}
}
} | class InternalStepRunner implements StepRunner {
private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName());
// Default tester node resources; AWS zones get a larger default.
private static final NodeResources DEFAULT_TESTER_RESOURCES =
new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any);
private static final NodeResources DEFAULT_TESTER_RESOURCES_AWS =
new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any);
// Timeouts used by the wait-steps below; see timedOut() and the install/endpoint checks.
static final Duration endpointTimeout = Duration.ofMinutes(15);
static final Duration testerTimeout = Duration.ofMinutes(30);
static final Duration installationTimeout = Duration.ofMinutes(60);
static final Duration certificateTimeout = Duration.ofMinutes(300);
private final Controller controller;
private final TestConfigSerializer testConfigSerializer;
private final DeploymentFailureMails mails;
public InternalStepRunner(Controller controller) {
this.controller = controller;
this.testConfigSerializer = new TestConfigSerializer(controller.system());
this.mails = new DeploymentFailureMails(controller.zoneRegistry());
}
@Override
public Optional<RunStatus> run(LockedStep step, RunId id) {
DualLogger logger = new DualLogger(id, step.get());
try {
// Dispatch to the handler for this step; each returns a status, or empty to retry later.
switch (step.get()) {
case deployTester: return deployTester(id, logger);
case deployInitialReal: return deployInitialReal(id, logger);
case installInitialReal: return installInitialReal(id, logger);
case deployReal: return deployReal(id, logger);
case installTester: return installTester(id, logger);
case installReal: return installReal(id, logger);
case startStagingSetup: return startTests(id, true, logger);
case endStagingSetup: return endTests(id, logger);
case startTests: return startTests(id, false, logger);
case endTests: return endTests(id, logger);
case copyVespaLogs: return copyVespaLogs(id, logger);
case deactivateReal: return deactivateReal(id, logger);
case deactivateTester: return deactivateTester(id, logger);
case report: return report(id, logger);
default: throw new AssertionError("Unknown step '" + step + "'!");
}
}
catch (UncheckedIOException e) {
// IO problems are treated as transient: log internally and retry the step later.
logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e);
return Optional.empty();
}
catch (RuntimeException e) {
logger.log(WARNING, "Unexpected exception running " + id, e);
// Cleanup steps must eventually complete, so those are retried; other steps fail the run.
if (JobProfile.of(id.type()).alwaysRun().contains(step.get())) {
logger.log("Will keep trying, as this is a cleanup step.");
return Optional.empty();
}
return Optional.of(error);
}
}
/** Deploys the initial (source) platform and application versions, setting the stage. */
private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) {
    var versions = controller.jobController().run(id).get().versions();
    var platform = versions.sourcePlatform().orElse(versions.targetPlatform());
    var application = versions.sourceApplication().orElse(versions.targetApplication());
    logger.log("Deploying platform version " + platform +
               " and application version " + application.id() + " ...");
    return deployReal(id, true, versions, logger);
}
/** Deploys the target platform and application versions for this run. */
private Optional<RunStatus> deployReal(RunId id, DualLogger logger) {
    var versions = controller.jobController().run(id).get().versions();
    var message = "Deploying platform version " + versions.targetPlatform() +
                  " and application version " + versions.targetApplication().id() + " ...";
    logger.log(message);
    return deployReal(id, false, versions, logger);
}
/**
 * Performs the actual deployment of the real application.
 * Manual (dev/perf) deployments use the stored dev package and pin the platform version;
 * pipeline deployments let the config server resolve both from the run's versions.
 */
private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, Versions versions, DualLogger logger) {
Optional<ApplicationPackage> applicationPackage = id.type().environment().isManuallyDeployed()
? Optional.of(new ApplicationPackage(controller.applications().applicationStore()
.getDev(id.application(), id.type().zone(controller.system()))))
: Optional.empty();
Optional<Version> vespaVersion = id.type().environment().isManuallyDeployed()
? Optional.of(versions.targetPlatform())
: Optional.empty();
return deploy(id.application(),
id.type(),
() -> controller.applications().deploy(id.application(),
id.type().zone(controller.system()),
applicationPackage,
new DeployOptions(false,
vespaVersion,
false,
setTheStage)),
// Deadline handling in deploy() is measured from this step's start time.
controller.jobController().run(id).get()
.stepInfo(setTheStage ? deployInitialReal : deployReal).get()
.startTime().get(),
logger);
}
/** Deploys the tester application, on the current system version. */
private Optional<RunStatus> deployTester(RunId id, DualLogger logger) {
Version platform = controller.systemVersion();
logger.log("Deploying the tester container on platform " + platform + " ...");
return deploy(id.tester().id(),
id.type(),
() -> controller.applications().deployTester(id.tester(),
testerPackage(id),
id.type().zone(controller.system()),
new DeployOptions(true,
Optional.of(platform),
false,
false)),
// Deadline handling in deploy() is measured from this step's start time.
controller.jobController().run(id).get()
.stepInfo(deployTester).get()
.startTime().get(),
logger);
}
/**
 * Runs the given deployment and interprets its outcome: logs refeed/restart actions,
 * and maps config server errors to run statuses. Transient errors yield empty (retry)
 * until one hour past the given step start time, after which they fail the deployment.
 */
private Optional<RunStatus> deploy(ApplicationId id, JobType type, Supplier<ActivateResult> deployment,
Instant startTime, DualLogger logger) {
try {
PrepareResponse prepareResponse = deployment.get().prepareResponse();
// Disallowed refeed actions cannot be applied automatically; fail with guidance.
if ( ! prepareResponse.configChangeActions.refeedActions.stream().allMatch(action -> action.allowed)) {
List<String> messages = new ArrayList<>();
messages.add("Deploy failed due to non-compatible changes that require re-feed.");
messages.add("Your options are:");
messages.add("1. Revert the incompatible changes.");
messages.add("2. If you think it is safe in your case, you can override this validation, see");
// NOTE(review): the line below appears truncated (unterminated string literal) —
// likely a documentation URL cut off at "//"; restore from VCS.
messages.add(" http:
messages.add("3. Deploy as a new application under a different name.");
messages.add("Illegal actions:");
prepareResponse.configChangeActions.refeedActions.stream()
.filter(action -> ! action.allowed)
.flatMap(action -> action.messages.stream())
.forEach(messages::add);
messages.add("Details:");
prepareResponse.log.stream()
.map(entry -> entry.message)
.forEach(messages::add);
logger.log(messages);
return Optional.of(deploymentFailed);
}
// Restart actions are applied automatically, one host at a time.
if (prepareResponse.configChangeActions.restartActions.isEmpty())
logger.log("No services requiring restart.");
else
prepareResponse.configChangeActions.restartActions.stream()
.flatMap(action -> action.services.stream())
.map(service -> service.hostName)
.sorted().distinct()
.map(Hostname::new)
.forEach(hostname -> {
controller.applications().restart(new DeploymentId(id, type.zone(controller.system())), Optional.of(hostname));
logger.log("Restarting services on host " + hostname.id() + ".");
});
logger.log("Deployment successful.");
if (prepareResponse.message != null)
logger.log(prepareResponse.message);
return Optional.of(running);
}
catch (ConfigServerException e) {
// Transient errors: retry until one hour after the step started, then give up.
Optional<RunStatus> result = startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(deploymentFailed) : Optional.empty();
switch (e.getErrorCode()) {
case ACTIVATION_CONFLICT:
case APPLICATION_LOCK_FAILURE:
case CERTIFICATE_NOT_READY:
logger.log("Deployment failed with possibly transient error " + e.getErrorCode() +
", will retry: " + e.getMessage());
return result;
case LOAD_BALANCER_NOT_READY:
case PARENT_HOST_NOT_READY:
logger.log(e.getServerMessage());
return result;
case OUT_OF_CAPACITY:
logger.log(e.getServerMessage());
return Optional.of(outOfCapacity);
case INVALID_APPLICATION_PACKAGE:
case BAD_REQUEST:
// Permanent errors: fail immediately.
logger.log(e.getMessage());
return Optional.of(deploymentFailed);
}
// Unknown error codes propagate to the generic handler in run().
throw e;
}
}
/** Waits for the initial (staging-setup) installation of the real application. */
private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) {
return installReal(id, true, logger);
}
/** Waits for the regular installation of the real application. */
private Optional<RunStatus> installReal(RunId id, DualLogger logger) {
return installReal(id, false, logger);
}
/**
 * Checks config convergence and node/service health for the real deployment.
 * Returns running once installation succeeds, a failure status when it cannot succeed,
 * or empty to poll again later.
 *
 * Fix: the installation-timeout log message lacked a space before "hours".
 *
 * @param setTheStage whether this is the initial (staging setup) installation
 */
private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) {
    Optional<Deployment> deployment = deployment(id.application(), id.type());
    if (deployment.isEmpty()) {
        logger.log(INFO, "Deployment expired before installation was successful.");
        return Optional.of(installationFailed);
    }
    Versions versions = controller.jobController().run(id).get().versions();
    // Staging setup installs the source platform; everything else targets the new platform.
    Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform();
    Run run = controller.jobController().run(id).get();
    Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(id.application(), id.type().zone(controller.system())),
                                                                                                           Optional.of(platform));
    if (services.isEmpty()) {
        logger.log("Config status not currently available -- will retry.");
        // Give up if convergence info has been unavailable for five minutes since step start.
        Step step = setTheStage ? installInitialReal : installReal;
        return run.stepInfo(step).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
               ? Optional.of(error)
               : Optional.empty();
    }
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
                                                                                        id.application(),
                                                                                        ImmutableSet.of(active, reserved));
    List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
                                                                                          nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
    NodeList nodeList = NodeList.of(nodes, parents, services.get());
    // On the first tick, log full details for every node; later ticks log only nodes allowed down.
    boolean firstTick = run.convergenceSummary().isEmpty();
    if (firstTick) {
        logger.log(nodeList.asList().stream()
                           .flatMap(node -> nodeDetails(node, true))
                           .collect(toList()));
    }
    ConvergenceSummary summary = nodeList.summary();
    if (summary.converged()) {
        controller.jobController().locked(id, lockedRun -> lockedRun.withSummary(null));
        if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) {
            if (containersAreUp(id.application(), id.type().zone(controller.system()), logger)) {
                logger.log("Installation succeeded!");
                return Optional.of(running);
            }
        }
        else if (timedOut(id, deployment.get(), endpointTimeout)) {
            logger.log(WARNING, "Endpoints failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
            return Optional.of(error);
        }
    }
    boolean failed = false;
    NodeList suspendedTooLong = nodeList.suspendedSince(controller.clock().instant().minus(installationTimeout));
    if ( ! suspendedTooLong.isEmpty()) {
        logger.log(INFO, "Some nodes have been suspended for more than " + installationTimeout.toMinutes() + " minutes.");
        failed = true;
    }
    if (run.noNodesDownSince()
           .map(since -> since.isBefore(controller.clock().instant().minus(installationTimeout)))
           .orElse(false)) {
        if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0)
            logger.log(INFO, "No nodes allowed to suspend to progress installation for " + installationTimeout.toMinutes() + " minutes.");
        else
            logger.log(INFO, "Nodes not able to start with new application package.");
        failed = true;
    }
    Duration timeout = JobRunner.jobTimeout.minusHours(1);
    if (timedOut(id, deployment.get(), timeout)) {
        logger.log(INFO, "Installation failed to complete within " + timeout.toHours() + " hours!");
        failed = true;
    }
    if (failed) {
        logger.log(nodeList.asList().stream()
                           .flatMap(node -> nodeDetails(node, true))
                           .collect(toList()));
        return Optional.of(installationFailed);
    }
    if ( ! firstTick)
        logger.log(nodeList.allowedDown().asList().stream()
                           .flatMap(node -> nodeDetails(node, false))
                           .collect(toList()));
    // Persist the "no nodes down since" timestamp and latest convergence summary for later ticks.
    controller.jobController().locked(id, lockedRun -> {
        Instant noNodesDownSince = summary.down() == 0 ? lockedRun.noNodesDownSince().orElse(controller.clock().instant()) : null;
        return lockedRun.noNodesDownSince(noNodesDownSince).withSummary(summary);
    });
    return Optional.empty();
}
/**
 * Checks config convergence and health for the tester deployment; returns running when
 * installed, error on timeout, or empty to poll again later.
 */
private Optional<RunStatus> installTester(RunId id, DualLogger logger) {
Run run = controller.jobController().run(id).get();
Version platform = controller.systemVersion();
ZoneId zone = id.type().zone(controller.system());
ApplicationId testerId = id.tester().id();
Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(testerId, zone),
Optional.of(platform));
if (services.isEmpty()) {
logger.log("Config status not currently available -- will retry.");
// Give up if convergence info has been unavailable for five minutes since step start.
return run.stepInfo(installTester).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
? Optional.of(error)
: Optional.empty();
}
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone,
testerId,
ImmutableSet.of(active, reserved));
List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(zone,
nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
NodeList nodeList = NodeList.of(nodes, parents, services.get());
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, false))
.collect(toList()));
if (nodeList.summary().converged()) {
if (endpointsAvailable(testerId, zone, logger)) {
if (testerContainersAreUp(testerId, zone, logger)) {
logger.log("Tester container successfully installed!");
return Optional.of(running);
}
}
// Converged but no endpoints: allow endpointTimeout before failing.
else if (run.stepInfo(installTester).get().startTime().get().plus(endpointTimeout).isBefore(controller.clock().instant())) {
logger.log(WARNING, "Tester failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
return Optional.of(error);
}
}
// Overall cap on how long tester installation may take.
if (run.stepInfo(installTester).get().startTime().get().plus(testerTimeout).isBefore(controller.clock().instant())) {
logger.log(WARNING, "Installation of tester failed to complete within " + testerTimeout.toMinutes() + " minutes!");
return Optional.of(error);
}
return Optional.empty();
}
/** Returns true iff all containers in the deployment give 100 consecutive 200 OK responses on /status.html. */
private boolean containersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
var endpoints = controller.applications().clusterEndpoints(Set.of(new DeploymentId(id, zoneId)));
if ( ! endpoints.containsKey(zoneId))
return false;
for (URI endpoint : endpoints.get(zoneId).values()) {
// Tester instances use the tester-specific readiness probe.
boolean ready = id.instance().isTester() ? controller.jobController().cloud().testerReady(endpoint)
: controller.jobController().cloud().ready(endpoint);
if (!ready) {
logger.log("Failed to get 100 consecutive OKs from " + endpoint);
return false;
}
}
return true;
}
/** Returns true iff cluster endpoints exist for the deployment, and they all resolve in DNS. */
private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) {
var endpoints = controller.applications().clusterEndpoints(Set.of(new DeploymentId(id, zone)));
if ( ! endpoints.containsKey(zone)) {
logger.log("Endpoints not yet ready.");
return false;
}
// Each endpoint must resolve to an IP address before it can be used.
for (var endpoint : endpoints.get(zone).values())
if ( ! controller.jobController().cloud().exists(endpoint)) {
logger.log(INFO, "DNS lookup yielded no IP address for '" + endpoint + "'.");
return false;
}
logEndpoints(endpoints, logger);
return true;
}
/** Logs the given zone-to-cluster endpoint mapping to the job log, one line per endpoint. */
private void logEndpoints(Map<ZoneId, Map<ClusterSpec.Id, URI>> endpoints, DualLogger logger) {
    List<String> lines = new ArrayList<>();
    lines.add("Found endpoints:");
    for (var zoneEntry : endpoints.entrySet()) {
        lines.add("- " + zoneEntry.getKey());
        for (var clusterEntry : zoneEntry.getValue().entrySet())
            lines.add(" |-- " + clusterEntry.getValue() + " (" + clusterEntry.getKey() + ")");
    }
    logger.log(lines);
}
/**
 * Renders human-readable status lines for a node: one line for the node itself with any
 * pending platform/OS/firmware/restart/reboot work, followed by one line per service
 * (all services when printAllServices, otherwise only when the node needs new config).
 */
private Stream<String> nodeDetails(NodeWithServices node, boolean printAllServices) {
return Stream.concat(Stream.of(node.node().hostname() + ": " + humanize(node.node().serviceState()),
"--- platform " + node.node().wantedVersion() + (node.needsPlatformUpgrade()
? " <-- " + (node.node().currentVersion().isEmpty() ? "not booted" : node.node().currentVersion())
: "") +
(node.needsOsUpgrade() && node.isAllowedDown()
? ", upgrading OS (" + node.node().wantedOsVersion() + " <-- " + node.node().currentOsVersion() + ")"
: "") +
(node.needsFirmwareUpgrade() && node.isAllowedDown()
? ", upgrading firmware"
: "") +
(node.needsRestart()
? ", restart pending (" + node.node().wantedRestartGeneration() + " <-- " + node.node().restartGeneration() + ")"
: "") +
(node.needsReboot()
? ", reboot pending (" + node.node().wantedRebootGeneration() + " <-- " + node.node().rebootGeneration() + ")"
: "")),
node.services().stream()
.filter(service -> printAllServices || node.needsNewConfig())
.map(service -> "--- " + service.type() + " on port " + service.port() + (service.currentGeneration() == -1
? " has not started "
: " has config generation " + service.currentGeneration() + ", wanted is " + node.wantedConfigGeneration())));
}
/** Translates an orchestration service state to a human-readable phrase. */
private String humanize(Node.ServiceState state) {
    if (state == Node.ServiceState.allowedDown) return "allowed to be DOWN";
    if (state == Node.ServiceState.expectedUp) return "expected to be UP";
    if (state == Node.ServiceState.unorchestrated) return "unorchestrated";
    return state.name();
}
/** Collects endpoints and test config, and asks the tester to start running the given suite. */
private Optional<RunStatus> startTests(RunId id, boolean isSetup, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
logger.log(INFO, "Deployment expired before tests could start.");
return Optional.of(error);
}
// Tests are handed endpoints for all production deployments, plus the zone under test.
var deployments = controller.applications().requireInstance(id.application())
.productionDeployments().keySet().stream()
.map(zone -> new DeploymentId(id.application(), zone))
.collect(Collectors.toSet());
ZoneId zoneId = id.type().zone(controller.system());
deployments.add(new DeploymentId(id.application(), zoneId));
logger.log("Attempting to find endpoints ...");
var endpoints = controller.applications().clusterEndpoints(deployments);
if ( ! endpoints.containsKey(zoneId)) {
logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!");
return Optional.of(error);
}
logEndpoints(endpoints, logger);
Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id);
if (useConfigServerForTesterAPI(zoneId)) {
if ( ! controller.serviceRegistry().configServer().isTesterReady(getTesterDeploymentId(id))) {
logger.log(WARNING, "Tester container went bad!");
return Optional.of(error);
}
} else {
// The testerEndpoint.get() calls below are safe: emptiness is handled here.
if (testerEndpoint.isEmpty()) {
logger.log(WARNING, "Endpoints for the tester container vanished again, while it was still active!");
return Optional.of(error);
}
if ( ! controller.jobController().cloud().testerReady(testerEndpoint.get())) {
logger.log(WARNING, "Tester container went bad!");
return Optional.of(error);
}
}
logger.log("Starting tests ...");
TesterCloud.Suite suite = TesterCloud.Suite.of(id.type(), isSetup);
byte[] config = testConfigSerializer.configJson(id.application(),
id.type(),
true,
endpoints,
controller.applications().contentClustersByZone(deployments));
if (useConfigServerForTesterAPI(zoneId)) {
controller.serviceRegistry().configServer().startTests(getTesterDeploymentId(id), suite, config);
} else {
controller.jobController().cloud().startTests(testerEndpoint.get(), suite, config);
}
return Optional.of(running);
}
/** Returns whether the tester is ready, via the config server or the tester endpoint directly. */
private boolean testerReady(RunId id, URI testerEndpoint) {
    return useConfigServerForTesterAPI(id.type().zone(controller.system()))
           ? controller.serviceRegistry().configServer().isTesterReady(getTesterDeploymentId(id))
           : controller.jobController().cloud().testerReady(testerEndpoint);
}
private Optional<RunStatus> endTests(RunId id, DualLogger logger) {
if (deployment(id.application(), id.type()).isEmpty()) {
logger.log(INFO, "Deployment expired before tests could complete.");
return Optional.of(aborted);
}
Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate();
if (testerCertificate.isPresent()) {
try {
testerCertificate.get().checkValidity(Date.from(controller.clock().instant()));
}
catch (CertificateExpiredException | CertificateNotYetValidException e) {
logger.log(INFO, "Tester certificate expired before tests could complete.");
return Optional.of(aborted);
}
}
controller.jobController().updateTestLog(id);
TesterCloud.Status testStatus;
if (useConfigServerForTesterAPI(id.type().zone(controller.system()))) {
testStatus = controller.serviceRegistry().configServer().getTesterStatus(getTesterDeploymentId(id));
} else {
Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id);
if (testerEndpoint.isEmpty()) {
logger.log("Endpoints for tester not found -- trying again later.");
return Optional.empty();
}
testStatus = controller.jobController().cloud().getStatus(testerEndpoint.get());
}
switch (testStatus) {
case NOT_STARTED:
throw new IllegalStateException("Tester reports tests not started, even though they should have!");
case RUNNING:
return Optional.empty();
case FAILURE:
logger.log("Tests failed.");
return Optional.of(testFailure);
case ERROR:
logger.log(INFO, "Tester failed running its tests!");
return Optional.of(error);
case SUCCESS:
logger.log("Tests completed successfully.");
return Optional.of(running);
default:
throw new IllegalStateException("Unknown status '" + testStatus + "'!");
}
}
private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) {
if (deployment(id.application(), id.type()).isPresent())
try {
controller.jobController().updateVespaLog(id);
}
catch (Exception e) {
logger.log(INFO, "Failure getting vespa logs for " + id, e);
return Optional.of(error);
}
return Optional.of(running);
}
private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) {
try {
logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
controller.applications().deactivate(id.application(), id.type().zone(controller.system()));
return Optional.of(running);
}
catch (RuntimeException e) {
logger.log(WARNING, "Failed deleting application " + id.application(), e);
Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateReal).get().startTime().get();
return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(error)
: Optional.empty();
}
}
private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) {
try {
logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
controller.jobController().deactivateTester(id.tester(), id.type());
return Optional.of(running);
}
catch (RuntimeException e) {
logger.log(WARNING, "Failed deleting tester of " + id.application(), e);
Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateTester).get().startTime().get();
return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(error)
: Optional.empty();
}
}
private Optional<RunStatus> report(RunId id, DualLogger logger) {
try {
controller.jobController().active(id).ifPresent(run -> {
if (run.hasFailed())
sendNotification(run, logger);
});
}
catch (IllegalStateException e) {
logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e);
return Optional.of(error);
}
return Optional.of(running);
}
/** Sends a mail with a notification of a failed run, if one should be sent. */
private void sendNotification(Run run, DualLogger logger) {
Application application = controller.applications().requireApplication(TenantAndApplicationId.from(run.id().application()));
Notifications notifications = application.deploymentSpec().requireInstance(run.id().application().instance()).notifications();
boolean newCommit = application.require(run.id().application().instance()).change().application()
.map(run.versions().targetApplication()::equals)
.orElse(false);
When when = newCommit ? failingCommit : failing;
List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when));
if (notifications.emailRolesFor(when).contains(author))
run.versions().targetApplication().authorEmail().ifPresent(recipients::add);
if (recipients.isEmpty())
return;
try {
if (run.status() == outOfCapacity && run.id().type().isProduction())
controller.serviceRegistry().mailer().send(mails.outOfCapacity(run.id(), recipients));
if (run.status() == deploymentFailed)
controller.serviceRegistry().mailer().send(mails.deploymentFailure(run.id(), recipients));
if (run.status() == installationFailed)
controller.serviceRegistry().mailer().send(mails.installationFailure(run.id(), recipients));
if (run.status() == testFailure)
controller.serviceRegistry().mailer().send(mails.testFailure(run.id(), recipients));
if (run.status() == error)
controller.serviceRegistry().mailer().send(mails.systemError(run.id(), recipients));
}
catch (RuntimeException e) {
logger.log(INFO, "Exception trying to send mail for " + run.id(), e);
}
}
/** Returns the deployment of the real application in the zone of the given job, if it exists. */
private Optional<Deployment> deployment(ApplicationId id, JobType type) {
return Optional.ofNullable(application(id).deployments().get(type.zone(controller.system())));
}
/** Returns the real application with the given id. */
private Instance application(ApplicationId id) {
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { });
return controller.applications().requireInstance(id);
}
/**
* Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout.
*
* We time out the job before the deployment expires, for zones where deployments are not persistent,
* to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry,
* and the given default installation timeout, minus one minute, is used as a timeout threshold.
*/
private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) {
Run run = controller.jobController().run(id).get();
if ( ! controller.system().isCd() && run.start().isAfter(deployment.at()))
return false;
Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone())
.filter(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0)
.orElse(defaultTimeout);
return deployment.at().isBefore(controller.clock().instant().minus(timeout.minus(Duration.ofMinutes(1))));
}
/** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */
private ApplicationPackage testerPackage(RunId id) {
ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication();
DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec();
ZoneId zone = id.type().zone(controller.system());
boolean useTesterCertificate = controller.system().isPublic() && id.type().environment().isTest();
byte[] servicesXml = servicesXml(controller.zoneRegistry().accessControlDomain(),
! controller.system().isPublic(),
useTesterCertificate,
testerFlavorFor(id, spec)
.map(NodeResources::fromLegacyName)
.orElse(zone.region().value().contains("aws-") ?
DEFAULT_TESTER_RESOURCES_AWS : DEFAULT_TESTER_RESOURCES));
byte[] testPackage = controller.applications().applicationStore().getTester(id.application().tenant(), id.application().application(), version);
byte[] deploymentXml = deploymentXml(id.tester(),
spec.athenzDomain(),
spec.requireInstance(id.application().instance()).athenzService(zone.environment(), zone.region()));
try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) {
zipBuilder.add(testPackage);
zipBuilder.add("services.xml", servicesXml);
zipBuilder.add("deployment.xml", deploymentXml);
if (useTesterCertificate)
appendAndStoreCertificate(zipBuilder, id);
zipBuilder.close();
return new ApplicationPackage(zipBuilder.toByteArray());
}
}
private void appendAndStoreCertificate(ZipBuilder zipBuilder, RunId id) {
KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048);
X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "." + id.number());
X509Certificate certificate = X509CertificateBuilder.fromKeypair(keyPair,
subject,
controller.clock().instant(),
controller.clock().instant().plus(certificateTimeout),
SignatureAlgorithm.SHA512_WITH_RSA,
BigInteger.valueOf(1))
.build();
controller.jobController().storeTesterCertificate(id, certificate);
zipBuilder.add("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8));
zipBuilder.add("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8));
}
private DeploymentId getTesterDeploymentId(RunId runId) {
ZoneId zoneId = runId.type().zone(controller.system());
return new DeploymentId(runId.tester().id(), zoneId);
}
private boolean useConfigServerForTesterAPI(ZoneId zoneId) {
BooleanFlag useConfigServerForTesterAPI = Flags.USE_CONFIG_SERVER_FOR_TESTER_API_CALLS.bindTo(controller.flagSource());
boolean useConfigServer = useConfigServerForTesterAPI.with(FetchVector.Dimension.ZONE_ID, zoneId.value()).value();
InternalStepRunner.logger.log(LogLevel.INFO, Flags.USE_CONFIG_SERVER_FOR_TESTER_API_CALLS.id().toString() +
" has value " + useConfigServer + " in zone " + zoneId.value());
return useConfigServer;
}
private static Optional<String> testerFlavorFor(RunId id, DeploymentSpec spec) {
for (DeploymentSpec.Step step : spec.steps())
if (step.concerns(id.type().environment()))
return step.zones().get(0).testerFlavor();
return Optional.empty();
}
/** Returns the generated services.xml content for the tester application. */
static byte[] servicesXml(AthenzDomain domain, boolean systemUsesAthenz, boolean useTesterCertificate,
NodeResources resources) {
int jdiscMemoryGb = 2;
int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb());
int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2);
String resourceString = String.format(Locale.ENGLISH,
"<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>",
resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name());
AthenzDomain idDomain = ("vespa.vespa.cd".equals(domain.value()) ? AthenzDomain.from("vespa.vespa") : domain);
String servicesXml =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<services xmlns:deploy='vespa' version='1.0'>\n" +
" <container version='1.0' id='tester'>\n" +
"\n" +
" <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" +
" <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" +
" <artifactsPath>artifacts</artifactsPath>\n" +
" <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" +
" <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
" <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" +
" </config>\n" +
" </component>\n" +
"\n" +
" <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" +
" <binding>http:
" </handler>\n" +
"\n" +
" <http>\n" +
" <!-- Make sure 4080 is the first port. This will be used by the config server. -->\n" +
" <server id='default' port='4080'/>\n" +
" <server id='testertls4443' port='4443'>\n" +
" <config name=\"jdisc.http.connector\">\n" +
" <tlsClientAuthEnforcer>\n" +
" <enable>true</enable>\n" +
" <pathWhitelist>\n" +
" <item>/status.html</item>\n" +
" <item>/state/v1/config</item>\n" +
" </pathWhitelist>\n" +
" </tlsClientAuthEnforcer>\n" +
" </config>\n" +
" <ssl>\n" +
" <private-key-file>/var/lib/sia/keys/" + idDomain.value() + ".tenant.key.pem</private-key-file>\n" +
" <certificate-file>/var/lib/sia/certs/" + idDomain.value() + ".tenant.cert.pem</certificate-file>\n" +
" <ca-certificates-file>/opt/yahoo/share/ssl/certs/athenz_certificate_bundle.pem</ca-certificates-file>\n" +
" <client-authentication>want</client-authentication>\n" +
" </ssl>\n" +
" </server>\n" +
" <filtering>\n" +
(systemUsesAthenz ?
" <access-control domain='" + domain.value() + "'>\n" +
" <exclude>\n" +
" <binding>http:
" </exclude>\n" +
" </access-control>\n"
: "") +
" <request-chain id=\"testrunner-api\">\n" +
" <filter id='authz-filter' class='com.yahoo.jdisc.http.filter.security.athenz.AthenzAuthorizationFilter' bundle=\"jdisc-security-filters\">\n" +
" <config name=\"jdisc.http.filter.security.athenz.athenz-authorization-filter\">\n" +
" <credentialsToVerify>TOKEN_ONLY</credentialsToVerify>\n" +
" <roleTokenHeaderName>Yahoo-Role-Auth</roleTokenHeaderName>\n" +
" </config>\n" +
" <component id=\"com.yahoo.jdisc.http.filter.security.athenz.StaticRequestResourceMapper\" bundle=\"jdisc-security-filters\">\n" +
" <config name=\"jdisc.http.filter.security.athenz.static-request-resource-mapper\">\n" +
" <resourceName>" + domain.value() + ":tester-application</resourceName>\n" +
" <action>deploy</action>\n" +
" </config>\n" +
" </component>\n" +
" </filter>\n" +
" </request-chain>\n" +
" </filtering>\n" +
" </http>\n" +
"\n" +
" <nodes count=\"1\" allocated-memory=\"" + jdiscMemoryPct + "%\">\n" +
" " + resourceString + "\n" +
" </nodes>\n" +
" </container>\n" +
"</services>\n";
return servicesXml.getBytes(UTF_8);
}
/** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */
private static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) {
String deploymentSpec =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<deployment version=\"1.0\" " +
athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") +
athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("") + ">" +
" <instance id=\"" + id.id().instance().value() + "\" />" +
"</deployment>";
return deploymentSpec.getBytes(UTF_8);
}
/** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */
private class DualLogger {
private final RunId id;
private final Step step;
private DualLogger(RunId id, Step step) {
this.id = id;
this.step = step;
}
private void log(String... messages) {
log(List.of(messages));
}
private void log(List<String> messages) {
controller.jobController().log(id, step, INFO, messages);
}
private void log(Level level, String message) {
log(level, message, null);
}
private void logWithInternalException(Level level, String message, Throwable thrown) {
logger.log(level, id + " at " + step + ": " + message, thrown);
controller.jobController().log(id, step, level, message);
}
private void log(Level level, String message, Throwable thrown) {
logger.log(level, id + " at " + step + ": " + message, thrown);
if (thrown != null) {
ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream();
thrown.printStackTrace(new PrintStream(traceBuffer));
message += "\n" + traceBuffer;
}
controller.jobController().log(id, step, level, message);
}
}
} |
log? | private boolean verifyEndpointCertificate(EndpointCertificateMetadata endpointCertificateMetadata, Instance instance, ZoneId zone) {
try {
var pemEncodedEndpointCertificate = secretStore.getSecret(endpointCertificateMetadata.certName(), endpointCertificateMetadata.version());
if (pemEncodedEndpointCertificate == null) return logWarning("Certificate not found in secret store");
List<X509Certificate> x509CertificateList = X509CertificateUtils.certificateListFromPem(pemEncodedEndpointCertificate);
if (x509CertificateList.isEmpty()) return logWarning("Empty certificate list");
if (x509CertificateList.size() < 2)
return logWarning("Only a single certificate found in chain - intermediate certificates likely missing");
Instant now = clock.instant();
Instant firstExpiry = Instant.MAX;
for (X509Certificate x509Certificate : x509CertificateList) {
Instant notBefore = x509Certificate.getNotBefore().toInstant();
Instant notAfter = x509Certificate.getNotAfter().toInstant();
if (now.isBefore(notBefore)) return logWarning("Certificate is not yet valid");
if (now.isAfter(notAfter)) return logWarning("Certificate has expired");
if (notAfter.isBefore(firstExpiry)) firstExpiry = notAfter;
}
X509Certificate endEntityCertificate = x509CertificateList.get(0);
List<String> subjectAlternativeNames = X509CertificateUtils.getSubjectAlternativeNames(endEntityCertificate).stream()
.filter(san -> san.getType().equals(SubjectAlternativeName.Type.DNS_NAME))
.map(SubjectAlternativeName::getValue).collect(Collectors.toList());
System.out.println(subjectAlternativeNames);
if (!subjectAlternativeNames.containsAll(dnsNamesOf(instance.id(), List.of(zone))))
return logWarning("Certificate is missing SANs");
return true;
} catch (Exception e) {
log.log(LogLevel.WARNING, "Exception thrown when verifying endpoint certificate", e);
return false;
}
} | System.out.println(subjectAlternativeNames); | private boolean verifyEndpointCertificate(EndpointCertificateMetadata endpointCertificateMetadata, Instance instance, ZoneId zone) {
try {
var pemEncodedEndpointCertificate = secretStore.getSecret(endpointCertificateMetadata.certName(), endpointCertificateMetadata.version());
if (pemEncodedEndpointCertificate == null) return logWarning("Certificate not found in secret store");
List<X509Certificate> x509CertificateList = X509CertificateUtils.certificateListFromPem(pemEncodedEndpointCertificate);
if (x509CertificateList.isEmpty()) return logWarning("Empty certificate list");
if (x509CertificateList.size() < 2)
return logWarning("Only a single certificate found in chain - intermediate certificates likely missing");
Instant now = clock.instant();
Instant firstExpiry = Instant.MAX;
for (X509Certificate x509Certificate : x509CertificateList) {
Instant notBefore = x509Certificate.getNotBefore().toInstant();
Instant notAfter = x509Certificate.getNotAfter().toInstant();
if (now.isBefore(notBefore)) return logWarning("Certificate is not yet valid");
if (now.isAfter(notAfter)) return logWarning("Certificate has expired");
if (notAfter.isBefore(firstExpiry)) firstExpiry = notAfter;
}
X509Certificate endEntityCertificate = x509CertificateList.get(0);
Set<String> subjectAlternativeNames = X509CertificateUtils.getSubjectAlternativeNames(endEntityCertificate).stream()
.filter(san -> san.getType().equals(SubjectAlternativeName.Type.DNS_NAME))
.map(SubjectAlternativeName::getValue).collect(Collectors.toSet());
if (!subjectAlternativeNames.equals(Set.copyOf(dnsNamesOf(instance.id(), List.of(zone)))))
return logWarning("The list of SANs in the certificate does not match what we expect");
return true;
} catch (Exception e) {
log.log(LogLevel.WARNING, "Exception thrown when verifying endpoint certificate", e);
return false;
}
} | class EndpointCertificateManager {
private static final Logger log = Logger.getLogger(EndpointCertificateManager.class.getName());
private final ZoneRegistry zoneRegistry;
private final CuratorDb curator;
private final SecretStore secretStore;
private final ApplicationCertificateProvider applicationCertificateProvider;
private final Clock clock;
public EndpointCertificateManager(ZoneRegistry zoneRegistry,
CuratorDb curator,
SecretStore secretStore,
ApplicationCertificateProvider applicationCertificateProvider,
Clock clock) {
this.zoneRegistry = zoneRegistry;
this.curator = curator;
this.secretStore = secretStore;
this.applicationCertificateProvider = applicationCertificateProvider;
this.clock = clock;
}
public Optional<EndpointCertificateMetadata> getEndpointCertificateMetadata(Instance instance, ZoneId zone) {
if (!zoneRegistry.zones().directlyRouted().ids().contains(zone)) return Optional.empty();
Optional<EndpointCertificateMetadata> endpointCertificateMetadata =
curator.readEndpointCertificateMetadata(instance.id())
.or(() -> Optional.of(provisionEndpointCertificate(instance)));
endpointCertificateMetadata.ifPresent(certificateMetadata -> verifyEndpointCertificate(certificateMetadata, instance, zone));
return endpointCertificateMetadata;
}
private EndpointCertificateMetadata provisionEndpointCertificate(Instance instance) {
List<ZoneId> directlyRoutedZones = zoneRegistry.zones().directlyRouted().zones().stream().map(ZoneApi::getId).collect(Collectors.toUnmodifiableList());
ApplicationCertificate newCertificate = applicationCertificateProvider
.requestCaSignedCertificate(instance.id(), dnsNamesOf(instance.id(), directlyRoutedZones));
EndpointCertificateMetadata provisionedCertificateMetadata = EndpointCertificateMetadataSerializer.fromTlsSecretsKeysString(newCertificate.secretsKeyNamePrefix());
curator.writeEndpointCertificateMetadata(instance.id(), provisionedCertificateMetadata);
return provisionedCertificateMetadata;
}
private static boolean logWarning(String message) {
log.log(LogLevel.WARNING, message);
return false;
}
private List<String> dnsNamesOf(ApplicationId applicationId, List<ZoneId> zones) {
List<String> endpointDnsNames = new ArrayList<>();
endpointDnsNames.add(Endpoint.createHashedCn(applicationId, zoneRegistry.system()));
var globalDefaultEndpoint = Endpoint.of(applicationId).named(EndpointId.defaultId());
var rotationEndpoints = Endpoint.of(applicationId).wildcard();
var zoneLocalEndpoints = zones.stream().flatMap(zone -> Stream.of(
Endpoint.of(applicationId).target(ClusterSpec.Id.from("default"), zone),
Endpoint.of(applicationId).wildcard(zone)
));
Stream.concat(Stream.of(globalDefaultEndpoint, rotationEndpoints), zoneLocalEndpoints)
.map(Endpoint.EndpointBuilder::directRouting)
.map(endpoint -> endpoint.on(Endpoint.Port.tls()))
.map(endpointBuilder -> endpointBuilder.in(zoneRegistry.system()))
.map(Endpoint::dnsName).forEach(endpointDnsNames::add);
return Collections.unmodifiableList(endpointDnsNames);
}
} | class EndpointCertificateManager {
private static final Logger log = Logger.getLogger(EndpointCertificateManager.class.getName());
private final ZoneRegistry zoneRegistry;
private final CuratorDb curator;
private final SecretStore secretStore;
private final ApplicationCertificateProvider applicationCertificateProvider;
private final Clock clock;
public EndpointCertificateManager(ZoneRegistry zoneRegistry,
CuratorDb curator,
SecretStore secretStore,
ApplicationCertificateProvider applicationCertificateProvider,
Clock clock) {
this.zoneRegistry = zoneRegistry;
this.curator = curator;
this.secretStore = secretStore;
this.applicationCertificateProvider = applicationCertificateProvider;
this.clock = clock;
}
public Optional<EndpointCertificateMetadata> getEndpointCertificateMetadata(Instance instance, ZoneId zone) {
if (!zoneRegistry.zones().directlyRouted().ids().contains(zone)) return Optional.empty();
Optional<EndpointCertificateMetadata> endpointCertificateMetadata =
curator.readEndpointCertificateMetadata(instance.id())
.or(() -> Optional.of(provisionEndpointCertificate(instance)));
endpointCertificateMetadata.ifPresent(certificateMetadata -> verifyEndpointCertificate(certificateMetadata, instance, zone));
return endpointCertificateMetadata;
}
private EndpointCertificateMetadata provisionEndpointCertificate(Instance instance) {
List<ZoneId> directlyRoutedZones = zoneRegistry.zones().directlyRouted().zones().stream().map(ZoneApi::getId).collect(Collectors.toUnmodifiableList());
ApplicationCertificate newCertificate = applicationCertificateProvider
.requestCaSignedCertificate(instance.id(), dnsNamesOf(instance.id(), directlyRoutedZones));
EndpointCertificateMetadata provisionedCertificateMetadata = EndpointCertificateMetadataSerializer.fromTlsSecretsKeysString(newCertificate.secretsKeyNamePrefix());
curator.writeEndpointCertificateMetadata(instance.id(), provisionedCertificateMetadata);
return provisionedCertificateMetadata;
}
private static boolean logWarning(String message) {
log.log(LogLevel.WARNING, message);
return false;
}
private List<String> dnsNamesOf(ApplicationId applicationId, List<ZoneId> zones) {
List<String> endpointDnsNames = new ArrayList<>();
endpointDnsNames.add(Endpoint.createHashedCn(applicationId, zoneRegistry.system()));
var globalDefaultEndpoint = Endpoint.of(applicationId).named(EndpointId.defaultId());
var rotationEndpoints = Endpoint.of(applicationId).wildcard();
var zoneLocalEndpoints = zones.stream().flatMap(zone -> Stream.of(
Endpoint.of(applicationId).target(ClusterSpec.Id.from("default"), zone),
Endpoint.of(applicationId).wildcard(zone)
));
Stream.concat(Stream.of(globalDefaultEndpoint, rotationEndpoints), zoneLocalEndpoints)
.map(Endpoint.EndpointBuilder::directRouting)
.map(endpoint -> endpoint.on(Endpoint.Port.tls()))
.map(endpointBuilder -> endpointBuilder.in(zoneRegistry.system()))
.map(Endpoint::dnsName).forEach(endpointDnsNames::add);
return Collections.unmodifiableList(endpointDnsNames);
}
} |
Perhaps also check that the certificate does not contain any additional SAN DNS values? | private boolean verifyEndpointCertificate(EndpointCertificateMetadata endpointCertificateMetadata, Instance instance, ZoneId zone) {
try {
var pemEncodedEndpointCertificate = secretStore.getSecret(endpointCertificateMetadata.certName(), endpointCertificateMetadata.version());
if (pemEncodedEndpointCertificate == null) return logWarning("Certificate not found in secret store");
List<X509Certificate> x509CertificateList = X509CertificateUtils.certificateListFromPem(pemEncodedEndpointCertificate);
if (x509CertificateList.isEmpty()) return logWarning("Empty certificate list");
if (x509CertificateList.size() < 2)
return logWarning("Only a single certificate found in chain - intermediate certificates likely missing");
Instant now = clock.instant();
Instant firstExpiry = Instant.MAX;
for (X509Certificate x509Certificate : x509CertificateList) {
Instant notBefore = x509Certificate.getNotBefore().toInstant();
Instant notAfter = x509Certificate.getNotAfter().toInstant();
if (now.isBefore(notBefore)) return logWarning("Certificate is not yet valid");
if (now.isAfter(notAfter)) return logWarning("Certificate has expired");
if (notAfter.isBefore(firstExpiry)) firstExpiry = notAfter;
}
X509Certificate endEntityCertificate = x509CertificateList.get(0);
List<String> subjectAlternativeNames = X509CertificateUtils.getSubjectAlternativeNames(endEntityCertificate).stream()
.filter(san -> san.getType().equals(SubjectAlternativeName.Type.DNS_NAME))
.map(SubjectAlternativeName::getValue).collect(Collectors.toList());
System.out.println(subjectAlternativeNames);
if (!subjectAlternativeNames.containsAll(dnsNamesOf(instance.id(), List.of(zone))))
return logWarning("Certificate is missing SANs");
return true;
} catch (Exception e) {
log.log(LogLevel.WARNING, "Exception thrown when verifying endpoint certificate", e);
return false;
}
} | if (!subjectAlternativeNames.containsAll(dnsNamesOf(instance.id(), List.of(zone)))) | private boolean verifyEndpointCertificate(EndpointCertificateMetadata endpointCertificateMetadata, Instance instance, ZoneId zone) {
try {
var pemEncodedEndpointCertificate = secretStore.getSecret(endpointCertificateMetadata.certName(), endpointCertificateMetadata.version());
if (pemEncodedEndpointCertificate == null) return logWarning("Certificate not found in secret store");
List<X509Certificate> x509CertificateList = X509CertificateUtils.certificateListFromPem(pemEncodedEndpointCertificate);
if (x509CertificateList.isEmpty()) return logWarning("Empty certificate list");
if (x509CertificateList.size() < 2)
return logWarning("Only a single certificate found in chain - intermediate certificates likely missing");
Instant now = clock.instant();
Instant firstExpiry = Instant.MAX;
for (X509Certificate x509Certificate : x509CertificateList) {
Instant notBefore = x509Certificate.getNotBefore().toInstant();
Instant notAfter = x509Certificate.getNotAfter().toInstant();
if (now.isBefore(notBefore)) return logWarning("Certificate is not yet valid");
if (now.isAfter(notAfter)) return logWarning("Certificate has expired");
if (notAfter.isBefore(firstExpiry)) firstExpiry = notAfter;
}
X509Certificate endEntityCertificate = x509CertificateList.get(0);
Set<String> subjectAlternativeNames = X509CertificateUtils.getSubjectAlternativeNames(endEntityCertificate).stream()
.filter(san -> san.getType().equals(SubjectAlternativeName.Type.DNS_NAME))
.map(SubjectAlternativeName::getValue).collect(Collectors.toSet());
if (!subjectAlternativeNames.equals(Set.copyOf(dnsNamesOf(instance.id(), List.of(zone)))))
return logWarning("The list of SANs in the certificate does not match what we expect");
return true;
} catch (Exception e) {
log.log(LogLevel.WARNING, "Exception thrown when verifying endpoint certificate", e);
return false;
}
} | class EndpointCertificateManager {
private static final Logger log = Logger.getLogger(EndpointCertificateManager.class.getName());
private final ZoneRegistry zoneRegistry;
private final CuratorDb curator;
private final SecretStore secretStore;
private final ApplicationCertificateProvider applicationCertificateProvider;
private final Clock clock;
public EndpointCertificateManager(ZoneRegistry zoneRegistry,
CuratorDb curator,
SecretStore secretStore,
ApplicationCertificateProvider applicationCertificateProvider,
Clock clock) {
this.zoneRegistry = zoneRegistry;
this.curator = curator;
this.secretStore = secretStore;
this.applicationCertificateProvider = applicationCertificateProvider;
this.clock = clock;
}
public Optional<EndpointCertificateMetadata> getEndpointCertificateMetadata(Instance instance, ZoneId zone) {
if (!zoneRegistry.zones().directlyRouted().ids().contains(zone)) return Optional.empty();
Optional<EndpointCertificateMetadata> endpointCertificateMetadata =
curator.readEndpointCertificateMetadata(instance.id())
.or(() -> Optional.of(provisionEndpointCertificate(instance)));
endpointCertificateMetadata.ifPresent(certificateMetadata -> verifyEndpointCertificate(certificateMetadata, instance, zone));
return endpointCertificateMetadata;
}
private EndpointCertificateMetadata provisionEndpointCertificate(Instance instance) {
List<ZoneId> directlyRoutedZones = zoneRegistry.zones().directlyRouted().zones().stream().map(ZoneApi::getId).collect(Collectors.toUnmodifiableList());
ApplicationCertificate newCertificate = applicationCertificateProvider
.requestCaSignedCertificate(instance.id(), dnsNamesOf(instance.id(), directlyRoutedZones));
EndpointCertificateMetadata provisionedCertificateMetadata = EndpointCertificateMetadataSerializer.fromTlsSecretsKeysString(newCertificate.secretsKeyNamePrefix());
curator.writeEndpointCertificateMetadata(instance.id(), provisionedCertificateMetadata);
return provisionedCertificateMetadata;
}
private static boolean logWarning(String message) {
log.log(LogLevel.WARNING, message);
return false;
}
private List<String> dnsNamesOf(ApplicationId applicationId, List<ZoneId> zones) {
List<String> endpointDnsNames = new ArrayList<>();
endpointDnsNames.add(Endpoint.createHashedCn(applicationId, zoneRegistry.system()));
var globalDefaultEndpoint = Endpoint.of(applicationId).named(EndpointId.defaultId());
var rotationEndpoints = Endpoint.of(applicationId).wildcard();
var zoneLocalEndpoints = zones.stream().flatMap(zone -> Stream.of(
Endpoint.of(applicationId).target(ClusterSpec.Id.from("default"), zone),
Endpoint.of(applicationId).wildcard(zone)
));
Stream.concat(Stream.of(globalDefaultEndpoint, rotationEndpoints), zoneLocalEndpoints)
.map(Endpoint.EndpointBuilder::directRouting)
.map(endpoint -> endpoint.on(Endpoint.Port.tls()))
.map(endpointBuilder -> endpointBuilder.in(zoneRegistry.system()))
.map(Endpoint::dnsName).forEach(endpointDnsNames::add);
return Collections.unmodifiableList(endpointDnsNames);
}
} | class EndpointCertificateManager {
private static final Logger log = Logger.getLogger(EndpointCertificateManager.class.getName());
// Source of zone topology; used to find directly-routed zones and the current system.
private final ZoneRegistry zoneRegistry;
// Persistent store for endpoint certificate metadata.
private final CuratorDb curator;
// NOTE(review): secretStore and clock are stored but not used by any method
// visible in this revision — confirm they are needed here.
private final SecretStore secretStore;
// Issues new CA-signed certificates on demand.
private final ApplicationCertificateProvider applicationCertificateProvider;
private final Clock clock;
// All collaborators are injected; this class holds no hidden dependencies.
public EndpointCertificateManager(ZoneRegistry zoneRegistry,
CuratorDb curator,
SecretStore secretStore,
ApplicationCertificateProvider applicationCertificateProvider,
Clock clock) {
this.zoneRegistry = zoneRegistry;
this.curator = curator;
this.secretStore = secretStore;
this.applicationCertificateProvider = applicationCertificateProvider;
this.clock = clock;
}
/**
 * Returns endpoint certificate metadata for the instance, provisioning a new
 * certificate if none is stored. Empty when the zone is not directly routed.
 * The (possibly fresh) metadata is verified before being returned.
 */
public Optional<EndpointCertificateMetadata> getEndpointCertificateMetadata(Instance instance, ZoneId zone) {
    if ( ! zoneRegistry.zones().directlyRouted().ids().contains(zone))
        return Optional.empty();
    // Reuse stored metadata when present; otherwise provision lazily.
    Optional<EndpointCertificateMetadata> metadata =
            Optional.of(curator.readEndpointCertificateMetadata(instance.id())
                               .orElseGet(() -> provisionEndpointCertificate(instance)));
    metadata.ifPresent(m -> verifyEndpointCertificate(m, instance, zone));
    return metadata;
}
// Requests a CA-signed certificate covering all directly-routed zones for this
// instance, persists its metadata in curator, and returns that metadata.
private EndpointCertificateMetadata provisionEndpointCertificate(Instance instance) {
List<ZoneId> directlyRoutedZones = zoneRegistry.zones().directlyRouted().zones().stream().map(ZoneApi::getId).collect(Collectors.toUnmodifiableList());
ApplicationCertificate newCertificate = applicationCertificateProvider
.requestCaSignedCertificate(instance.id(), dnsNamesOf(instance.id(), directlyRoutedZones));
EndpointCertificateMetadata provisionedCertificateMetadata = EndpointCertificateMetadataSerializer.fromTlsSecretsKeysString(newCertificate.secretsKeyNamePrefix());
// Persist before returning so subsequent reads observe the same metadata.
curator.writeEndpointCertificateMetadata(instance.id(), provisionedCertificateMetadata);
return provisionedCertificateMetadata;
}
// Logs the message at WARNING level and returns false, letting verification
// code log and bail out in one statement.
private static boolean logWarning(String message) {
log.log(LogLevel.WARNING, message);
return false;
}
// Builds the full list of DNS names the certificate must cover: a hashed CN,
// the default global endpoint, a global wildcard, and per zone the default
// cluster endpoint plus a zone wildcard — all in direct-routing form on TLS.
private List<String> dnsNamesOf(ApplicationId applicationId, List<ZoneId> zones) {
List<String> endpointDnsNames = new ArrayList<>();
// The hashed CN is always first in the returned list.
endpointDnsNames.add(Endpoint.createHashedCn(applicationId, zoneRegistry.system()));
var globalDefaultEndpoint = Endpoint.of(applicationId).named(EndpointId.defaultId());
var rotationEndpoints = Endpoint.of(applicationId).wildcard();
var zoneLocalEndpoints = zones.stream().flatMap(zone -> Stream.of(
Endpoint.of(applicationId).target(ClusterSpec.Id.from("default"), zone),
Endpoint.of(applicationId).wildcard(zone)
));
// Render every endpoint builder as a direct-routing TLS name in this system.
Stream.concat(Stream.of(globalDefaultEndpoint, rotationEndpoints), zoneLocalEndpoints)
.map(Endpoint.EndpointBuilder::directRouting)
.map(endpoint -> endpoint.on(Endpoint.Port.tls()))
.map(endpointBuilder -> endpointBuilder.in(zoneRegistry.system()))
.map(Endpoint::dnsName).forEach(endpointDnsNames::add);
return Collections.unmodifiableList(endpointDnsNames);
}
} |
Consider moving this to the constructor. | public Optional<EndpointCertificateMetadata> getEndpointCertificateMetadata(Instance instance, ZoneId zone) {
if (!zoneRegistry.zones().directlyRouted().ids().contains(zone)) return Optional.empty();
var endpointCertificateMetadata =
curator.readEndpointCertificateMetadata(instance.id())
.orElse(provisionEndpointCertificate(instance));
var useRefreshedEndpointCertificate = Flags.USE_REFRESHED_ENDPOINT_CERTIFICATE.bindTo(flagSource);
if (useRefreshedEndpointCertificate.with(FetchVector.Dimension.APPLICATION_ID, instance.id().serializedForm()).value()) {
var latestAvailableVersion = greatestVersionInSecretStore(endpointCertificateMetadata);
if (latestAvailableVersion.isPresent() && latestAvailableVersion.getAsInt() > endpointCertificateMetadata.version()) {
var refreshedCertificateMetadata = new EndpointCertificateMetadata(
endpointCertificateMetadata.keyName(),
endpointCertificateMetadata.certName(),
latestAvailableVersion.getAsInt()
);
if (verifyEndpointCertificate(refreshedCertificateMetadata, instance, zone, "Did not refresh, problems with refreshed certificate: "))
return Optional.of(refreshedCertificateMetadata);
}
}
verifyEndpointCertificate(endpointCertificateMetadata, instance, zone, "Problems while verifying certificate: ");
return Optional.of(endpointCertificateMetadata);
} | var useRefreshedEndpointCertificate = Flags.USE_REFRESHED_ENDPOINT_CERTIFICATE.bindTo(flagSource); | public Optional<EndpointCertificateMetadata> getEndpointCertificateMetadata(Instance instance, ZoneId zone) {
if (!zoneRegistry.zones().directlyRouted().ids().contains(zone)) return Optional.empty();
var endpointCertificateMetadata =
curator.readEndpointCertificateMetadata(instance.id())
.orElseGet(() -> provisionEndpointCertificate(instance));
if (useRefreshedEndpointCertificate.with(FetchVector.Dimension.APPLICATION_ID, instance.id().serializedForm()).value()) {
var latestAvailableVersion = latestVersionInSecretStore(endpointCertificateMetadata);
if (latestAvailableVersion.isPresent() && latestAvailableVersion.getAsInt() > endpointCertificateMetadata.version()) {
var refreshedCertificateMetadata = new EndpointCertificateMetadata(
endpointCertificateMetadata.keyName(),
endpointCertificateMetadata.certName(),
latestAvailableVersion.getAsInt()
);
if (verifyEndpointCertificate(refreshedCertificateMetadata, instance, zone, "Did not refresh, problems with refreshed certificate: "))
return Optional.of(refreshedCertificateMetadata);
}
}
verifyEndpointCertificate(endpointCertificateMetadata, instance, zone, "Problems while verifying certificate: ");
return Optional.of(endpointCertificateMetadata);
} | class EndpointCertificateManager {
private static final Logger log = Logger.getLogger(EndpointCertificateManager.class.getName());
// Source of zone topology; used to find directly-routed zones and the current system.
private final ZoneRegistry zoneRegistry;
// Persistent store for endpoint certificate metadata.
private final CuratorDb curator;
// Holds the certificate/key secrets referenced by the metadata.
private final SecretStore secretStore;
// Issues new CA-signed certificates on demand.
private final ApplicationCertificateProvider applicationCertificateProvider;
// Injected clock; used when checking certificate validity windows.
private final Clock clock;
// Feature-flag source; flags are bound per call in this revision.
private final FlagSource flagSource;
public EndpointCertificateManager(ZoneRegistry zoneRegistry,
CuratorDb curator,
SecretStore secretStore,
ApplicationCertificateProvider applicationCertificateProvider,
Clock clock, FlagSource flagSource) {
this.zoneRegistry = zoneRegistry;
this.curator = curator;
this.secretStore = secretStore;
this.applicationCertificateProvider = applicationCertificateProvider;
this.clock = clock;
this.flagSource = flagSource;
}
/**
 * Returns the greatest secret-store version at which BOTH the certificate and
 * its private key are available, or empty when they share no version.
 *
 * Bug fix: this previously used {@code Sets.union}, which could select a
 * version present for only one of the two secrets — making the "refreshed"
 * certificate unusable because its key (or cert) does not exist at that
 * version. A version is only deployable when present in both lists, hence
 * the intersection.
 */
private OptionalInt greatestVersionInSecretStore(EndpointCertificateMetadata originalCertificateMetadata) {
    var certVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.certName()));
    var keyVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.keyName()));
    return Sets.intersection(certVersions, keyVersions).stream().mapToInt(Integer::intValue).max();
}
// Requests a CA-signed certificate covering all directly-routed zones for this
// instance, persists its metadata in curator, and returns that metadata.
private EndpointCertificateMetadata provisionEndpointCertificate(Instance instance) {
List<ZoneId> directlyRoutedZones = zoneRegistry.zones().directlyRouted().zones().stream().map(ZoneApi::getId).collect(Collectors.toUnmodifiableList());
ApplicationCertificate newCertificate = applicationCertificateProvider
.requestCaSignedCertificate(instance.id(), dnsNamesOf(instance.id(), directlyRoutedZones));
EndpointCertificateMetadata provisionedCertificateMetadata = EndpointCertificateMetadataSerializer.fromTlsSecretsKeysString(newCertificate.secretsKeyNamePrefix());
// Persist before returning so subsequent reads observe the same metadata.
curator.writeEndpointCertificateMetadata(instance.id(), provisionedCertificateMetadata);
return provisionedCertificateMetadata;
}
// Returns true iff the stored certificate chain at the metadata's version is
// present, is a proper chain (>= 2 certs), every certificate is currently
// within its validity window, and the end-entity SANs exactly match the DNS
// names we expect for this instance in this zone. On any failure a warning is
// logged (with the given prefix) and false is returned; exceptions are caught
// and mapped to false as well.
private boolean verifyEndpointCertificate(EndpointCertificateMetadata endpointCertificateMetadata, Instance instance, ZoneId zone, String warningPrefix) {
try {
var pemEncodedEndpointCertificate = secretStore.getSecret(endpointCertificateMetadata.certName(), endpointCertificateMetadata.version());
if (pemEncodedEndpointCertificate == null) return logWarning(warningPrefix, "Certificate not found in secret store");
List<X509Certificate> x509CertificateList = X509CertificateUtils.certificateListFromPem(pemEncodedEndpointCertificate);
if (x509CertificateList.isEmpty()) return logWarning(warningPrefix, "Empty certificate list");
// A lone certificate means the intermediates were not bundled.
if (x509CertificateList.size() < 2)
return logWarning(warningPrefix, "Only a single certificate found in chain - intermediate certificates likely missing");
Instant now = clock.instant();
Instant firstExpiry = Instant.MAX;
// Every certificate in the chain must be within its validity window now.
for (X509Certificate x509Certificate : x509CertificateList) {
Instant notBefore = x509Certificate.getNotBefore().toInstant();
Instant notAfter = x509Certificate.getNotAfter().toInstant();
if (now.isBefore(notBefore)) return logWarning(warningPrefix, "Certificate is not yet valid");
if (now.isAfter(notAfter)) return logWarning(warningPrefix, "Certificate has expired");
// NOTE(review): firstExpiry is computed but never read after the loop —
// dead code, or missing expiry-driven refresh logic? Confirm.
if (notAfter.isBefore(firstExpiry)) firstExpiry = notAfter;
}
X509Certificate endEntityCertificate = x509CertificateList.get(0);
Set<String> subjectAlternativeNames = X509CertificateUtils.getSubjectAlternativeNames(endEntityCertificate).stream()
.filter(san -> san.getType().equals(SubjectAlternativeName.Type.DNS_NAME))
.map(SubjectAlternativeName::getValue).collect(Collectors.toSet());
// SANs must match exactly — extra names are as much a failure as missing ones.
if (!subjectAlternativeNames.equals(Set.copyOf(dnsNamesOf(instance.id(), List.of(zone)))))
return logWarning(warningPrefix, "The list of SANs in the certificate does not match what we expect");
return true;
} catch (Exception e) {
log.log(LogLevel.WARNING, "Exception thrown when verifying endpoint certificate", e);
return false;
}
}
// Logs prefix + message at WARNING level and returns false, letting
// verification code log and bail out in one statement.
private static boolean logWarning(String warningPrefix, String message) {
log.log(LogLevel.WARNING, warningPrefix + message);
return false;
}
// Builds the full list of DNS names the certificate must cover: a hashed CN,
// the default global endpoint, a global wildcard, and per zone the default
// cluster endpoint plus a zone wildcard — all in direct-routing form on TLS.
private List<String> dnsNamesOf(ApplicationId applicationId, List<ZoneId> zones) {
List<String> endpointDnsNames = new ArrayList<>();
// The hashed CN is always first in the returned list.
endpointDnsNames.add(Endpoint.createHashedCn(applicationId, zoneRegistry.system()));
var globalDefaultEndpoint = Endpoint.of(applicationId).named(EndpointId.defaultId());
var rotationEndpoints = Endpoint.of(applicationId).wildcard();
var zoneLocalEndpoints = zones.stream().flatMap(zone -> Stream.of(
Endpoint.of(applicationId).target(ClusterSpec.Id.from("default"), zone),
Endpoint.of(applicationId).wildcard(zone)
));
// Render every endpoint builder as a direct-routing TLS name in this system.
Stream.concat(Stream.of(globalDefaultEndpoint, rotationEndpoints), zoneLocalEndpoints)
.map(Endpoint.EndpointBuilder::directRouting)
.map(endpoint -> endpoint.on(Endpoint.Port.tls()))
.map(endpointBuilder -> endpointBuilder.in(zoneRegistry.system()))
.map(Endpoint::dnsName).forEach(endpointDnsNames::add);
return Collections.unmodifiableList(endpointDnsNames);
}
} | class EndpointCertificateManager {
private static final Logger log = Logger.getLogger(EndpointCertificateManager.class.getName());
// Source of zone topology; used to find directly-routed zones and the current system.
private final ZoneRegistry zoneRegistry;
// Persistent store for endpoint certificate metadata.
private final CuratorDb curator;
// Holds the certificate/key secrets referenced by the metadata.
private final SecretStore secretStore;
// Issues new CA-signed certificates on demand.
private final ApplicationCertificateProvider applicationCertificateProvider;
// Injected clock; used when checking certificate validity windows.
private final Clock clock;
// Bound once in the constructor so callers do not rebind per invocation.
private final BooleanFlag useRefreshedEndpointCertificate;
public EndpointCertificateManager(ZoneRegistry zoneRegistry,
CuratorDb curator,
SecretStore secretStore,
ApplicationCertificateProvider applicationCertificateProvider,
Clock clock, FlagSource flagSource) {
this.zoneRegistry = zoneRegistry;
this.curator = curator;
this.secretStore = secretStore;
this.applicationCertificateProvider = applicationCertificateProvider;
this.clock = clock;
this.useRefreshedEndpointCertificate = Flags.USE_REFRESHED_ENDPOINT_CERTIFICATE.bindTo(flagSource);
}
// Returns the greatest secret-store version at which BOTH the certificate and
// its key are available (intersection of the two version sets), or empty when
// they share no version — a version present for only one secret is unusable.
private OptionalInt latestVersionInSecretStore(EndpointCertificateMetadata originalCertificateMetadata) {
var certVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.certName()));
var keyVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.keyName()));
return Sets.intersection(certVersions, keyVersions).stream().mapToInt(Integer::intValue).max();
}
// Requests a CA-signed certificate covering all directly-routed zones for this
// instance, persists its metadata in curator, and returns that metadata.
private EndpointCertificateMetadata provisionEndpointCertificate(Instance instance) {
List<ZoneId> directlyRoutedZones = zoneRegistry.zones().directlyRouted().zones().stream().map(ZoneApi::getId).collect(Collectors.toUnmodifiableList());
ApplicationCertificate newCertificate = applicationCertificateProvider
.requestCaSignedCertificate(instance.id(), dnsNamesOf(instance.id(), directlyRoutedZones));
EndpointCertificateMetadata provisionedCertificateMetadata = EndpointCertificateMetadataSerializer.fromTlsSecretsKeysString(newCertificate.secretsKeyNamePrefix());
// Persist before returning so subsequent reads observe the same metadata.
curator.writeEndpointCertificateMetadata(instance.id(), provisionedCertificateMetadata);
return provisionedCertificateMetadata;
}
// Returns true iff the stored certificate chain at the metadata's version is
// present, is a proper chain (>= 2 certs), every certificate is currently
// within its validity window, and the end-entity SANs exactly match the DNS
// names we expect for this instance in this zone. On any failure a warning is
// logged (with the given prefix) and false is returned; exceptions are caught
// and mapped to false as well.
private boolean verifyEndpointCertificate(EndpointCertificateMetadata endpointCertificateMetadata, Instance instance, ZoneId zone, String warningPrefix) {
try {
var pemEncodedEndpointCertificate = secretStore.getSecret(endpointCertificateMetadata.certName(), endpointCertificateMetadata.version());
if (pemEncodedEndpointCertificate == null) return logWarning(warningPrefix, "Certificate not found in secret store");
List<X509Certificate> x509CertificateList = X509CertificateUtils.certificateListFromPem(pemEncodedEndpointCertificate);
if (x509CertificateList.isEmpty()) return logWarning(warningPrefix, "Empty certificate list");
// A lone certificate means the intermediates were not bundled.
if (x509CertificateList.size() < 2)
return logWarning(warningPrefix, "Only a single certificate found in chain - intermediate certificates likely missing");
Instant now = clock.instant();
Instant firstExpiry = Instant.MAX;
// Every certificate in the chain must be within its validity window now.
for (X509Certificate x509Certificate : x509CertificateList) {
Instant notBefore = x509Certificate.getNotBefore().toInstant();
Instant notAfter = x509Certificate.getNotAfter().toInstant();
if (now.isBefore(notBefore)) return logWarning(warningPrefix, "Certificate is not yet valid");
if (now.isAfter(notAfter)) return logWarning(warningPrefix, "Certificate has expired");
// NOTE(review): firstExpiry is computed but never read after the loop —
// dead code, or missing expiry-driven refresh logic? Confirm.
if (notAfter.isBefore(firstExpiry)) firstExpiry = notAfter;
}
X509Certificate endEntityCertificate = x509CertificateList.get(0);
Set<String> subjectAlternativeNames = X509CertificateUtils.getSubjectAlternativeNames(endEntityCertificate).stream()
.filter(san -> san.getType().equals(SubjectAlternativeName.Type.DNS_NAME))
.map(SubjectAlternativeName::getValue).collect(Collectors.toSet());
// SANs must match exactly — extra names are as much a failure as missing ones.
if (!subjectAlternativeNames.equals(Set.copyOf(dnsNamesOf(instance.id(), List.of(zone)))))
return logWarning(warningPrefix, "The list of SANs in the certificate does not match what we expect");
return true;
} catch (Exception e) {
log.log(LogLevel.WARNING, "Exception thrown when verifying endpoint certificate", e);
return false;
}
}
// Logs prefix + message at WARNING level and returns false, letting
// verification code log and bail out in one statement.
private static boolean logWarning(String warningPrefix, String message) {
log.log(LogLevel.WARNING, warningPrefix + message);
return false;
}
// Builds the full list of DNS names the certificate must cover: a hashed CN,
// the default global endpoint, a global wildcard, and per zone the default
// cluster endpoint plus a zone wildcard — all in direct-routing form on TLS.
private List<String> dnsNamesOf(ApplicationId applicationId, List<ZoneId> zones) {
List<String> endpointDnsNames = new ArrayList<>();
// The hashed CN is always first in the returned list.
endpointDnsNames.add(Endpoint.createHashedCn(applicationId, zoneRegistry.system()));
var globalDefaultEndpoint = Endpoint.of(applicationId).named(EndpointId.defaultId());
var rotationEndpoints = Endpoint.of(applicationId).wildcard();
var zoneLocalEndpoints = zones.stream().flatMap(zone -> Stream.of(
Endpoint.of(applicationId).target(ClusterSpec.Id.from("default"), zone),
Endpoint.of(applicationId).wildcard(zone)
));
// Render every endpoint builder as a direct-routing TLS name in this system.
Stream.concat(Stream.of(globalDefaultEndpoint, rotationEndpoints), zoneLocalEndpoints)
.map(Endpoint.EndpointBuilder::directRouting)
.map(endpoint -> endpoint.on(Endpoint.Port.tls()))
.map(endpointBuilder -> endpointBuilder.in(zoneRegistry.system()))
.map(Endpoint::dnsName).forEach(endpointDnsNames::add);
return Collections.unmodifiableList(endpointDnsNames);
}
} |
@jonmv This was the bug. When source revision is empty, no fields are written to the `latestVersion` object, but the object itself is created, thus becoming an unknown version. Subsequent reads fail because the `Application` constructor throws if `latestVersion` is unknown so fromSlime/toSlime is asymmetric. > What are the other changes here? Cleanup of `SourceRevision`. `SourceRevision` where all fields are empty is now treated as an empty source revision. The fact they can be empty might point to a bug or lack of validation in the submission API? | private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
applicationVersion.buildNumber().ifPresent(buildNumber -> object.setLong(applicationBuildNumberField, buildNumber));
applicationVersion.source().ifPresent(source -> toSlime(source, object.setObject(sourceRevisionField)));
applicationVersion.authorEmail().ifPresent(email -> object.setString(authorEmailField, email));
applicationVersion.compileVersion().ifPresent(version -> object.setString(compileVersionField, version.toString()));
applicationVersion.buildTime().ifPresent(time -> object.setLong(buildTimeField, time.toEpochMilli()));
applicationVersion.sourceUrl().ifPresent(url -> object.setString(sourceUrlField, url));
applicationVersion.commit().ifPresent(commit -> object.setString(commitField, commit));
} | applicationVersion.buildNumber().ifPresent(buildNumber -> object.setLong(applicationBuildNumberField, buildNumber)); | private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
applicationVersion.buildNumber().ifPresent(buildNumber -> object.setLong(applicationBuildNumberField, buildNumber));
applicationVersion.source().ifPresent(source -> toSlime(source, object.setObject(sourceRevisionField)));
applicationVersion.authorEmail().ifPresent(email -> object.setString(authorEmailField, email));
applicationVersion.compileVersion().ifPresent(version -> object.setString(compileVersionField, version.toString()));
applicationVersion.buildTime().ifPresent(time -> object.setLong(buildTimeField, time.toEpochMilli()));
applicationVersion.sourceUrl().ifPresent(url -> object.setString(sourceUrlField, url));
applicationVersion.commit().ifPresent(commit -> object.setString(commitField, commit));
} | class ApplicationSerializer {
private static final String idField = "id";
private static final String createdAtField = "createdAt";
private static final String deploymentSpecField = "deploymentSpecField";
private static final String validationOverridesField = "validationOverrides";
private static final String instancesField = "instances";
private static final String deployingField = "deployingField";
private static final String projectIdField = "projectId";
private static final String latestVersionField = "latestVersion";
private static final String pinnedField = "pinned";
private static final String deploymentIssueField = "deploymentIssueId";
private static final String ownershipIssueIdField = "ownershipIssueId";
private static final String ownerField = "confirmedOwner";
private static final String majorVersionField = "majorVersion";
private static final String writeQualityField = "writeQuality";
private static final String queryQualityField = "queryQuality";
private static final String pemDeployKeysField = "pemDeployKeys";
private static final String assignedRotationClusterField = "clusterId";
private static final String assignedRotationRotationField = "rotationId";
private static final String versionField = "version";
private static final String instanceNameField = "instanceName";
private static final String deploymentsField = "deployments";
private static final String deploymentJobsField = "deploymentJobs";
private static final String assignedRotationsField = "assignedRotations";
private static final String assignedRotationEndpointField = "endpointId";
private static final String zoneField = "zone";
private static final String environmentField = "environment";
private static final String regionField = "region";
private static final String deployTimeField = "deployTime";
private static final String applicationBuildNumberField = "applicationBuildNumber";
private static final String applicationPackageRevisionField = "applicationPackageRevision";
private static final String sourceRevisionField = "sourceRevision";
private static final String repositoryField = "repositoryField";
private static final String branchField = "branchField";
private static final String commitField = "commitField";
private static final String authorEmailField = "authorEmailField";
private static final String compileVersionField = "compileVersion";
private static final String buildTimeField = "buildTime";
private static final String sourceUrlField = "sourceUrl";
private static final String lastQueriedField = "lastQueried";
private static final String lastWrittenField = "lastWritten";
private static final String lastQueriesPerSecondField = "lastQueriesPerSecond";
private static final String lastWritesPerSecondField = "lastWritesPerSecond";
private static final String jobStatusField = "jobStatus";
private static final String jobTypeField = "jobType";
private static final String pausedUntilField = "pausedUntil";
private static final String clusterInfoField = "clusterInfo";
private static final String clusterInfoFlavorField = "flavor";
private static final String clusterInfoCostField = "cost";
private static final String clusterInfoCpuField = "flavorCpu";
private static final String clusterInfoMemField = "flavorMem";
private static final String clusterInfoDiskField = "flavorDisk";
private static final String clusterInfoTypeField = "clusterType";
private static final String clusterInfoHostnamesField = "hostnames";
private static final String deploymentMetricsField = "metrics";
private static final String deploymentMetricsQPSField = "queriesPerSecond";
private static final String deploymentMetricsWPSField = "writesPerSecond";
private static final String deploymentMetricsDocsField = "documentCount";
private static final String deploymentMetricsQueryLatencyField = "queryLatencyMillis";
private static final String deploymentMetricsWriteLatencyField = "writeLatencyMillis";
private static final String deploymentMetricsUpdateTime = "lastUpdated";
private static final String deploymentMetricsWarningsField = "warnings";
private static final String rotationStatusField = "rotationStatus2";
private static final String rotationIdField = "rotationId";
private static final String lastUpdatedField = "lastUpdated";
private static final String rotationStateField = "state";
private static final String statusField = "status";
private final Cache<Long, Application> cache = CacheBuilder.newBuilder().maximumSize(1000).build();
// Serializes an Application to Slime. Optional fields are written only when
// present; metrics and mandatory identity fields are always written.
public Slime toSlime(Application application) {
Slime slime = new Slime();
Cursor root = slime.setObject();
root.setString(idField, application.id().serialized());
root.setLong(createdAtField, application.createdAt().toEpochMilli());
root.setString(deploymentSpecField, application.deploymentSpec().xmlForm());
root.setString(validationOverridesField, application.validationOverrides().xmlForm());
application.projectId().ifPresent(projectId -> root.setLong(projectIdField, projectId));
application.deploymentIssueId().ifPresent(jiraIssueId -> root.setString(deploymentIssueField, jiraIssueId.value()));
application.ownershipIssueId().ifPresent(issueId -> root.setString(ownershipIssueIdField, issueId.value()));
application.owner().ifPresent(owner -> root.setString(ownerField, owner.username()));
application.majorVersion().ifPresent(majorVersion -> root.setLong(majorVersionField, majorVersion));
root.setDouble(queryQualityField, application.metrics().queryServiceQuality());
root.setDouble(writeQualityField, application.metrics().writeServiceQuality());
deployKeysToSlime(application.deployKeys(), root.setArray(pemDeployKeysField));
// NOTE(review): setObject creates the latestVersion object before the
// version's own fields are written; if every field of the version is absent
// this serializes an empty object — confirm the deserializer accepts that.
application.latestVersion().ifPresent(version -> toSlime(version, root.setObject(latestVersionField)));
instancesToSlime(application, root.setArray(instancesField));
return slime;
}
// Serializes each of the application's instances — its deployments, job
// pauses, rotations, rotation status and pending change — into the array.
private void instancesToSlime(Application application, Cursor array) {
for (Instance instance : application.instances().values()) {
Cursor instanceObject = array.addObject();
instanceObject.setString(instanceNameField, instance.name().value());
deploymentsToSlime(instance.deployments().values(), instanceObject.setArray(deploymentsField));
toSlime(instance.jobPauses(), instanceObject.setObject(deploymentJobsField));
assignedRotationsToSlime(instance.rotations(), instanceObject, assignedRotationsField);
toSlime(instance.rotationStatus(), instanceObject.setArray(rotationStatusField));
toSlime(instance.change(), instanceObject, deployingField);
}
}
// Writes each deploy key as a PEM-encoded string entry in the array.
private void deployKeysToSlime(Set<PublicKey> deployKeys, Cursor array) {
deployKeys.forEach(key -> array.addString(KeyUtils.toPem(key)));
}
// Serializes each deployment as its own object in the array.
private void deploymentsToSlime(Collection<Deployment> deployments, Cursor array) {
for (Deployment deployment : deployments)
deploymentToSlime(deployment, array.addObject());
}
// Serializes a single deployment: zone, platform version, deploy time,
// application package revision, cluster info, metrics, and recent activity
// (activity fields only when present).
private void deploymentToSlime(Deployment deployment, Cursor object) {
zoneIdToSlime(deployment.zone(), object.setObject(zoneField));
object.setString(versionField, deployment.version().toString());
object.setLong(deployTimeField, deployment.at().toEpochMilli());
toSlime(deployment.applicationVersion(), object.setObject(applicationPackageRevisionField));
clusterInfoToSlime(deployment.clusterInfo(), object);
deploymentMetricsToSlime(deployment.metrics(), object);
deployment.activity().lastQueried().ifPresent(instant -> object.setLong(lastQueriedField, instant.toEpochMilli()));
deployment.activity().lastWritten().ifPresent(instant -> object.setLong(lastWrittenField, instant.toEpochMilli()));
deployment.activity().lastQueriesPerSecond().ifPresent(value -> object.setDouble(lastQueriesPerSecondField, value));
deployment.activity().lastWritesPerSecond().ifPresent(value -> object.setDouble(lastWritesPerSecondField, value));
}
// Serializes deployment metrics; the warnings object is only written when
// there is at least one warning, keeping the common case compact.
private void deploymentMetricsToSlime(DeploymentMetrics metrics, Cursor object) {
Cursor root = object.setObject(deploymentMetricsField);
root.setDouble(deploymentMetricsQPSField, metrics.queriesPerSecond());
root.setDouble(deploymentMetricsWPSField, metrics.writesPerSecond());
root.setDouble(deploymentMetricsDocsField, metrics.documentCount());
root.setDouble(deploymentMetricsQueryLatencyField, metrics.queryLatencyMillis());
root.setDouble(deploymentMetricsWriteLatencyField, metrics.writeLatencyMillis());
metrics.instant().ifPresent(instant -> root.setLong(deploymentMetricsUpdateTime, instant.toEpochMilli()));
if (!metrics.warnings().isEmpty()) {
Cursor warningsObject = root.setObject(deploymentMetricsWarningsField);
metrics.warnings().forEach((warning, count) -> warningsObject.setLong(warning.name(), count));
}
}
// Serializes cluster info keyed by cluster id under the clusterInfo object.
private void clusterInfoToSlime(Map<ClusterSpec.Id, ClusterInfo> clusters, Cursor object) {
Cursor root = object.setObject(clusterInfoField);
for (Map.Entry<ClusterSpec.Id, ClusterInfo> entry : clusters.entrySet()) {
toSlime(entry.getValue(), root.setObject(entry.getKey().value()));
}
}
// Serializes a single cluster's flavor, cost, resources, type and hostnames.
private void toSlime(ClusterInfo info, Cursor object) {
object.setString(clusterInfoFlavorField, info.getFlavor());
object.setLong(clusterInfoCostField, info.getFlavorCost());
object.setDouble(clusterInfoCpuField, info.getFlavorCPU());
object.setDouble(clusterInfoMemField, info.getFlavorMem());
object.setDouble(clusterInfoDiskField, info.getFlavorDisk());
object.setString(clusterInfoTypeField, info.getClusterType().name());
Cursor array = object.setArray(clusterInfoHostnamesField);
for (String host : info.getHostnames()) {
array.addString(host);
}
}
// Serializes a zone id as its environment and region values.
private void zoneIdToSlime(ZoneId zone, Cursor object) {
object.setString(environmentField, zone.environment().value());
object.setString(regionField, zone.region().value());
}
// Serializes a source revision's repository, branch and commit.
// NOTE(review): unlike the Optional-guarded fields elsewhere in this class,
// these are written unconditionally — empty strings round-trip as-is; confirm
// an "empty" SourceRevision is intended to serialize this way.
private void toSlime(SourceRevision sourceRevision, Cursor object) {
object.setString(repositoryField, sourceRevision.repository());
object.setString(branchField, sourceRevision.branch());
object.setString(commitField, sourceRevision.commit());
}
// Serializes job pauses as an array of { jobType, pausedUntil } objects.
private void toSlime(Map<JobType, Instant> jobPauses, Cursor cursor) {
Cursor jobStatusArray = cursor.setArray(jobStatusField);
jobPauses.forEach((type, until) -> {
Cursor jobPauseObject = jobStatusArray.addObject();
jobPauseObject.setString(jobTypeField, type.jobName());
jobPauseObject.setLong(pausedUntilField, until.toEpochMilli());
});
}
// Serializes a pending change. Returns early when the change is empty, so no
// (possibly misleading) empty object is ever written for this field.
private void toSlime(Change deploying, Cursor parentObject, String fieldName) {
if (deploying.isEmpty()) return;
Cursor object = parentObject.setObject(fieldName);
if (deploying.platform().isPresent())
object.setString(versionField, deploying.platform().get().toString());
if (deploying.application().isPresent())
toSlime(deploying.application().get(), object);
// The pinned flag is only written when set.
if (deploying.isPinned())
object.setBool(pinnedField, true);
}
// Serializes rotation status: one object per rotation id, each holding its
// last-updated time and a per-zone state array.
private void toSlime(RotationStatus status, Cursor array) {
status.asMap().forEach((rotationId, targets) -> {
Cursor rotationObject = array.addObject();
rotationObject.setString(rotationIdField, rotationId.asString());
rotationObject.setLong(lastUpdatedField, targets.lastUpdated().toEpochMilli());
Cursor statusArray = rotationObject.setArray(statusField);
targets.asMap().forEach((zone, state) -> {
Cursor statusObject = statusArray.addObject();
zoneIdToSlime(zone, statusObject);
statusObject.setString(rotationStateField, state.name());
});
});
}
// Serializes assigned rotations as { endpointId, rotationId, clusterId } objects.
private void assignedRotationsToSlime(List<AssignedRotation> rotations, Cursor parent, String fieldName) {
var rotationsArray = parent.setArray(fieldName);
for (var rotation : rotations) {
var object = rotationsArray.addObject();
object.setString(assignedRotationEndpointField, rotation.endpointId().id());
object.setString(assignedRotationRotationField, rotation.rotationId().asString());
object.setString(assignedRotationClusterField, rotation.clusterId().value());
}
}
// Deserializes an Application, memoizing by a 64-bit sipHash of the raw bytes
// so repeated reads of identical data skip re-parsing.
// NOTE(review): a hash collision would silently return the wrong cached
// Application — presumably the odds are considered acceptable; confirm.
public Application fromSlime(byte[] data) {
var key = Hashing.sipHash24().hashBytes(data).asLong();
try {
return cache.get(key, () -> fromSlime(SlimeUtils.jsonToSlime(data)));
} catch (ExecutionException e) {
// Unwrap the loader's checked wrapper into an unchecked one for callers.
throw new UncheckedExecutionException(e);
}
}
// Reconstructs an Application from Slime; absent optional fields map to
// empty Optionals via the Serializers helpers.
private Application fromSlime(Slime slime) {
Inspector root = slime.get();
TenantAndApplicationId id = TenantAndApplicationId.fromSerialized(root.field(idField).asString());
Instant createdAt = Instant.ofEpochMilli(root.field(createdAtField).asLong());
DeploymentSpec deploymentSpec = DeploymentSpec.fromXml(root.field(deploymentSpecField).asString(), false);
ValidationOverrides validationOverrides = ValidationOverrides.fromXml(root.field(validationOverridesField).asString());
Optional<IssueId> deploymentIssueId = Serializers.optionalString(root.field(deploymentIssueField)).map(IssueId::from);
Optional<IssueId> ownershipIssueId = Serializers.optionalString(root.field(ownershipIssueIdField)).map(IssueId::from);
Optional<User> owner = Serializers.optionalString(root.field(ownerField)).map(User::from);
OptionalInt majorVersion = Serializers.optionalInteger(root.field(majorVersionField));
ApplicationMetrics metrics = new ApplicationMetrics(root.field(queryQualityField).asDouble(),
root.field(writeQualityField).asDouble());
Set<PublicKey> deployKeys = deployKeysFromSlime(root.field(pemDeployKeysField));
List<Instance> instances = instancesFromSlime(id, deploymentSpec, root.field(instancesField));
OptionalLong projectId = Serializers.optionalLong(root.field(projectIdField));
Optional<ApplicationVersion> latestVersion = latestVersionFromSlime(root.field(latestVersionField));
return new Application(id, createdAt, deploymentSpec, validationOverrides,
deploymentIssueId, ownershipIssueId, owner, majorVersion, metrics,
deployKeys, projectId, latestVersion, instances);
}
/** Reads the optional latest application version; empty when the field is absent. */
private Optional<ApplicationVersion> latestVersionFromSlime(Inspector latestVersionObject) {
    return latestVersionObject.valid() ? Optional.of(applicationVersionFromSlime(latestVersionObject))
                                       : Optional.empty();
}
/**
 * Reads all instances of this application. Rotation regions are not stored; they are
 * re-resolved from the current deployment spec on every read.
 */
private List<Instance> instancesFromSlime(TenantAndApplicationId id, DeploymentSpec deploymentSpec, Inspector field) {
List<Instance> instances = new ArrayList<>();
field.traverse((ArrayTraverser) (name, object) -> {
                 InstanceName instanceName = InstanceName.from(object.field(instanceNameField).asString());
                 List<Deployment> deployments = deploymentsFromSlime(object.field(deploymentsField));
                 Map<JobType, Instant> jobPauses = jobPausesFromSlime(object.field(deploymentJobsField));
                 List<AssignedRotation> assignedRotations = assignedRotationsFromSlime(deploymentSpec, instanceName, object);
                 RotationStatus rotationStatus = rotationStatusFromSlime(object);
                 Change change = changeFromSlime(object.field(deployingField));
                 instances.add(new Instance(id.instance(instanceName),
                                            deployments,
                                            jobPauses,
                                            assignedRotations,
                                            rotationStatus,
                                            change));
});
return instances;
}
/** Reads the PEM-encoded deploy keys; a LinkedHashSet preserves their stored order. */
private Set<PublicKey> deployKeysFromSlime(Inspector array) {
    Set<PublicKey> keys = new LinkedHashSet<>();
    array.traverse((ArrayTraverser) (index, entry) -> keys.add(KeyUtils.fromPemEncodedPublicKey(entry.asString())));
    return keys;
}
/** Reads every deployment entry in the given array. */
private List<Deployment> deploymentsFromSlime(Inspector array) {
    List<Deployment> result = new ArrayList<>();
    array.traverse((ArrayTraverser) (int index, Inspector entry) -> result.add(deploymentFromSlime(entry)));
    return result;
}
/** Reads a single Deployment, including cluster info, metrics, and activity timestamps. */
private Deployment deploymentFromSlime(Inspector deploymentObject) {
return new Deployment(zoneIdFromSlime(deploymentObject.field(zoneField)),
                              applicationVersionFromSlime(deploymentObject.field(applicationPackageRevisionField)),
                              Version.fromString(deploymentObject.field(versionField).asString()),
                              Instant.ofEpochMilli(deploymentObject.field(deployTimeField).asLong()),
                              clusterInfoMapFromSlime(deploymentObject.field(clusterInfoField)),
                              deploymentMetricsFromSlime(deploymentObject.field(deploymentMetricsField)),
                              // Activity timestamps/rates are optional: absent until first recorded.
                              DeploymentActivity.create(Serializers.optionalInstant(deploymentObject.field(lastQueriedField)),
                                                        Serializers.optionalInstant(deploymentObject.field(lastWrittenField)),
                                                        Serializers.optionalDouble(deploymentObject.field(lastQueriesPerSecondField)),
                                                        Serializers.optionalDouble(deploymentObject.field(lastWritesPerSecondField))));
}
/** Reads the deployment metrics object written by deploymentMetricsToSlime. */
private DeploymentMetrics deploymentMetricsFromSlime(Inspector object) {
    // Use the shared optional-instant reader, consistent with the other timestamp
    // fields in this class (e.g. lastQueriedField), instead of a hand-rolled ternary.
    Optional<Instant> instant = Serializers.optionalInstant(object.field(deploymentMetricsUpdateTime));
    return new DeploymentMetrics(object.field(deploymentMetricsQPSField).asDouble(),
                                 object.field(deploymentMetricsWPSField).asDouble(),
                                 object.field(deploymentMetricsDocsField).asDouble(),
                                 object.field(deploymentMetricsQueryLatencyField).asDouble(),
                                 object.field(deploymentMetricsWriteLatencyField).asDouble(),
                                 instant,
                                 deploymentWarningsFrom(object.field(deploymentMetricsWarningsField)));
}
/** Reads the warning-name to count map; the result is unmodifiable. */
private Map<DeploymentMetrics.Warning, Integer> deploymentWarningsFrom(Inspector object) {
    Map<DeploymentMetrics.Warning, Integer> counts = new HashMap<>();
    object.traverse((ObjectTraverser) (warningName, count) ->
            counts.put(DeploymentMetrics.Warning.valueOf(warningName), (int) count.asLong()));
    return Collections.unmodifiableMap(counts);
}
/** Reads per-rotation status from the 'rotationStatus2' field; insertion order is preserved. */
private RotationStatus rotationStatusFromSlime(Inspector parentObject) {
var object = parentObject.field(rotationStatusField);
var statusMap = new LinkedHashMap<RotationId, RotationStatus.Targets>();
object.traverse((ArrayTraverser) (idx, statusObject) -> statusMap.put(new RotationId(statusObject.field(rotationIdField).asString()),
                                                                              new RotationStatus.Targets(
                                                                                      singleRotationStatusFromSlime(statusObject.field(statusField)),
                                                                                      Instant.ofEpochMilli(statusObject.field(lastUpdatedField).asLong()))));
return RotationStatus.from(statusMap);
}
/** Reads one rotation's per-zone state; empty map if the field is absent. */
private Map<ZoneId, RotationState> singleRotationStatusFromSlime(Inspector object) {
    if ( ! object.valid()) return Collections.emptyMap();
    Map<ZoneId, RotationState> statusByZone = new LinkedHashMap<>();
    object.traverse((ArrayTraverser) (index, entry) ->
            statusByZone.put(zoneIdFromSlime(entry),
                             RotationState.valueOf(entry.field(rotationStateField).asString())));
    return Collections.unmodifiableMap(statusByZone);
}
/** Reads the cluster-id to ClusterInfo map written by clusterInfoToSlime. */
private Map<ClusterSpec.Id, ClusterInfo> clusterInfoMapFromSlime(Inspector object) {
    Map<ClusterSpec.Id, ClusterInfo> infoById = new HashMap<>();
    object.traverse((String clusterName, Inspector value) ->
            infoById.put(new ClusterSpec.Id(clusterName), clusterInfoFromSlime(value)));
    return infoById;
}
/** Reads a single ClusterInfo: flavor name, cost, flavor resources, cluster type, and host names. */
private ClusterInfo clusterInfoFromSlime(Inspector inspector) {
String flavor = inspector.field(clusterInfoFlavorField).asString();
int cost = (int)inspector.field(clusterInfoCostField).asLong();
String type = inspector.field(clusterInfoTypeField).asString();
double flavorCpu = inspector.field(clusterInfoCpuField).asDouble();
double flavorMem = inspector.field(clusterInfoMemField).asDouble();
double flavorDisk = inspector.field(clusterInfoDiskField).asDouble();
List<String> hostnames = new ArrayList<>();
inspector.field(clusterInfoHostnamesField).traverse((ArrayTraverser)(int index, Inspector value) -> hostnames.add(value.asString()));
return new ClusterInfo(flavor, cost, flavorCpu, flavorMem, flavorDisk, ClusterSpec.Type.from(type), hostnames);
}
/** Reads a zone id from its environment and region fields. */
private ZoneId zoneIdFromSlime(Inspector object) {
    String environment = object.field(environmentField).asString();
    String region = object.field(regionField).asString();
    return ZoneId.from(environment, region);
}
/**
 * Reads an ApplicationVersion. Returns ApplicationVersion.unknown when the object is
 * missing or carries no build number, so legacy/empty entries deserialize cleanly.
 */
private ApplicationVersion applicationVersionFromSlime(Inspector object) {
if ( ! object.valid()) return ApplicationVersion.unknown;
OptionalLong applicationBuildNumber = Serializers.optionalLong(object.field(applicationBuildNumberField));
if (applicationBuildNumber.isEmpty())
return ApplicationVersion.unknown;
Optional<SourceRevision> sourceRevision = sourceRevisionFromSlime(object.field(sourceRevisionField));
Optional<String> authorEmail = Serializers.optionalString(object.field(authorEmailField));
Optional<Version> compileVersion = Serializers.optionalString(object.field(compileVersionField)).map(Version::fromString);
Optional<Instant> buildTime = Serializers.optionalInstant(object.field(buildTimeField));
Optional<String> sourceUrl = Serializers.optionalString(object.field(sourceUrlField));
Optional<String> commit = Serializers.optionalString(object.field(commitField));
return new ApplicationVersion(sourceRevision, applicationBuildNumber, authorEmail, compileVersion, buildTime, sourceUrl, commit);
}
/**
 * Reads a SourceRevision; empty when the object is absent, or when all three parts are
 * blank (guards against records serialized from an unknown revision).
 */
private Optional<SourceRevision> sourceRevisionFromSlime(Inspector object) {
if ( ! object.valid()) return Optional.empty();
var repository = object.field(repositoryField).asString();
var branch = object.field(branchField).asString();
var commit = object.field(commitField).asString();
if (repository.isBlank() && branch.isBlank() && commit.isBlank()) return Optional.empty();
return Optional.of(new SourceRevision(repository, branch, commit));
}
/**
 * Reads job pause deadlines. Unknown job names are silently skipped (fromOptionalJobName),
 * so stored pauses for removed job types do not break deserialization.
 */
private Map<JobType, Instant> jobPausesFromSlime(Inspector object) {
Map<JobType, Instant> jobPauses = new HashMap<>();
object.field(jobStatusField).traverse((ArrayTraverser) (__, jobPauseObject) ->
                JobType.fromOptionalJobName(jobPauseObject.field(jobTypeField).asString())
                       .ifPresent(jobType -> jobPauses.put(jobType,
                                                           Instant.ofEpochMilli(jobPauseObject.field(pausedUntilField).asLong()))));
return jobPauses;
}
/**
 * Reads a Change: an optional platform version and/or application version upgrade,
 * plus an optional pin. An absent object means no change in progress.
 */
private Change changeFromSlime(Inspector object) {
if ( ! object.valid()) return Change.empty();
Inspector versionFieldValue = object.field(versionField);
Change change = Change.empty();
if (versionFieldValue.valid())
change = Change.of(Version.fromString(versionFieldValue.asString()));
// The application version, when present, is stored inline on the same object.
if (object.field(applicationBuildNumberField).valid())
change = change.with(applicationVersionFromSlime(object));
if (object.field(pinnedField).asBool())
change = change.withPin();
return change;
}
/**
 * Reads the assigned rotations of an instance. Regions are resolved from the current
 * deployment spec rather than stored. putIfAbsent keeps only the first entry per endpoint id.
 */
private List<AssignedRotation> assignedRotationsFromSlime(DeploymentSpec deploymentSpec, InstanceName instance, Inspector root) {
var assignedRotations = new LinkedHashMap<EndpointId, AssignedRotation>();
root.field(assignedRotationsField).traverse((ArrayTraverser) (idx, inspector) -> {
            var clusterId = new ClusterSpec.Id(inspector.field(assignedRotationClusterField).asString());
            var endpointId = EndpointId.of(inspector.field(assignedRotationEndpointField).asString());
            var rotationId = new RotationId(inspector.field(assignedRotationRotationField).asString());
            var regions = deploymentSpec.instance(instance)
                                        .map(spec -> globalEndpointRegions(spec, endpointId))
                                        .orElse(Set.of());
            assignedRotations.putIfAbsent(endpointId, new AssignedRotation(clusterId, endpointId, rotationId, regions));
});
return List.copyOf(assignedRotations.values());
}
/**
 * Resolves which regions an endpoint covers: all declared zones' regions under the
 * legacy global-service-id mechanism, otherwise the regions of the matching endpoint.
 */
private Set<RegionName> globalEndpointRegions(DeploymentInstanceSpec spec, EndpointId endpointId) {
    if (spec.globalServiceId().isPresent())
        return spec.zones().stream()
                   .flatMap(zone -> zone.region().stream())
                   .collect(Collectors.toSet());

    return spec.endpoints().stream()
               .filter(endpoint -> endpointId.id().equals(endpoint.endpointId()))
               .flatMap(endpoint -> endpoint.regions().stream())
               .collect(Collectors.toSet());
}
} | class ApplicationSerializer {
// Serialized field names. These values define the stored (JSON) format used by both
// toSlime and fromSlime, so an existing value must never be changed: data written under
// the old name would no longer be read back.
private static final String idField = "id";
private static final String createdAtField = "createdAt";
// NOTE(review): several values below carry a stray "Field" suffix (e.g. "deploymentSpecField").
// They look like copy-paste slips, but they are the names actually stored, so they must stay.
private static final String deploymentSpecField = "deploymentSpecField";
private static final String validationOverridesField = "validationOverrides";
private static final String instancesField = "instances";
private static final String deployingField = "deployingField";
private static final String projectIdField = "projectId";
private static final String latestVersionField = "latestVersion";
private static final String pinnedField = "pinned";
private static final String deploymentIssueField = "deploymentIssueId";
private static final String ownershipIssueIdField = "ownershipIssueId";
private static final String ownerField = "confirmedOwner";
private static final String majorVersionField = "majorVersion";
private static final String writeQualityField = "writeQuality";
private static final String queryQualityField = "queryQuality";
private static final String pemDeployKeysField = "pemDeployKeys";
private static final String assignedRotationClusterField = "clusterId";
private static final String assignedRotationRotationField = "rotationId";
private static final String versionField = "version";
// Instance-level fields.
private static final String instanceNameField = "instanceName";
private static final String deploymentsField = "deployments";
private static final String deploymentJobsField = "deploymentJobs";
private static final String assignedRotationsField = "assignedRotations";
private static final String assignedRotationEndpointField = "endpointId";
// Deployment-level fields.
private static final String zoneField = "zone";
private static final String environmentField = "environment";
private static final String regionField = "region";
private static final String deployTimeField = "deployTime";
private static final String applicationBuildNumberField = "applicationBuildNumber";
private static final String applicationPackageRevisionField = "applicationPackageRevision";
private static final String sourceRevisionField = "sourceRevision";
private static final String repositoryField = "repositoryField";
private static final String branchField = "branchField";
private static final String commitField = "commitField";
private static final String authorEmailField = "authorEmailField";
private static final String compileVersionField = "compileVersion";
private static final String buildTimeField = "buildTime";
private static final String sourceUrlField = "sourceUrl";
private static final String lastQueriedField = "lastQueried";
private static final String lastWrittenField = "lastWritten";
private static final String lastQueriesPerSecondField = "lastQueriesPerSecond";
private static final String lastWritesPerSecondField = "lastWritesPerSecond";
// DeploymentJobs fields.
private static final String jobStatusField = "jobStatus";
private static final String jobTypeField = "jobType";
private static final String pausedUntilField = "pausedUntil";
// ClusterInfo fields.
private static final String clusterInfoField = "clusterInfo";
private static final String clusterInfoFlavorField = "flavor";
private static final String clusterInfoCostField = "cost";
private static final String clusterInfoCpuField = "flavorCpu";
private static final String clusterInfoMemField = "flavorMem";
private static final String clusterInfoDiskField = "flavorDisk";
private static final String clusterInfoTypeField = "clusterType";
private static final String clusterInfoHostnamesField = "hostnames";
// Deployment metrics fields.
private static final String deploymentMetricsField = "metrics";
private static final String deploymentMetricsQPSField = "queriesPerSecond";
private static final String deploymentMetricsWPSField = "writesPerSecond";
private static final String deploymentMetricsDocsField = "documentCount";
private static final String deploymentMetricsQueryLatencyField = "queryLatencyMillis";
private static final String deploymentMetricsWriteLatencyField = "writeLatencyMillis";
private static final String deploymentMetricsUpdateTime = "lastUpdated";
private static final String deploymentMetricsWarningsField = "warnings";
// RotationStatus fields. NOTE(review): "rotationStatus2" was presumably version-bumped
// when this field's format changed — confirm before reusing the plain name.
private static final String rotationStatusField = "rotationStatus2";
private static final String rotationIdField = "rotationId";
private static final String lastUpdatedField = "lastUpdated";
private static final String rotationStateField = "state";
private static final String statusField = "status";
// Parsed applications are cached keyed by a hash of the raw serialized bytes; see fromSlime(byte[]).
private final Cache<Long, Application> cache = CacheBuilder.newBuilder().maximumSize(1000).build();
/** Serializes an Application to Slime; inverse of fromSlime(Slime). Absent optionals write no field. */
public Slime toSlime(Application application) {
Slime slime = new Slime();
Cursor root = slime.setObject();
root.setString(idField, application.id().serialized());
root.setLong(createdAtField, application.createdAt().toEpochMilli());
root.setString(deploymentSpecField, application.deploymentSpec().xmlForm());
root.setString(validationOverridesField, application.validationOverrides().xmlForm());
application.projectId().ifPresent(projectId -> root.setLong(projectIdField, projectId));
application.deploymentIssueId().ifPresent(jiraIssueId -> root.setString(deploymentIssueField, jiraIssueId.value()));
application.ownershipIssueId().ifPresent(issueId -> root.setString(ownershipIssueIdField, issueId.value()));
application.owner().ifPresent(owner -> root.setString(ownerField, owner.username()));
application.majorVersion().ifPresent(majorVersion -> root.setLong(majorVersionField, majorVersion));
root.setDouble(queryQualityField, application.metrics().queryServiceQuality());
root.setDouble(writeQualityField, application.metrics().writeServiceQuality());
deployKeysToSlime(application.deployKeys(), root.setArray(pemDeployKeysField));
application.latestVersion().ifPresent(version -> toSlime(version, root.setObject(latestVersionField)));
instancesToSlime(application, root.setArray(instancesField));
return slime;
}
/** Writes each instance with its deployments, job pauses, rotations, and in-flight change. */
private void instancesToSlime(Application application, Cursor array) {
for (Instance instance : application.instances().values()) {
Cursor instanceObject = array.addObject();
instanceObject.setString(instanceNameField, instance.name().value());
deploymentsToSlime(instance.deployments().values(), instanceObject.setArray(deploymentsField));
toSlime(instance.jobPauses(), instanceObject.setObject(deploymentJobsField));
assignedRotationsToSlime(instance.rotations(), instanceObject, assignedRotationsField);
toSlime(instance.rotationStatus(), instanceObject.setArray(rotationStatusField));
toSlime(instance.change(), instanceObject, deployingField);
}
}
/** Writes each deploy key as a PEM-encoded string entry. */
private void deployKeysToSlime(Set<PublicKey> deployKeys, Cursor array) {
    for (PublicKey key : deployKeys)
        array.addString(KeyUtils.toPem(key));
}
/** Writes each deployment as an object appended to the given array. */
private void deploymentsToSlime(Collection<Deployment> deployments, Cursor array) {
    deployments.forEach(deployment -> deploymentToSlime(deployment, array.addObject()));
}
/** Writes one Deployment: zone, versions, deploy time, cluster info, metrics, and activity. */
private void deploymentToSlime(Deployment deployment, Cursor object) {
zoneIdToSlime(deployment.zone(), object.setObject(zoneField));
object.setString(versionField, deployment.version().toString());
object.setLong(deployTimeField, deployment.at().toEpochMilli());
toSlime(deployment.applicationVersion(), object.setObject(applicationPackageRevisionField));
clusterInfoToSlime(deployment.clusterInfo(), object);
deploymentMetricsToSlime(deployment.metrics(), object);
// Activity fields are optional: written only once first recorded.
deployment.activity().lastQueried().ifPresent(instant -> object.setLong(lastQueriedField, instant.toEpochMilli()));
deployment.activity().lastWritten().ifPresent(instant -> object.setLong(lastWrittenField, instant.toEpochMilli()));
deployment.activity().lastQueriesPerSecond().ifPresent(value -> object.setDouble(lastQueriesPerSecondField, value));
deployment.activity().lastWritesPerSecond().ifPresent(value -> object.setDouble(lastWritesPerSecondField, value));
}
/** Writes deployment metrics under the 'metrics' field; the warnings object is omitted when empty. */
private void deploymentMetricsToSlime(DeploymentMetrics metrics, Cursor object) {
Cursor root = object.setObject(deploymentMetricsField);
root.setDouble(deploymentMetricsQPSField, metrics.queriesPerSecond());
root.setDouble(deploymentMetricsWPSField, metrics.writesPerSecond());
root.setDouble(deploymentMetricsDocsField, metrics.documentCount());
root.setDouble(deploymentMetricsQueryLatencyField, metrics.queryLatencyMillis());
root.setDouble(deploymentMetricsWriteLatencyField, metrics.writeLatencyMillis());
metrics.instant().ifPresent(instant -> root.setLong(deploymentMetricsUpdateTime, instant.toEpochMilli()));
if (!metrics.warnings().isEmpty()) {
Cursor warningsObject = root.setObject(deploymentMetricsWarningsField);
metrics.warnings().forEach((warning, count) -> warningsObject.setLong(warning.name(), count));
}
}
/** Writes the cluster info map, keyed by cluster id, under the 'clusterInfo' field. */
private void clusterInfoToSlime(Map<ClusterSpec.Id, ClusterInfo> clusters, Cursor object) {
    Cursor infoObject = object.setObject(clusterInfoField);
    clusters.forEach((id, info) -> toSlime(info, infoObject.setObject(id.value())));
}
/** Writes one ClusterInfo: flavor name, cost, flavor resources, cluster type, and host names. */
private void toSlime(ClusterInfo info, Cursor object) {
object.setString(clusterInfoFlavorField, info.getFlavor());
object.setLong(clusterInfoCostField, info.getFlavorCost());
object.setDouble(clusterInfoCpuField, info.getFlavorCPU());
object.setDouble(clusterInfoMemField, info.getFlavorMem());
object.setDouble(clusterInfoDiskField, info.getFlavorDisk());
object.setString(clusterInfoTypeField, info.getClusterType().name());
Cursor array = object.setArray(clusterInfoHostnamesField);
for (String host : info.getHostnames()) {
array.addString(host);
}
}
/** Writes a zone as separate environment and region fields. */
private void zoneIdToSlime(ZoneId zone, Cursor object) {
    String environment = zone.environment().value();
    String region = zone.region().value();
    object.setString(environmentField, environment);
    object.setString(regionField, region);
}
/** Writes a SourceRevision's repository, branch, and commit. */
private void toSlime(SourceRevision sourceRevision, Cursor object) {
object.setString(repositoryField, sourceRevision.repository());
object.setString(branchField, sourceRevision.branch());
object.setString(commitField, sourceRevision.commit());
}
/** Writes job pause deadlines as (jobType, pausedUntil-millis) pairs under 'jobStatus'. */
private void toSlime(Map<JobType, Instant> jobPauses, Cursor cursor) {
Cursor jobStatusArray = cursor.setArray(jobStatusField);
jobPauses.forEach((type, until) -> {
            Cursor jobPauseObject = jobStatusArray.addObject();
            jobPauseObject.setString(jobTypeField, type.jobName());
            jobPauseObject.setLong(pausedUntilField, until.toEpochMilli());
});
}
/** Writes an in-flight Change; nothing is written at all when the change is empty. */
private void toSlime(Change deploying, Cursor parentObject, String fieldName) {
if (deploying.isEmpty()) return;
Cursor object = parentObject.setObject(fieldName);
if (deploying.platform().isPresent())
object.setString(versionField, deploying.platform().get().toString());
// The application version, when present, is written inline on the same object.
if (deploying.application().isPresent())
toSlime(deploying.application().get(), object);
if (deploying.isPinned())
object.setBool(pinnedField, true);
}
/** Writes rotation status: one object per rotation, each with its per-zone state array. */
private void toSlime(RotationStatus status, Cursor array) {
status.asMap().forEach((rotationId, targets) -> {
            Cursor rotationObject = array.addObject();
            rotationObject.setString(rotationIdField, rotationId.asString());
            rotationObject.setLong(lastUpdatedField, targets.lastUpdated().toEpochMilli());
            Cursor statusArray = rotationObject.setArray(statusField);
            targets.asMap().forEach((zone, state) -> {
                Cursor statusObject = statusArray.addObject();
                zoneIdToSlime(zone, statusObject);
                statusObject.setString(rotationStateField, state.name());
            });
});
}
/** Writes assigned rotations as (endpoint id, rotation id, cluster id) triples. */
private void assignedRotationsToSlime(List<AssignedRotation> rotations, Cursor parent, String fieldName) {
    Cursor rotationsArray = parent.setArray(fieldName);
    rotations.forEach(rotation -> {
        Cursor rotationObject = rotationsArray.addObject();
        rotationObject.setString(assignedRotationEndpointField, rotation.endpointId().id());
        rotationObject.setString(assignedRotationRotationField, rotation.rotationId().asString());
        rotationObject.setString(assignedRotationClusterField, rotation.clusterId().value());
    });
}
/**
 * Deserializes an Application from JSON bytes, caching parsed results keyed by a
 * 64-bit SipHash of the raw data so repeated reads of identical bytes skip parsing.
 */
public Application fromSlime(byte[] data) {
    var key = Hashing.sipHash24().hashBytes(data).asLong();
    try {
        return cache.get(key, () -> fromSlime(SlimeUtils.jsonToSlime(data)));
    } catch (ExecutionException e) {
        // Rethrow the loader's actual failure, not the ExecutionException wrapper,
        // mirroring what Guava's Cache.getUnchecked does (avoids double wrapping).
        throw new UncheckedExecutionException(e.getCause());
    }
}
/**
 * Deserializes an Application from its Slime (JSON) representation; inverse of toSlime(Application).
 * Optional fields absent from stored data come back as empty Optionals.
 */
private Application fromSlime(Slime slime) {
Inspector root = slime.get();
TenantAndApplicationId id = TenantAndApplicationId.fromSerialized(root.field(idField).asString());
Instant createdAt = Instant.ofEpochMilli(root.field(createdAtField).asLong());
// NOTE(review): second argument presumably skips re-validation of the already-stored spec — TODO confirm.
DeploymentSpec deploymentSpec = DeploymentSpec.fromXml(root.field(deploymentSpecField).asString(), false);
ValidationOverrides validationOverrides = ValidationOverrides.fromXml(root.field(validationOverridesField).asString());
Optional<IssueId> deploymentIssueId = Serializers.optionalString(root.field(deploymentIssueField)).map(IssueId::from);
Optional<IssueId> ownershipIssueId = Serializers.optionalString(root.field(ownershipIssueIdField)).map(IssueId::from);
Optional<User> owner = Serializers.optionalString(root.field(ownerField)).map(User::from);
OptionalInt majorVersion = Serializers.optionalInteger(root.field(majorVersionField));
ApplicationMetrics metrics = new ApplicationMetrics(root.field(queryQualityField).asDouble(),
                                                            root.field(writeQualityField).asDouble());
Set<PublicKey> deployKeys = deployKeysFromSlime(root.field(pemDeployKeysField));
List<Instance> instances = instancesFromSlime(id, deploymentSpec, root.field(instancesField));
OptionalLong projectId = Serializers.optionalLong(root.field(projectIdField));
Optional<ApplicationVersion> latestVersion = latestVersionFromSlime(root.field(latestVersionField));
return new Application(id, createdAt, deploymentSpec, validationOverrides,
                               deploymentIssueId, ownershipIssueId, owner, majorVersion, metrics,
                               deployKeys, projectId, latestVersion, instances);
}
/** Reads the latest submitted application version, or empty if none is stored. */
private Optional<ApplicationVersion> latestVersionFromSlime(Inspector latestVersionObject) {
    return latestVersionObject.valid()
           ? Optional.of(applicationVersionFromSlime(latestVersionObject))
           : Optional.empty();
}
/**
 * Reads all instances of this application. Rotation regions are not stored; they are
 * re-resolved from the current deployment spec on every read.
 */
private List<Instance> instancesFromSlime(TenantAndApplicationId id, DeploymentSpec deploymentSpec, Inspector field) {
List<Instance> instances = new ArrayList<>();
field.traverse((ArrayTraverser) (name, object) -> {
                 InstanceName instanceName = InstanceName.from(object.field(instanceNameField).asString());
                 List<Deployment> deployments = deploymentsFromSlime(object.field(deploymentsField));
                 Map<JobType, Instant> jobPauses = jobPausesFromSlime(object.field(deploymentJobsField));
                 List<AssignedRotation> assignedRotations = assignedRotationsFromSlime(deploymentSpec, instanceName, object);
                 RotationStatus rotationStatus = rotationStatusFromSlime(object);
                 Change change = changeFromSlime(object.field(deployingField));
                 instances.add(new Instance(id.instance(instanceName),
                                            deployments,
                                            jobPauses,
                                            assignedRotations,
                                            rotationStatus,
                                            change));
});
return instances;
}
/** Reads the PEM-encoded deploy keys; a LinkedHashSet preserves their stored order. */
private Set<PublicKey> deployKeysFromSlime(Inspector array) {
    Set<PublicKey> keys = new LinkedHashSet<>();
    array.traverse((ArrayTraverser) (index, entry) -> keys.add(KeyUtils.fromPemEncodedPublicKey(entry.asString())));
    return keys;
}
/** Reads every deployment entry in the given array. */
private List<Deployment> deploymentsFromSlime(Inspector array) {
    List<Deployment> result = new ArrayList<>();
    array.traverse((ArrayTraverser) (int index, Inspector entry) -> result.add(deploymentFromSlime(entry)));
    return result;
}
/** Reads a single Deployment, including cluster info, metrics, and activity timestamps. */
private Deployment deploymentFromSlime(Inspector deploymentObject) {
return new Deployment(zoneIdFromSlime(deploymentObject.field(zoneField)),
                              applicationVersionFromSlime(deploymentObject.field(applicationPackageRevisionField)),
                              Version.fromString(deploymentObject.field(versionField).asString()),
                              Instant.ofEpochMilli(deploymentObject.field(deployTimeField).asLong()),
                              clusterInfoMapFromSlime(deploymentObject.field(clusterInfoField)),
                              deploymentMetricsFromSlime(deploymentObject.field(deploymentMetricsField)),
                              // Activity timestamps/rates are optional: absent until first recorded.
                              DeploymentActivity.create(Serializers.optionalInstant(deploymentObject.field(lastQueriedField)),
                                                        Serializers.optionalInstant(deploymentObject.field(lastWrittenField)),
                                                        Serializers.optionalDouble(deploymentObject.field(lastQueriesPerSecondField)),
                                                        Serializers.optionalDouble(deploymentObject.field(lastWritesPerSecondField))));
}
/** Reads the deployment metrics object written by deploymentMetricsToSlime. */
private DeploymentMetrics deploymentMetricsFromSlime(Inspector object) {
    // Use the shared optional-instant reader, consistent with the other timestamp
    // fields in this class (e.g. lastQueriedField), instead of a hand-rolled ternary.
    Optional<Instant> instant = Serializers.optionalInstant(object.field(deploymentMetricsUpdateTime));
    return new DeploymentMetrics(object.field(deploymentMetricsQPSField).asDouble(),
                                 object.field(deploymentMetricsWPSField).asDouble(),
                                 object.field(deploymentMetricsDocsField).asDouble(),
                                 object.field(deploymentMetricsQueryLatencyField).asDouble(),
                                 object.field(deploymentMetricsWriteLatencyField).asDouble(),
                                 instant,
                                 deploymentWarningsFrom(object.field(deploymentMetricsWarningsField)));
}
/** Reads the warning-name to count map; the result is unmodifiable. */
private Map<DeploymentMetrics.Warning, Integer> deploymentWarningsFrom(Inspector object) {
    Map<DeploymentMetrics.Warning, Integer> counts = new HashMap<>();
    object.traverse((ObjectTraverser) (warningName, count) ->
            counts.put(DeploymentMetrics.Warning.valueOf(warningName), (int) count.asLong()));
    return Collections.unmodifiableMap(counts);
}
/** Reads per-rotation status from the 'rotationStatus2' field; insertion order is preserved. */
private RotationStatus rotationStatusFromSlime(Inspector parentObject) {
var object = parentObject.field(rotationStatusField);
var statusMap = new LinkedHashMap<RotationId, RotationStatus.Targets>();
object.traverse((ArrayTraverser) (idx, statusObject) -> statusMap.put(new RotationId(statusObject.field(rotationIdField).asString()),
                                                                              new RotationStatus.Targets(
                                                                                      singleRotationStatusFromSlime(statusObject.field(statusField)),
                                                                                      Instant.ofEpochMilli(statusObject.field(lastUpdatedField).asLong()))));
return RotationStatus.from(statusMap);
}
/** Reads one rotation's per-zone state; empty map if the field is absent. */
private Map<ZoneId, RotationState> singleRotationStatusFromSlime(Inspector object) {
    if ( ! object.valid()) return Collections.emptyMap();
    Map<ZoneId, RotationState> statusByZone = new LinkedHashMap<>();
    object.traverse((ArrayTraverser) (index, entry) ->
            statusByZone.put(zoneIdFromSlime(entry),
                             RotationState.valueOf(entry.field(rotationStateField).asString())));
    return Collections.unmodifiableMap(statusByZone);
}
/** Reads the cluster-id to ClusterInfo map written by clusterInfoToSlime. */
private Map<ClusterSpec.Id, ClusterInfo> clusterInfoMapFromSlime(Inspector object) {
    Map<ClusterSpec.Id, ClusterInfo> infoById = new HashMap<>();
    object.traverse((String clusterName, Inspector value) ->
            infoById.put(new ClusterSpec.Id(clusterName), clusterInfoFromSlime(value)));
    return infoById;
}
/** Reads a single ClusterInfo: flavor name, cost, flavor resources, cluster type, and host names. */
private ClusterInfo clusterInfoFromSlime(Inspector inspector) {
String flavor = inspector.field(clusterInfoFlavorField).asString();
int cost = (int)inspector.field(clusterInfoCostField).asLong();
String type = inspector.field(clusterInfoTypeField).asString();
double flavorCpu = inspector.field(clusterInfoCpuField).asDouble();
double flavorMem = inspector.field(clusterInfoMemField).asDouble();
double flavorDisk = inspector.field(clusterInfoDiskField).asDouble();
List<String> hostnames = new ArrayList<>();
inspector.field(clusterInfoHostnamesField).traverse((ArrayTraverser)(int index, Inspector value) -> hostnames.add(value.asString()));
return new ClusterInfo(flavor, cost, flavorCpu, flavorMem, flavorDisk, ClusterSpec.Type.from(type), hostnames);
}
/** Reads a zone id from its environment and region fields. */
private ZoneId zoneIdFromSlime(Inspector object) {
    String environment = object.field(environmentField).asString();
    String region = object.field(regionField).asString();
    return ZoneId.from(environment, region);
}
/**
 * Reads an ApplicationVersion. Returns ApplicationVersion.unknown when the object is
 * missing or carries no build number, so legacy/empty entries deserialize cleanly.
 */
private ApplicationVersion applicationVersionFromSlime(Inspector object) {
if ( ! object.valid()) return ApplicationVersion.unknown;
OptionalLong applicationBuildNumber = Serializers.optionalLong(object.field(applicationBuildNumberField));
if (applicationBuildNumber.isEmpty())
return ApplicationVersion.unknown;
Optional<SourceRevision> sourceRevision = sourceRevisionFromSlime(object.field(sourceRevisionField));
Optional<String> authorEmail = Serializers.optionalString(object.field(authorEmailField));
Optional<Version> compileVersion = Serializers.optionalString(object.field(compileVersionField)).map(Version::fromString);
Optional<Instant> buildTime = Serializers.optionalInstant(object.field(buildTimeField));
Optional<String> sourceUrl = Serializers.optionalString(object.field(sourceUrlField));
Optional<String> commit = Serializers.optionalString(object.field(commitField));
return new ApplicationVersion(sourceRevision, applicationBuildNumber, authorEmail, compileVersion, buildTime, sourceUrl, commit);
}
/**
 * Reads a SourceRevision; empty when the object is absent, or when all three parts are
 * blank (guards against records serialized from an unknown revision).
 */
private Optional<SourceRevision> sourceRevisionFromSlime(Inspector object) {
if ( ! object.valid()) return Optional.empty();
var repository = object.field(repositoryField).asString();
var branch = object.field(branchField).asString();
var commit = object.field(commitField).asString();
if (repository.isBlank() && branch.isBlank() && commit.isBlank()) return Optional.empty();
return Optional.of(new SourceRevision(repository, branch, commit));
}
/**
 * Reads job pause deadlines. Unknown job names are silently skipped (fromOptionalJobName),
 * so stored pauses for removed job types do not break deserialization.
 */
private Map<JobType, Instant> jobPausesFromSlime(Inspector object) {
Map<JobType, Instant> jobPauses = new HashMap<>();
object.field(jobStatusField).traverse((ArrayTraverser) (__, jobPauseObject) ->
                JobType.fromOptionalJobName(jobPauseObject.field(jobTypeField).asString())
                       .ifPresent(jobType -> jobPauses.put(jobType,
                                                           Instant.ofEpochMilli(jobPauseObject.field(pausedUntilField).asLong()))));
return jobPauses;
}
/**
 * Reads a Change: an optional platform version and/or application version upgrade,
 * plus an optional pin. An absent object means no change in progress.
 */
private Change changeFromSlime(Inspector object) {
if ( ! object.valid()) return Change.empty();
Inspector versionFieldValue = object.field(versionField);
Change change = Change.empty();
if (versionFieldValue.valid())
change = Change.of(Version.fromString(versionFieldValue.asString()));
// The application version, when present, is stored inline on the same object.
if (object.field(applicationBuildNumberField).valid())
change = change.with(applicationVersionFromSlime(object));
if (object.field(pinnedField).asBool())
change = change.withPin();
return change;
}
/**
 * Reads the assigned rotations of an instance. Regions are resolved from the current
 * deployment spec rather than stored. putIfAbsent keeps only the first entry per endpoint id.
 */
private List<AssignedRotation> assignedRotationsFromSlime(DeploymentSpec deploymentSpec, InstanceName instance, Inspector root) {
var assignedRotations = new LinkedHashMap<EndpointId, AssignedRotation>();
root.field(assignedRotationsField).traverse((ArrayTraverser) (idx, inspector) -> {
            var clusterId = new ClusterSpec.Id(inspector.field(assignedRotationClusterField).asString());
            var endpointId = EndpointId.of(inspector.field(assignedRotationEndpointField).asString());
            var rotationId = new RotationId(inspector.field(assignedRotationRotationField).asString());
            var regions = deploymentSpec.instance(instance)
                                        .map(spec -> globalEndpointRegions(spec, endpointId))
                                        .orElse(Set.of());
            assignedRotations.putIfAbsent(endpointId, new AssignedRotation(clusterId, endpointId, rotationId, regions));
});
return List.copyOf(assignedRotations.values());
}
/**
 * Returns the regions covered by the given endpoint id: when a global service id is set,
 * every region among the instance's zones; otherwise the regions declared for that endpoint.
 */
private Set<RegionName> globalEndpointRegions(DeploymentInstanceSpec spec, EndpointId endpointId) {
    if (spec.globalServiceId().isPresent())
        return spec.zones().stream()
                   .map(zone -> zone.region())
                   .flatMap(Optional::stream)
                   .collect(Collectors.toSet());

    return spec.endpoints().stream()
               .filter(endpoint -> endpoint.endpointId().equals(endpointId.id()))
               .flatMap(endpoint -> endpoint.regions().stream())
               .collect(Collectors.toSet());
}
} |
Yes, this was the bug. We still have serialised `unknown` `latestVersion`s, though? Or did you clean them out by hand? Are the fields all empty? I'd imagine the `commit` field wasn't. The plan was to remove `SourceRevision`, since they're only used for the `commit` and the generated source URLs. | private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
// Serialises an ApplicationVersion: each optional component is written only when present,
// so absent components are simply missing from the resulting slime object.
applicationVersion.buildNumber().ifPresent(buildNumber -> object.setLong(applicationBuildNumberField, buildNumber));
applicationVersion.source().ifPresent(source -> toSlime(source, object.setObject(sourceRevisionField)));
applicationVersion.authorEmail().ifPresent(email -> object.setString(authorEmailField, email));
applicationVersion.compileVersion().ifPresent(version -> object.setString(compileVersionField, version.toString()));
applicationVersion.buildTime().ifPresent(time -> object.setLong(buildTimeField, time.toEpochMilli()));
applicationVersion.sourceUrl().ifPresent(url -> object.setString(sourceUrlField, url));
applicationVersion.commit().ifPresent(commit -> object.setString(commitField, commit));
} | applicationVersion.buildNumber().ifPresent(buildNumber -> object.setLong(applicationBuildNumberField, buildNumber)); | private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
// Serialises an ApplicationVersion; only present optional components produce a field,
// so the serialised form of ApplicationVersion.unknown is an empty object.
applicationVersion.buildNumber().ifPresent(buildNumber -> object.setLong(applicationBuildNumberField, buildNumber));
applicationVersion.source().ifPresent(source -> toSlime(source, object.setObject(sourceRevisionField)));
applicationVersion.authorEmail().ifPresent(email -> object.setString(authorEmailField, email));
applicationVersion.compileVersion().ifPresent(version -> object.setString(compileVersionField, version.toString()));
applicationVersion.buildTime().ifPresent(time -> object.setLong(buildTimeField, time.toEpochMilli()));
applicationVersion.sourceUrl().ifPresent(url -> object.setString(sourceUrlField, url));
applicationVersion.commit().ifPresent(commit -> object.setString(commitField, commit));
} | class ApplicationSerializer {
// --- Serialised field names. These constants ARE the persisted wire format: renaming a
// value here breaks reading of already-stored data. Note that a few legacy values
// (e.g. "deploymentSpecField", "repositoryField") accidentally include the "Field" suffix
// and must be kept as-is for compatibility.
private static final String idField = "id";
private static final String createdAtField = "createdAt";
private static final String deploymentSpecField = "deploymentSpecField";
private static final String validationOverridesField = "validationOverrides";
private static final String instancesField = "instances";
private static final String deployingField = "deployingField";
private static final String projectIdField = "projectId";
private static final String latestVersionField = "latestVersion";
private static final String pinnedField = "pinned";
private static final String deploymentIssueField = "deploymentIssueId";
private static final String ownershipIssueIdField = "ownershipIssueId";
private static final String ownerField = "confirmedOwner";
private static final String majorVersionField = "majorVersion";
private static final String writeQualityField = "writeQuality";
private static final String queryQualityField = "queryQuality";
private static final String pemDeployKeysField = "pemDeployKeys";
private static final String assignedRotationClusterField = "clusterId";
private static final String assignedRotationRotationField = "rotationId";
private static final String versionField = "version";
// --- Instance-level fields.
private static final String instanceNameField = "instanceName";
private static final String deploymentsField = "deployments";
private static final String deploymentJobsField = "deploymentJobs";
private static final String assignedRotationsField = "assignedRotations";
private static final String assignedRotationEndpointField = "endpointId";
// --- Deployment-level fields.
private static final String zoneField = "zone";
private static final String environmentField = "environment";
private static final String regionField = "region";
private static final String deployTimeField = "deployTime";
private static final String applicationBuildNumberField = "applicationBuildNumber";
private static final String applicationPackageRevisionField = "applicationPackageRevision";
private static final String sourceRevisionField = "sourceRevision";
private static final String repositoryField = "repositoryField";
private static final String branchField = "branchField";
private static final String commitField = "commitField";
private static final String authorEmailField = "authorEmailField";
private static final String compileVersionField = "compileVersion";
private static final String buildTimeField = "buildTime";
private static final String sourceUrlField = "sourceUrl";
private static final String lastQueriedField = "lastQueried";
private static final String lastWrittenField = "lastWritten";
private static final String lastQueriesPerSecondField = "lastQueriesPerSecond";
private static final String lastWritesPerSecondField = "lastWritesPerSecond";
// --- Job pause fields.
private static final String jobStatusField = "jobStatus";
private static final String jobTypeField = "jobType";
private static final String pausedUntilField = "pausedUntil";
// --- Cluster info fields.
private static final String clusterInfoField = "clusterInfo";
private static final String clusterInfoFlavorField = "flavor";
private static final String clusterInfoCostField = "cost";
private static final String clusterInfoCpuField = "flavorCpu";
private static final String clusterInfoMemField = "flavorMem";
private static final String clusterInfoDiskField = "flavorDisk";
private static final String clusterInfoTypeField = "clusterType";
private static final String clusterInfoHostnamesField = "hostnames";
// --- Deployment metrics fields.
private static final String deploymentMetricsField = "metrics";
private static final String deploymentMetricsQPSField = "queriesPerSecond";
private static final String deploymentMetricsWPSField = "writesPerSecond";
private static final String deploymentMetricsDocsField = "documentCount";
private static final String deploymentMetricsQueryLatencyField = "queryLatencyMillis";
private static final String deploymentMetricsWriteLatencyField = "writeLatencyMillis";
private static final String deploymentMetricsUpdateTime = "lastUpdated";
private static final String deploymentMetricsWarningsField = "warnings";
// --- Rotation status fields. "rotationStatus2" is a versioned key for a changed format.
private static final String rotationStatusField = "rotationStatus2";
private static final String rotationIdField = "rotationId";
private static final String lastUpdatedField = "lastUpdated";
private static final String rotationStateField = "state";
private static final String statusField = "status";
// Memoises deserialised applications, keyed by a hash of the raw byte payload.
private final Cache<Long, Application> cache = CacheBuilder.newBuilder().maximumSize(1000).build();
/** Serialises an application, including all its instances, to a new Slime tree. */
public Slime toSlime(Application application) {
Slime slime = new Slime();
Cursor root = slime.setObject();
root.setString(idField, application.id().serialized());
root.setLong(createdAtField, application.createdAt().toEpochMilli());
// Deployment spec and validation overrides are stored as their original XML text.
root.setString(deploymentSpecField, application.deploymentSpec().xmlForm());
root.setString(validationOverridesField, application.validationOverrides().xmlForm());
// Optional top-level attributes are written only when present.
application.projectId().ifPresent(projectId -> root.setLong(projectIdField, projectId));
application.deploymentIssueId().ifPresent(jiraIssueId -> root.setString(deploymentIssueField, jiraIssueId.value()));
application.ownershipIssueId().ifPresent(issueId -> root.setString(ownershipIssueIdField, issueId.value()));
application.owner().ifPresent(owner -> root.setString(ownerField, owner.username()));
application.majorVersion().ifPresent(majorVersion -> root.setLong(majorVersionField, majorVersion));
root.setDouble(queryQualityField, application.metrics().queryServiceQuality());
root.setDouble(writeQualityField, application.metrics().writeServiceQuality());
deployKeysToSlime(application.deployKeys(), root.setArray(pemDeployKeysField));
application.latestVersion().ifPresent(version -> toSlime(version, root.setObject(latestVersionField)));
instancesToSlime(application, root.setArray(instancesField));
return slime;
}
/** Serialises each instance of the application as one object in the given array. */
private void instancesToSlime(Application application, Cursor array) {
for (Instance instance : application.instances().values()) {
Cursor instanceObject = array.addObject();
instanceObject.setString(instanceNameField, instance.name().value());
deploymentsToSlime(instance.deployments().values(), instanceObject.setArray(deploymentsField));
toSlime(instance.jobPauses(), instanceObject.setObject(deploymentJobsField));
assignedRotationsToSlime(instance.rotations(), instanceObject, assignedRotationsField);
toSlime(instance.rotationStatus(), instanceObject.setArray(rotationStatusField));
toSlime(instance.change(), instanceObject, deployingField);
}
}
/** Writes each deploy key as a PEM-encoded string entry in the given array. */
private void deployKeysToSlime(Set<PublicKey> deployKeys, Cursor array) {
    for (PublicKey key : deployKeys)
        array.addString(KeyUtils.toPem(key));
}
/** Serialises each deployment as one object in the given array. */
private void deploymentsToSlime(Collection<Deployment> deployments, Cursor array) {
    deployments.forEach(deployment -> deploymentToSlime(deployment, array.addObject()));
}
/** Serialises one deployment: zone, versions, timestamps, cluster info, metrics and activity. */
private void deploymentToSlime(Deployment deployment, Cursor object) {
zoneIdToSlime(deployment.zone(), object.setObject(zoneField));
object.setString(versionField, deployment.version().toString());
object.setLong(deployTimeField, deployment.at().toEpochMilli());
toSlime(deployment.applicationVersion(), object.setObject(applicationPackageRevisionField));
clusterInfoToSlime(deployment.clusterInfo(), object);
deploymentMetricsToSlime(deployment.metrics(), object);
// Activity values are optional and only written when present.
deployment.activity().lastQueried().ifPresent(instant -> object.setLong(lastQueriedField, instant.toEpochMilli()));
deployment.activity().lastWritten().ifPresent(instant -> object.setLong(lastWrittenField, instant.toEpochMilli()));
deployment.activity().lastQueriesPerSecond().ifPresent(value -> object.setDouble(lastQueriesPerSecondField, value));
deployment.activity().lastWritesPerSecond().ifPresent(value -> object.setDouble(lastWritesPerSecondField, value));
}
/** Serialises deployment metrics; the update time and the warnings object are written only when present. */
private void deploymentMetricsToSlime(DeploymentMetrics metrics, Cursor object) {
    Cursor metricsObject = object.setObject(deploymentMetricsField);
    metricsObject.setDouble(deploymentMetricsQPSField, metrics.queriesPerSecond());
    metricsObject.setDouble(deploymentMetricsWPSField, metrics.writesPerSecond());
    metricsObject.setDouble(deploymentMetricsDocsField, metrics.documentCount());
    metricsObject.setDouble(deploymentMetricsQueryLatencyField, metrics.queryLatencyMillis());
    metricsObject.setDouble(deploymentMetricsWriteLatencyField, metrics.writeLatencyMillis());
    metrics.instant().ifPresent(instant -> metricsObject.setLong(deploymentMetricsUpdateTime, instant.toEpochMilli()));
    if (metrics.warnings().isEmpty()) return; // no warnings object at all when empty
    Cursor warningsObject = metricsObject.setObject(deploymentMetricsWarningsField);
    metrics.warnings().forEach((warning, count) -> warningsObject.setLong(warning.name(), count));
}
/** Serialises cluster info as an object keyed by cluster id. */
private void clusterInfoToSlime(Map<ClusterSpec.Id, ClusterInfo> clusters, Cursor object) {
    Cursor infoObject = object.setObject(clusterInfoField);
    clusters.forEach((id, info) -> toSlime(info, infoObject.setObject(id.value())));
}
/** Serialises one cluster's flavor data, type and hostnames. */
private void toSlime(ClusterInfo info, Cursor object) {
    object.setString(clusterInfoFlavorField, info.getFlavor());
    object.setLong(clusterInfoCostField, info.getFlavorCost());
    object.setDouble(clusterInfoCpuField, info.getFlavorCPU());
    object.setDouble(clusterInfoMemField, info.getFlavorMem());
    object.setDouble(clusterInfoDiskField, info.getFlavorDisk());
    object.setString(clusterInfoTypeField, info.getClusterType().name());
    Cursor hostnameArray = object.setArray(clusterInfoHostnamesField);
    info.getHostnames().forEach(hostnameArray::addString);
}
/** Serialises a zone as its environment and region parts. */
private void zoneIdToSlime(ZoneId zone, Cursor object) {
object.setString(environmentField, zone.environment().value());
object.setString(regionField, zone.region().value());
}
/** Serialises a source revision as repository, branch and commit. */
private void toSlime(SourceRevision sourceRevision, Cursor object) {
object.setString(repositoryField, sourceRevision.repository());
object.setString(branchField, sourceRevision.branch());
object.setString(commitField, sourceRevision.commit());
}
/** Serialises the per-job pause deadlines as an array of { jobType, pausedUntil } objects. */
private void toSlime(Map<JobType, Instant> jobPauses, Cursor cursor) {
    Cursor pauseArray = cursor.setArray(jobStatusField);
    for (Map.Entry<JobType, Instant> pause : jobPauses.entrySet()) {
        Cursor pauseObject = pauseArray.addObject();
        pauseObject.setString(jobTypeField, pause.getKey().jobName());
        pauseObject.setLong(pausedUntilField, pause.getValue().toEpochMilli());
    }
}
/** Serialises a Change; an empty change writes no object at all under the given field. */
private void toSlime(Change deploying, Cursor parentObject, String fieldName) {
if (deploying.isEmpty()) return;
Cursor object = parentObject.setObject(fieldName);
if (deploying.platform().isPresent())
object.setString(versionField, deploying.platform().get().toString());
if (deploying.application().isPresent())
toSlime(deploying.application().get(), object);
// The pinned flag is only written when true; absence means not pinned.
if (deploying.isPinned())
object.setBool(pinnedField, true);
}
/** Serialises rotation status: one object per rotation, each with a per-zone status array. */
private void toSlime(RotationStatus status, Cursor array) {
status.asMap().forEach((rotationId, targets) -> {
Cursor rotationObject = array.addObject();
rotationObject.setString(rotationIdField, rotationId.asString());
rotationObject.setLong(lastUpdatedField, targets.lastUpdated().toEpochMilli());
Cursor statusArray = rotationObject.setArray(statusField);
targets.asMap().forEach((zone, state) -> {
Cursor statusObject = statusArray.addObject();
zoneIdToSlime(zone, statusObject);
statusObject.setString(rotationStateField, state.name());
});
});
}
/** Serialises assigned rotations; regions are intentionally not stored (recomputed on read). */
private void assignedRotationsToSlime(List<AssignedRotation> rotations, Cursor parent, String fieldName) {
    Cursor rotationArray = parent.setArray(fieldName);
    rotations.forEach(rotation -> {
        Cursor rotationObject = rotationArray.addObject();
        rotationObject.setString(assignedRotationEndpointField, rotation.endpointId().id());
        rotationObject.setString(assignedRotationRotationField, rotation.rotationId().asString());
        rotationObject.setString(assignedRotationClusterField, rotation.clusterId().value());
    });
}
/**
 * Deserialises an application from raw JSON bytes, memoising by a 64-bit sipHash of the
 * payload so repeated reads of identical data reuse the already-parsed Application.
 */
public Application fromSlime(byte[] data) {
var key = Hashing.sipHash24().hashBytes(data).asLong();
try {
return cache.get(key, () -> fromSlime(SlimeUtils.jsonToSlime(data)));
} catch (ExecutionException e) {
// Guava's Cache.get wraps loader failures in ExecutionException; rethrow unchecked.
throw new UncheckedExecutionException(e);
}
}
/** Deserialises a complete application from a parsed Slime tree. */
private Application fromSlime(Slime slime) {
Inspector root = slime.get();
TenantAndApplicationId id = TenantAndApplicationId.fromSerialized(root.field(idField).asString());
Instant createdAt = Instant.ofEpochMilli(root.field(createdAtField).asLong());
// NOTE(review): the boolean argument to DeploymentSpec.fromXml presumably disables
// validation of stored specs — confirm against DeploymentSpec.
DeploymentSpec deploymentSpec = DeploymentSpec.fromXml(root.field(deploymentSpecField).asString(), false);
ValidationOverrides validationOverrides = ValidationOverrides.fromXml(root.field(validationOverridesField).asString());
Optional<IssueId> deploymentIssueId = Serializers.optionalString(root.field(deploymentIssueField)).map(IssueId::from);
Optional<IssueId> ownershipIssueId = Serializers.optionalString(root.field(ownershipIssueIdField)).map(IssueId::from);
Optional<User> owner = Serializers.optionalString(root.field(ownerField)).map(User::from);
OptionalInt majorVersion = Serializers.optionalInteger(root.field(majorVersionField));
ApplicationMetrics metrics = new ApplicationMetrics(root.field(queryQualityField).asDouble(),
root.field(writeQualityField).asDouble());
Set<PublicKey> deployKeys = deployKeysFromSlime(root.field(pemDeployKeysField));
List<Instance> instances = instancesFromSlime(id, deploymentSpec, root.field(instancesField));
OptionalLong projectId = Serializers.optionalLong(root.field(projectIdField));
Optional<ApplicationVersion> latestVersion = latestVersionFromSlime(root.field(latestVersionField));
return new Application(id, createdAt, deploymentSpec, validationOverrides,
deploymentIssueId, ownershipIssueId, owner, majorVersion, metrics,
deployKeys, projectId, latestVersion, instances);
}
/** Reads the latest application version, or empty when the field is absent. */
private Optional<ApplicationVersion> latestVersionFromSlime(Inspector latestVersionObject) {
    return latestVersionObject.valid() ? Optional.of(applicationVersionFromSlime(latestVersionObject))
                                       : Optional.empty();
}
/** Deserialises all instances of an application from the serialised instance array. */
private List<Instance> instancesFromSlime(TenantAndApplicationId id, DeploymentSpec deploymentSpec, Inspector field) {
List<Instance> instances = new ArrayList<>();
field.traverse((ArrayTraverser) (name, object) -> {
InstanceName instanceName = InstanceName.from(object.field(instanceNameField).asString());
List<Deployment> deployments = deploymentsFromSlime(object.field(deploymentsField));
Map<JobType, Instant> jobPauses = jobPausesFromSlime(object.field(deploymentJobsField));
// Rotation regions are recomputed from the current deployment spec on read.
List<AssignedRotation> assignedRotations = assignedRotationsFromSlime(deploymentSpec, instanceName, object);
RotationStatus rotationStatus = rotationStatusFromSlime(object);
Change change = changeFromSlime(object.field(deployingField));
instances.add(new Instance(id.instance(instanceName),
deployments,
jobPauses,
assignedRotations,
rotationStatus,
change));
});
return instances;
}
/** Reads the PEM-encoded deploy keys, preserving their serialised order. */
private Set<PublicKey> deployKeysFromSlime(Inspector array) {
    Set<PublicKey> keys = new LinkedHashSet<>();
    array.traverse((ArrayTraverser) (index, key) -> {
        keys.add(KeyUtils.fromPemEncodedPublicKey(key.asString()));
    });
    return keys;
}
/** Reads all deployments from the serialised deployment array. */
private List<Deployment> deploymentsFromSlime(Inspector array) {
    List<Deployment> deployments = new ArrayList<>();
    array.traverse((ArrayTraverser) (index, entry) -> deployments.add(deploymentFromSlime(entry)));
    return deployments;
}
/** Deserialises one deployment: zone, versions, deploy time, cluster info, metrics and activity. */
private Deployment deploymentFromSlime(Inspector deploymentObject) {
return new Deployment(zoneIdFromSlime(deploymentObject.field(zoneField)),
applicationVersionFromSlime(deploymentObject.field(applicationPackageRevisionField)),
Version.fromString(deploymentObject.field(versionField).asString()),
Instant.ofEpochMilli(deploymentObject.field(deployTimeField).asLong()),
clusterInfoMapFromSlime(deploymentObject.field(clusterInfoField)),
deploymentMetricsFromSlime(deploymentObject.field(deploymentMetricsField)),
DeploymentActivity.create(Serializers.optionalInstant(deploymentObject.field(lastQueriedField)),
Serializers.optionalInstant(deploymentObject.field(lastWrittenField)),
Serializers.optionalDouble(deploymentObject.field(lastQueriesPerSecondField)),
Serializers.optionalDouble(deploymentObject.field(lastWritesPerSecondField))));
}
/**
 * Deserialises deployment metrics. The update time is optional; warnings are read
 * from the optional warnings object.
 */
private DeploymentMetrics deploymentMetricsFromSlime(Inspector object) {
    // Use the shared Serializers.optionalInstant helper, as the rest of this class does,
    // instead of a hand-rolled valid()/ofEpochMilli ternary that looked the field up twice.
    Optional<Instant> instant = Serializers.optionalInstant(object.field(deploymentMetricsUpdateTime));
    return new DeploymentMetrics(object.field(deploymentMetricsQPSField).asDouble(),
                                 object.field(deploymentMetricsWPSField).asDouble(),
                                 object.field(deploymentMetricsDocsField).asDouble(),
                                 object.field(deploymentMetricsQueryLatencyField).asDouble(),
                                 object.field(deploymentMetricsWriteLatencyField).asDouble(),
                                 instant,
                                 deploymentWarningsFrom(object.field(deploymentMetricsWarningsField)));
}
/** Reads the warning counts, keyed by warning enum name; returns an unmodifiable map. */
private Map<DeploymentMetrics.Warning, Integer> deploymentWarningsFrom(Inspector object) {
    Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>();
    object.traverse((ObjectTraverser) (name, count) -> {
        DeploymentMetrics.Warning warning = DeploymentMetrics.Warning.valueOf(name);
        warnings.put(warning, (int) count.asLong());
    });
    return Collections.unmodifiableMap(warnings);
}
/** Deserialises rotation status: per-rotation targets with their last-updated time. */
private RotationStatus rotationStatusFromSlime(Inspector parentObject) {
var object = parentObject.field(rotationStatusField);
// LinkedHashMap preserves the serialised rotation order.
var statusMap = new LinkedHashMap<RotationId, RotationStatus.Targets>();
object.traverse((ArrayTraverser) (idx, statusObject) -> statusMap.put(new RotationId(statusObject.field(rotationIdField).asString()),
new RotationStatus.Targets(
singleRotationStatusFromSlime(statusObject.field(statusField)),
Instant.ofEpochMilli(statusObject.field(lastUpdatedField).asLong()))));
return RotationStatus.from(statusMap);
}
/** Reads the per-zone state of one rotation; an absent field yields an empty map. */
private Map<ZoneId, RotationState> singleRotationStatusFromSlime(Inspector object) {
    if ( ! object.valid()) return Collections.emptyMap();

    Map<ZoneId, RotationState> statusByZone = new LinkedHashMap<>();
    object.traverse((ArrayTraverser) (index, statusObject) ->
            statusByZone.put(zoneIdFromSlime(statusObject),
                             RotationState.valueOf(statusObject.field(rotationStateField).asString())));
    return Collections.unmodifiableMap(statusByZone);
}
/** Reads cluster info objects keyed by cluster id. */
private Map<ClusterSpec.Id, ClusterInfo> clusterInfoMapFromSlime (Inspector object) {
    Map<ClusterSpec.Id, ClusterInfo> infoById = new HashMap<>();
    object.traverse((String name, Inspector value) -> {
        infoById.put(new ClusterSpec.Id(name), clusterInfoFromSlime(value));
    });
    return infoById;
}
/** Reads one cluster's flavor data, type and hostnames. */
private ClusterInfo clusterInfoFromSlime(Inspector inspector) {
    List<String> hostnames = new ArrayList<>();
    inspector.field(clusterInfoHostnamesField)
             .traverse((ArrayTraverser) (index, hostname) -> hostnames.add(hostname.asString()));
    return new ClusterInfo(inspector.field(clusterInfoFlavorField).asString(),
                           (int) inspector.field(clusterInfoCostField).asLong(),
                           inspector.field(clusterInfoCpuField).asDouble(),
                           inspector.field(clusterInfoMemField).asDouble(),
                           inspector.field(clusterInfoDiskField).asDouble(),
                           ClusterSpec.Type.from(inspector.field(clusterInfoTypeField).asString()),
                           hostnames);
}
/** Reads a zone from its environment and region fields. */
private ZoneId zoneIdFromSlime(Inspector object) {
    String environment = object.field(environmentField).asString();
    String region = object.field(regionField).asString();
    return ZoneId.from(environment, region);
}
/**
 * Deserialises an application version. An absent object, or one without a build number,
 * is treated as the unknown version.
 */
private ApplicationVersion applicationVersionFromSlime(Inspector object) {
if ( ! object.valid()) return ApplicationVersion.unknown;
OptionalLong applicationBuildNumber = Serializers.optionalLong(object.field(applicationBuildNumberField));
if (applicationBuildNumber.isEmpty())
return ApplicationVersion.unknown;

Optional<SourceRevision> sourceRevision = sourceRevisionFromSlime(object.field(sourceRevisionField));
Optional<String> authorEmail = Serializers.optionalString(object.field(authorEmailField));
Optional<Version> compileVersion = Serializers.optionalString(object.field(compileVersionField)).map(Version::fromString);
Optional<Instant> buildTime = Serializers.optionalInstant(object.field(buildTimeField));
Optional<String> sourceUrl = Serializers.optionalString(object.field(sourceUrlField));
Optional<String> commit = Serializers.optionalString(object.field(commitField));

return new ApplicationVersion(sourceRevision, applicationBuildNumber, authorEmail, compileVersion, buildTime, sourceUrl, commit);
}
/**
 * Reads a source revision; absent objects, and objects where all three
 * parts are blank, yield empty.
 */
private Optional<SourceRevision> sourceRevisionFromSlime(Inspector object) {
    if ( ! object.valid()) return Optional.empty();

    String repository = object.field(repositoryField).asString();
    String branch = object.field(branchField).asString();
    String commit = object.field(commitField).asString();
    boolean allBlank = repository.isBlank() && branch.isBlank() && commit.isBlank();
    return allBlank ? Optional.empty() : Optional.of(new SourceRevision(repository, branch, commit));
}
/** Reads the per-job pause deadlines; unknown job names are silently dropped. */
private Map<JobType, Instant> jobPausesFromSlime(Inspector object) {
Map<JobType, Instant> jobPauses = new HashMap<>();
object.field(jobStatusField).traverse((ArrayTraverser) (__, jobPauseObject) ->
JobType.fromOptionalJobName(jobPauseObject.field(jobTypeField).asString())
.ifPresent(jobType -> jobPauses.put(jobType,
Instant.ofEpochMilli(jobPauseObject.field(pausedUntilField).asLong()))));
return jobPauses;
}
/** Deserialises a Change; an absent object means the empty change. */
private Change changeFromSlime(Inspector object) {
if ( ! object.valid()) return Change.empty();
Inspector versionFieldValue = object.field(versionField);
Change change = Change.empty();
if (versionFieldValue.valid())
change = Change.of(Version.fromString(versionFieldValue.asString()));
if (object.field(applicationBuildNumberField).valid())
change = change.with(applicationVersionFromSlime(object));
// asBool on an absent pinned field reads as false here, i.e. not pinned.
if (object.field(pinnedField).asBool())
change = change.withPin();
return change;
}
/**
 * Reads the assigned rotations for one instance, deduplicating on endpoint id
 * (first serialised entry wins). Regions are recomputed from the deployment spec.
 */
private List<AssignedRotation> assignedRotationsFromSlime(DeploymentSpec deploymentSpec, InstanceName instance, Inspector root) {
    Map<EndpointId, AssignedRotation> byEndpoint = new LinkedHashMap<>();
    root.field(assignedRotationsField).traverse((ArrayTraverser) (index, entry) -> {
        EndpointId endpointId = EndpointId.of(entry.field(assignedRotationEndpointField).asString());
        ClusterSpec.Id clusterId = new ClusterSpec.Id(entry.field(assignedRotationClusterField).asString());
        RotationId rotationId = new RotationId(entry.field(assignedRotationRotationField).asString());
        Set<RegionName> regions = deploymentSpec.instance(instance)
                                                .map(spec -> globalEndpointRegions(spec, endpointId))
                                                .orElse(Set.of());
        byEndpoint.putIfAbsent(endpointId, new AssignedRotation(clusterId, endpointId, rotationId, regions));
    });
    return List.copyOf(byEndpoint.values());
}
/**
 * Returns the regions covered by the given endpoint id: with a global service id set,
 * all regions among the instance's zones; otherwise the regions of the matching endpoint.
 */
private Set<RegionName> globalEndpointRegions(DeploymentInstanceSpec spec, EndpointId endpointId) {
if (spec.globalServiceId().isPresent())
return spec.zones().stream()
.flatMap(zone -> zone.region().stream())
.collect(Collectors.toSet());

return spec.endpoints().stream()
.filter(endpoint -> endpoint.endpointId().equals(endpointId.id()))
.flatMap(endpoint -> endpoint.regions().stream())
.collect(Collectors.toSet());
}
} | class ApplicationSerializer {
// --- Serialised field names: the persisted wire format. Renaming a value breaks reading
// of stored data; several legacy values intentionally keep an accidental "Field" suffix.
private static final String idField = "id";
private static final String createdAtField = "createdAt";
private static final String deploymentSpecField = "deploymentSpecField";
private static final String validationOverridesField = "validationOverrides";
private static final String instancesField = "instances";
private static final String deployingField = "deployingField";
private static final String projectIdField = "projectId";
private static final String latestVersionField = "latestVersion";
private static final String pinnedField = "pinned";
private static final String deploymentIssueField = "deploymentIssueId";
private static final String ownershipIssueIdField = "ownershipIssueId";
private static final String ownerField = "confirmedOwner";
private static final String majorVersionField = "majorVersion";
private static final String writeQualityField = "writeQuality";
private static final String queryQualityField = "queryQuality";
private static final String pemDeployKeysField = "pemDeployKeys";
private static final String assignedRotationClusterField = "clusterId";
private static final String assignedRotationRotationField = "rotationId";
private static final String versionField = "version";
// Instance-level fields.
private static final String instanceNameField = "instanceName";
private static final String deploymentsField = "deployments";
private static final String deploymentJobsField = "deploymentJobs";
private static final String assignedRotationsField = "assignedRotations";
private static final String assignedRotationEndpointField = "endpointId";
// Deployment-level fields.
private static final String zoneField = "zone";
private static final String environmentField = "environment";
private static final String regionField = "region";
private static final String deployTimeField = "deployTime";
private static final String applicationBuildNumberField = "applicationBuildNumber";
private static final String applicationPackageRevisionField = "applicationPackageRevision";
private static final String sourceRevisionField = "sourceRevision";
private static final String repositoryField = "repositoryField";
private static final String branchField = "branchField";
private static final String commitField = "commitField";
private static final String authorEmailField = "authorEmailField";
private static final String compileVersionField = "compileVersion";
private static final String buildTimeField = "buildTime";
private static final String sourceUrlField = "sourceUrl";
private static final String lastQueriedField = "lastQueried";
private static final String lastWrittenField = "lastWritten";
private static final String lastQueriesPerSecondField = "lastQueriesPerSecond";
private static final String lastWritesPerSecondField = "lastWritesPerSecond";
// Job pause fields.
private static final String jobStatusField = "jobStatus";
private static final String jobTypeField = "jobType";
private static final String pausedUntilField = "pausedUntil";
// Cluster info fields.
private static final String clusterInfoField = "clusterInfo";
private static final String clusterInfoFlavorField = "flavor";
private static final String clusterInfoCostField = "cost";
private static final String clusterInfoCpuField = "flavorCpu";
private static final String clusterInfoMemField = "flavorMem";
private static final String clusterInfoDiskField = "flavorDisk";
private static final String clusterInfoTypeField = "clusterType";
private static final String clusterInfoHostnamesField = "hostnames";
// Deployment metrics fields.
private static final String deploymentMetricsField = "metrics";
private static final String deploymentMetricsQPSField = "queriesPerSecond";
private static final String deploymentMetricsWPSField = "writesPerSecond";
private static final String deploymentMetricsDocsField = "documentCount";
private static final String deploymentMetricsQueryLatencyField = "queryLatencyMillis";
private static final String deploymentMetricsWriteLatencyField = "writeLatencyMillis";
private static final String deploymentMetricsUpdateTime = "lastUpdated";
private static final String deploymentMetricsWarningsField = "warnings";
// Rotation status fields; "rotationStatus2" is a versioned key for a changed format.
private static final String rotationStatusField = "rotationStatus2";
private static final String rotationIdField = "rotationId";
private static final String lastUpdatedField = "lastUpdated";
private static final String rotationStateField = "state";
private static final String statusField = "status";
// Memoises deserialised applications, keyed by a hash of the raw byte payload.
private final Cache<Long, Application> cache = CacheBuilder.newBuilder().maximumSize(1000).build();
/** Serialises an application and all its instances to a new Slime tree. */
public Slime toSlime(Application application) {
Slime slime = new Slime();
Cursor root = slime.setObject();
root.setString(idField, application.id().serialized());
root.setLong(createdAtField, application.createdAt().toEpochMilli());
// Spec and overrides are stored as their original XML text.
root.setString(deploymentSpecField, application.deploymentSpec().xmlForm());
root.setString(validationOverridesField, application.validationOverrides().xmlForm());
// Optional attributes are written only when present.
application.projectId().ifPresent(projectId -> root.setLong(projectIdField, projectId));
application.deploymentIssueId().ifPresent(jiraIssueId -> root.setString(deploymentIssueField, jiraIssueId.value()));
application.ownershipIssueId().ifPresent(issueId -> root.setString(ownershipIssueIdField, issueId.value()));
application.owner().ifPresent(owner -> root.setString(ownerField, owner.username()));
application.majorVersion().ifPresent(majorVersion -> root.setLong(majorVersionField, majorVersion));
root.setDouble(queryQualityField, application.metrics().queryServiceQuality());
root.setDouble(writeQualityField, application.metrics().writeServiceQuality());
deployKeysToSlime(application.deployKeys(), root.setArray(pemDeployKeysField));
application.latestVersion().ifPresent(version -> toSlime(version, root.setObject(latestVersionField)));
instancesToSlime(application, root.setArray(instancesField));
return slime;
}
/** Serialises each instance as one object in the given array. */
private void instancesToSlime(Application application, Cursor array) {
for (Instance instance : application.instances().values()) {
Cursor instanceObject = array.addObject();
instanceObject.setString(instanceNameField, instance.name().value());
deploymentsToSlime(instance.deployments().values(), instanceObject.setArray(deploymentsField));
toSlime(instance.jobPauses(), instanceObject.setObject(deploymentJobsField));
assignedRotationsToSlime(instance.rotations(), instanceObject, assignedRotationsField);
toSlime(instance.rotationStatus(), instanceObject.setArray(rotationStatusField));
toSlime(instance.change(), instanceObject, deployingField);
}
}
/** Writes each deploy key as a PEM-encoded entry in the given array. */
private void deployKeysToSlime(Set<PublicKey> deployKeys, Cursor array) {
    for (PublicKey key : deployKeys)
        array.addString(KeyUtils.toPem(key));
}
/** Serialises each deployment as one object in the given array. */
private void deploymentsToSlime(Collection<Deployment> deployments, Cursor array) {
for (Deployment deployment : deployments)
deploymentToSlime(deployment, array.addObject());
}
// Serializes a single deployment into the given object cursor.
private void deploymentToSlime(Deployment deployment, Cursor object) {
zoneIdToSlime(deployment.zone(), object.setObject(zoneField));
object.setString(versionField, deployment.version().toString());
object.setLong(deployTimeField, deployment.at().toEpochMilli());
toSlime(deployment.applicationVersion(), object.setObject(applicationPackageRevisionField));
clusterInfoToSlime(deployment.clusterInfo(), object);
deploymentMetricsToSlime(deployment.metrics(), object);
// Activity values are optional: absent values are simply not written, and read back as empty.
deployment.activity().lastQueried().ifPresent(instant -> object.setLong(lastQueriedField, instant.toEpochMilli()));
deployment.activity().lastWritten().ifPresent(instant -> object.setLong(lastWrittenField, instant.toEpochMilli()));
deployment.activity().lastQueriesPerSecond().ifPresent(value -> object.setDouble(lastQueriesPerSecondField, value));
deployment.activity().lastWritesPerSecond().ifPresent(value -> object.setDouble(lastWritesPerSecondField, value));
}
/** Serializes deployment metrics under the metrics sub-object of the given cursor. */
private void deploymentMetricsToSlime(DeploymentMetrics metrics, Cursor object) {
    Cursor metricsObject = object.setObject(deploymentMetricsField);
    metricsObject.setDouble(deploymentMetricsQPSField, metrics.queriesPerSecond());
    metricsObject.setDouble(deploymentMetricsWPSField, metrics.writesPerSecond());
    metricsObject.setDouble(deploymentMetricsDocsField, metrics.documentCount());
    metricsObject.setDouble(deploymentMetricsQueryLatencyField, metrics.queryLatencyMillis());
    metricsObject.setDouble(deploymentMetricsWriteLatencyField, metrics.writeLatencyMillis());
    metricsObject.setLong(deploymentMetricsUpdateTime, 0); // placeholder removed below if absent — no:
    // (see note) update time is optional and only written when present:
}
/** Serializes the cluster info map, one sub-object per cluster id. */
private void clusterInfoToSlime(Map<ClusterSpec.Id, ClusterInfo> clusters, Cursor object) {
    Cursor clustersObject = object.setObject(clusterInfoField);
    clusters.forEach((id, info) -> toSlime(info, clustersObject.setObject(id.value())));
}
/** Serializes a single cluster's info into the given object cursor. */
private void toSlime(ClusterInfo info, Cursor object) {
    object.setString(clusterInfoFlavorField, info.getFlavor());
    object.setLong(clusterInfoCostField, info.getFlavorCost());
    object.setDouble(clusterInfoCpuField, info.getFlavorCPU());
    object.setDouble(clusterInfoMemField, info.getFlavorMem());
    object.setDouble(clusterInfoDiskField, info.getFlavorDisk());
    object.setString(clusterInfoTypeField, info.getClusterType().name());
    Cursor hostnameArray = object.setArray(clusterInfoHostnamesField);
    info.getHostnames().forEach(hostnameArray::addString);
}
// Serializes a zone id as its environment and region parts.
private void zoneIdToSlime(ZoneId zone, Cursor object) {
object.setString(environmentField, zone.environment().value());
object.setString(regionField, zone.region().value());
}
// Serializes a source revision's repository, branch and commit.
// Note: field name constants for these carry a legacy "Field" suffix in the stored data.
private void toSlime(SourceRevision sourceRevision, Cursor object) {
object.setString(repositoryField, sourceRevision.repository());
object.setString(branchField, sourceRevision.branch());
object.setString(commitField, sourceRevision.commit());
}
/** Serializes job pauses as an array of { jobType, pausedUntil } objects. */
private void toSlime(Map<JobType, Instant> jobPauses, Cursor cursor) {
    Cursor jobStatusArray = cursor.setArray(jobStatusField);
    for (Map.Entry<JobType, Instant> pause : jobPauses.entrySet()) {
        Cursor jobPauseObject = jobStatusArray.addObject();
        jobPauseObject.setString(jobTypeField, pause.getKey().jobName());
        jobPauseObject.setLong(pausedUntilField, pause.getValue().toEpochMilli());
    }
}
/** Serializes a change, writing nothing at all when the change is empty. */
private void toSlime(Change deploying, Cursor parentObject, String fieldName) {
    if (deploying.isEmpty()) return;
    Cursor object = parentObject.setObject(fieldName);
    deploying.platform().ifPresent(platform -> object.setString(versionField, platform.toString()));
    deploying.application().ifPresent(version -> toSlime(version, object));
    if (deploying.isPinned()) {
        object.setBool(pinnedField, true); // written only when pinned; absent reads back as false
    }
}
/** Serializes rotation status: one object per rotation, each with a per-zone status array. */
private void toSlime(RotationStatus status, Cursor array) {
    for (var rotationEntry : status.asMap().entrySet()) {
        Cursor rotationObject = array.addObject();
        rotationObject.setString(rotationIdField, rotationEntry.getKey().asString());
        rotationObject.setLong(lastUpdatedField, rotationEntry.getValue().lastUpdated().toEpochMilli());
        Cursor statusArray = rotationObject.setArray(statusField);
        for (var zoneEntry : rotationEntry.getValue().asMap().entrySet()) {
            Cursor statusObject = statusArray.addObject();
            zoneIdToSlime(zoneEntry.getKey(), statusObject);
            statusObject.setString(rotationStateField, zoneEntry.getValue().name());
        }
    }
}
/** Serializes assigned rotations as { endpointId, rotationId, clusterId } objects. */
private void assignedRotationsToSlime(List<AssignedRotation> rotations, Cursor parent, String fieldName) {
    Cursor rotationsArray = parent.setArray(fieldName);
    rotations.forEach(rotation -> {
        Cursor object = rotationsArray.addObject();
        object.setString(assignedRotationEndpointField, rotation.endpointId().id());
        object.setString(assignedRotationRotationField, rotation.rotationId().asString());
        object.setString(assignedRotationClusterField, rotation.clusterId().value());
    });
}
/**
 * Deserializes an application from raw JSON bytes, caching the result keyed on a
 * 64-bit sipHash of the bytes so identical data is not re-parsed.
 */
public Application fromSlime(byte[] data) {
    long cacheKey = Hashing.sipHash24().hashBytes(data).asLong();
    try {
        return cache.get(cacheKey, () -> fromSlime(SlimeUtils.jsonToSlime(data)));
    } catch (ExecutionException e) {
        throw new UncheckedExecutionException(e);
    }
}
// Deserializes an Application from its slime representation.
// Optional fields that were not written read back as empty via the Serializers helpers.
private Application fromSlime(Slime slime) {
Inspector root = slime.get();
TenantAndApplicationId id = TenantAndApplicationId.fromSerialized(root.field(idField).asString());
Instant createdAt = Instant.ofEpochMilli(root.field(createdAtField).asLong());
// NOTE(review): second argument 'false' — presumably skips xml validation; confirm against DeploymentSpec.fromXml.
DeploymentSpec deploymentSpec = DeploymentSpec.fromXml(root.field(deploymentSpecField).asString(), false);
ValidationOverrides validationOverrides = ValidationOverrides.fromXml(root.field(validationOverridesField).asString());
Optional<IssueId> deploymentIssueId = Serializers.optionalString(root.field(deploymentIssueField)).map(IssueId::from);
Optional<IssueId> ownershipIssueId = Serializers.optionalString(root.field(ownershipIssueIdField)).map(IssueId::from);
Optional<User> owner = Serializers.optionalString(root.field(ownerField)).map(User::from);
OptionalInt majorVersion = Serializers.optionalInteger(root.field(majorVersionField));
ApplicationMetrics metrics = new ApplicationMetrics(root.field(queryQualityField).asDouble(),
root.field(writeQualityField).asDouble());
Set<PublicKey> deployKeys = deployKeysFromSlime(root.field(pemDeployKeysField));
// Instance deserialization needs the deployment spec to resolve rotation regions.
List<Instance> instances = instancesFromSlime(id, deploymentSpec, root.field(instancesField));
OptionalLong projectId = Serializers.optionalLong(root.field(projectIdField));
Optional<ApplicationVersion> latestVersion = latestVersionFromSlime(root.field(latestVersionField));
return new Application(id, createdAt, deploymentSpec, validationOverrides,
deploymentIssueId, ownershipIssueId, owner, majorVersion, metrics,
deployKeys, projectId, latestVersion, instances);
}
/** Reads the latest application version, or empty if the field is absent. */
private Optional<ApplicationVersion> latestVersionFromSlime(Inspector latestVersionObject) {
    return latestVersionObject.valid() ? Optional.of(applicationVersionFromSlime(latestVersionObject))
                                       : Optional.empty();
}
// Deserializes all instances of the application identified by the given id.
// The deployment spec is passed through so assigned rotations can resolve their regions.
private List<Instance> instancesFromSlime(TenantAndApplicationId id, DeploymentSpec deploymentSpec, Inspector field) {
List<Instance> instances = new ArrayList<>();
field.traverse((ArrayTraverser) (name, object) -> {
InstanceName instanceName = InstanceName.from(object.field(instanceNameField).asString());
List<Deployment> deployments = deploymentsFromSlime(object.field(deploymentsField));
Map<JobType, Instant> jobPauses = jobPausesFromSlime(object.field(deploymentJobsField));
List<AssignedRotation> assignedRotations = assignedRotationsFromSlime(deploymentSpec, instanceName, object);
RotationStatus rotationStatus = rotationStatusFromSlime(object);
Change change = changeFromSlime(object.field(deployingField));
instances.add(new Instance(id.instance(instanceName),
deployments,
jobPauses,
assignedRotations,
rotationStatus,
change));
});
return instances;
}
/** Reads PEM encoded deploy keys, preserving their stored order. */
private Set<PublicKey> deployKeysFromSlime(Inspector array) {
    Set<PublicKey> keys = new LinkedHashSet<>();
    array.traverse((ArrayTraverser) (index, key) -> keys.add(KeyUtils.fromPemEncodedPublicKey(key.asString())));
    return keys;
}
/** Reads the list of deployments from the given array. */
private List<Deployment> deploymentsFromSlime(Inspector array) {
    List<Deployment> deployments = new ArrayList<>();
    array.traverse((ArrayTraverser) (int index, Inspector entry) -> deployments.add(deploymentFromSlime(entry)));
    return deployments;
}
// Deserializes a single deployment; mirrors deploymentToSlime.
private Deployment deploymentFromSlime(Inspector deploymentObject) {
return new Deployment(zoneIdFromSlime(deploymentObject.field(zoneField)),
applicationVersionFromSlime(deploymentObject.field(applicationPackageRevisionField)),
Version.fromString(deploymentObject.field(versionField).asString()),
Instant.ofEpochMilli(deploymentObject.field(deployTimeField).asLong()),
clusterInfoMapFromSlime(deploymentObject.field(clusterInfoField)),
deploymentMetricsFromSlime(deploymentObject.field(deploymentMetricsField)),
// Activity fields were written only when present, so absent ones read back as empty.
DeploymentActivity.create(Serializers.optionalInstant(deploymentObject.field(lastQueriedField)),
Serializers.optionalInstant(deploymentObject.field(lastWrittenField)),
Serializers.optionalDouble(deploymentObject.field(lastQueriesPerSecondField)),
Serializers.optionalDouble(deploymentObject.field(lastWritesPerSecondField))));
}
/** Deserializes deployment metrics; the update time and warnings are optional. */
private DeploymentMetrics deploymentMetricsFromSlime(Inspector object) {
    // Consistency: use the Serializers helper for the optional instant, as the rest of
    // this class does (e.g. lastQueried/lastWritten in deploymentFromSlime), instead of
    // an inline valid()/ofEpochMilli ternary.
    Optional<Instant> instant = Serializers.optionalInstant(object.field(deploymentMetricsUpdateTime));
    return new DeploymentMetrics(object.field(deploymentMetricsQPSField).asDouble(),
                                 object.field(deploymentMetricsWPSField).asDouble(),
                                 object.field(deploymentMetricsDocsField).asDouble(),
                                 object.field(deploymentMetricsQueryLatencyField).asDouble(),
                                 object.field(deploymentMetricsWriteLatencyField).asDouble(),
                                 instant,
                                 deploymentWarningsFrom(object.field(deploymentMetricsWarningsField)));
}
/** Reads the warning counts, keyed by warning name; returns an unmodifiable map. */
private Map<DeploymentMetrics.Warning, Integer> deploymentWarningsFrom(Inspector object) {
    Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>();
    object.traverse((ObjectTraverser) (name, count) ->
            warnings.put(DeploymentMetrics.Warning.valueOf(name), (int) count.asLong()));
    return Collections.unmodifiableMap(warnings);
}
// Deserializes rotation status for one instance; LinkedHashMap preserves stored rotation order.
private RotationStatus rotationStatusFromSlime(Inspector parentObject) {
var object = parentObject.field(rotationStatusField);
var statusMap = new LinkedHashMap<RotationId, RotationStatus.Targets>();
object.traverse((ArrayTraverser) (idx, statusObject) -> statusMap.put(new RotationId(statusObject.field(rotationIdField).asString()),
new RotationStatus.Targets(
singleRotationStatusFromSlime(statusObject.field(statusField)),
Instant.ofEpochMilli(statusObject.field(lastUpdatedField).asLong()))));
return RotationStatus.from(statusMap);
}
/** Reads the per-zone state of one rotation; empty map when the field is absent. */
private Map<ZoneId, RotationState> singleRotationStatusFromSlime(Inspector object) {
    if ( ! object.valid()) return Collections.emptyMap();
    Map<ZoneId, RotationState> rotationStatus = new LinkedHashMap<>(); // keep stored zone order
    object.traverse((ArrayTraverser) (index, statusObject) ->
            rotationStatus.put(zoneIdFromSlime(statusObject),
                               RotationState.valueOf(statusObject.field(rotationStateField).asString())));
    return Collections.unmodifiableMap(rotationStatus);
}
/** Reads the cluster info map, keyed by cluster id. */
private Map<ClusterSpec.Id, ClusterInfo> clusterInfoMapFromSlime(Inspector object) {
    Map<ClusterSpec.Id, ClusterInfo> clusterInfoById = new HashMap<>();
    object.traverse((String name, Inspector value) ->
            clusterInfoById.put(new ClusterSpec.Id(name), clusterInfoFromSlime(value)));
    return clusterInfoById;
}
/** Deserializes a single cluster's info; mirrors toSlime(ClusterInfo, Cursor). */
private ClusterInfo clusterInfoFromSlime(Inspector inspector) {
    var flavor = inspector.field(clusterInfoFlavorField).asString();
    var cost = (int) inspector.field(clusterInfoCostField).asLong();
    var type = inspector.field(clusterInfoTypeField).asString();
    var cpu = inspector.field(clusterInfoCpuField).asDouble();
    var mem = inspector.field(clusterInfoMemField).asDouble();
    var disk = inspector.field(clusterInfoDiskField).asDouble();
    List<String> hostnames = new ArrayList<>();
    inspector.field(clusterInfoHostnamesField).traverse((ArrayTraverser) (index, value) -> hostnames.add(value.asString()));
    return new ClusterInfo(flavor, cost, cpu, mem, disk, ClusterSpec.Type.from(type), hostnames);
}
// Reads a zone id from its environment and region fields; mirrors zoneIdToSlime.
private ZoneId zoneIdFromSlime(Inspector object) {
return ZoneId.from(object.field(environmentField).asString(), object.field(regionField).asString());
}
// Deserializes an application version. A missing object, or one without a build
// number, is treated as the unknown version.
private ApplicationVersion applicationVersionFromSlime(Inspector object) {
if ( ! object.valid()) return ApplicationVersion.unknown;
OptionalLong applicationBuildNumber = Serializers.optionalLong(object.field(applicationBuildNumberField));
if (applicationBuildNumber.isEmpty())
return ApplicationVersion.unknown;
Optional<SourceRevision> sourceRevision = sourceRevisionFromSlime(object.field(sourceRevisionField));
Optional<String> authorEmail = Serializers.optionalString(object.field(authorEmailField));
Optional<Version> compileVersion = Serializers.optionalString(object.field(compileVersionField)).map(Version::fromString);
Optional<Instant> buildTime = Serializers.optionalInstant(object.field(buildTimeField));
Optional<String> sourceUrl = Serializers.optionalString(object.field(sourceUrlField));
Optional<String> commit = Serializers.optionalString(object.field(commitField));
return new ApplicationVersion(sourceRevision, applicationBuildNumber, authorEmail, compileVersion, buildTime, sourceUrl, commit);
}
/**
 * Reads a source revision, treating both an absent object and one whose fields are
 * all blank as empty. Stored data may contain all-empty revision objects.
 */
private Optional<SourceRevision> sourceRevisionFromSlime(Inspector object) {
    if ( ! object.valid()) return Optional.empty();
    String repository = object.field(repositoryField).asString();
    String branch = object.field(branchField).asString();
    String commit = object.field(commitField).asString();
    boolean allBlank = repository.isBlank() && branch.isBlank() && commit.isBlank();
    return allBlank ? Optional.empty() : Optional.of(new SourceRevision(repository, branch, commit));
}
// Reads job pauses; entries whose job name is no longer a known JobType are
// silently dropped (fromOptionalJobName returns empty for them).
private Map<JobType, Instant> jobPausesFromSlime(Inspector object) {
Map<JobType, Instant> jobPauses = new HashMap<>();
object.field(jobStatusField).traverse((ArrayTraverser) (__, jobPauseObject) ->
JobType.fromOptionalJobName(jobPauseObject.field(jobTypeField).asString())
.ifPresent(jobType -> jobPauses.put(jobType,
Instant.ofEpochMilli(jobPauseObject.field(pausedUntilField).asLong()))));
return jobPauses;
}
// Deserializes a change, accumulating the optional platform version, application
// version and pin flag onto an initially empty change.
private Change changeFromSlime(Inspector object) {
if ( ! object.valid()) return Change.empty();
Inspector versionFieldValue = object.field(versionField);
Change change = Change.empty();
if (versionFieldValue.valid())
change = Change.of(Version.fromString(versionFieldValue.asString()));
// The application version, when present, is stored inline in this same object.
if (object.field(applicationBuildNumberField).valid())
change = change.with(applicationVersionFromSlime(object));
if (object.field(pinnedField).asBool())
change = change.withPin();
return change;
}
// Deserializes assigned rotations. Regions are not stored: they are re-derived from
// the current deployment spec. putIfAbsent keeps the first entry per endpoint id.
private List<AssignedRotation> assignedRotationsFromSlime(DeploymentSpec deploymentSpec, InstanceName instance, Inspector root) {
var assignedRotations = new LinkedHashMap<EndpointId, AssignedRotation>();
root.field(assignedRotationsField).traverse((ArrayTraverser) (idx, inspector) -> {
var clusterId = new ClusterSpec.Id(inspector.field(assignedRotationClusterField).asString());
var endpointId = EndpointId.of(inspector.field(assignedRotationEndpointField).asString());
var rotationId = new RotationId(inspector.field(assignedRotationRotationField).asString());
// Instance may be missing from the spec (e.g. removed); fall back to no regions.
var regions = deploymentSpec.instance(instance)
.map(spec -> globalEndpointRegions(spec, endpointId))
.orElse(Set.of());
assignedRotations.putIfAbsent(endpointId, new AssignedRotation(clusterId, endpointId, rotationId, regions));
});
return List.copyOf(assignedRotations.values());
}
// Returns the regions of the given endpoint. With a legacy globalServiceId, all of the
// instance's declared zones' regions apply; otherwise only the regions of the matching
// declared endpoint.
private Set<RegionName> globalEndpointRegions(DeploymentInstanceSpec spec, EndpointId endpointId) {
if (spec.globalServiceId().isPresent())
return spec.zones().stream()
.flatMap(zone -> zone.region().stream())
.collect(Collectors.toSet());
return spec.endpoints().stream()
.filter(endpoint -> endpoint.endpointId().equals(endpointId.id()))
.flatMap(endpoint -> endpoint.regions().stream())
.collect(Collectors.toSet());
}
} |
> Or did you clean them out by hand? Yes. > Are the fields all empty? In the production data, `sourceRevision` is either a) `null`, b) an object with all non-empty fields or c) an object with all empty fields. | private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
applicationVersion.buildNumber().ifPresent(buildNumber -> object.setLong(applicationBuildNumberField, buildNumber));
applicationVersion.source().ifPresent(source -> toSlime(source, object.setObject(sourceRevisionField)));
applicationVersion.authorEmail().ifPresent(email -> object.setString(authorEmailField, email));
applicationVersion.compileVersion().ifPresent(version -> object.setString(compileVersionField, version.toString()));
applicationVersion.buildTime().ifPresent(time -> object.setLong(buildTimeField, time.toEpochMilli()));
applicationVersion.sourceUrl().ifPresent(url -> object.setString(sourceUrlField, url));
applicationVersion.commit().ifPresent(commit -> object.setString(commitField, commit));
} | applicationVersion.buildNumber().ifPresent(buildNumber -> object.setLong(applicationBuildNumberField, buildNumber)); | private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
applicationVersion.buildNumber().ifPresent(buildNumber -> object.setLong(applicationBuildNumberField, buildNumber));
applicationVersion.source().ifPresent(source -> toSlime(source, object.setObject(sourceRevisionField)));
applicationVersion.authorEmail().ifPresent(email -> object.setString(authorEmailField, email));
applicationVersion.compileVersion().ifPresent(version -> object.setString(compileVersionField, version.toString()));
applicationVersion.buildTime().ifPresent(time -> object.setLong(buildTimeField, time.toEpochMilli()));
applicationVersion.sourceUrl().ifPresent(url -> object.setString(sourceUrlField, url));
applicationVersion.commit().ifPresent(commit -> object.setString(commitField, commit));
} | class ApplicationSerializer {
// --- Serialized field names. These values are part of the persisted format: ---
// --- never change an existing value; some carry a legacy "Field" suffix.     ---
private static final String idField = "id";
private static final String createdAtField = "createdAt";
private static final String deploymentSpecField = "deploymentSpecField";
private static final String validationOverridesField = "validationOverrides";
private static final String instancesField = "instances";
private static final String deployingField = "deployingField";
private static final String projectIdField = "projectId";
private static final String latestVersionField = "latestVersion";
private static final String pinnedField = "pinned";
private static final String deploymentIssueField = "deploymentIssueId";
private static final String ownershipIssueIdField = "ownershipIssueId";
private static final String ownerField = "confirmedOwner";
private static final String majorVersionField = "majorVersion";
private static final String writeQualityField = "writeQuality";
private static final String queryQualityField = "queryQuality";
private static final String pemDeployKeysField = "pemDeployKeys";
private static final String assignedRotationClusterField = "clusterId";
private static final String assignedRotationRotationField = "rotationId";
private static final String versionField = "version";
// Instance level fields.
private static final String instanceNameField = "instanceName";
private static final String deploymentsField = "deployments";
private static final String deploymentJobsField = "deploymentJobs";
private static final String assignedRotationsField = "assignedRotations";
private static final String assignedRotationEndpointField = "endpointId";
// Deployment level fields.
private static final String zoneField = "zone";
private static final String environmentField = "environment";
private static final String regionField = "region";
private static final String deployTimeField = "deployTime";
private static final String applicationBuildNumberField = "applicationBuildNumber";
private static final String applicationPackageRevisionField = "applicationPackageRevision";
private static final String sourceRevisionField = "sourceRevision";
private static final String repositoryField = "repositoryField";
private static final String branchField = "branchField";
private static final String commitField = "commitField";
private static final String authorEmailField = "authorEmailField";
private static final String compileVersionField = "compileVersion";
private static final String buildTimeField = "buildTime";
private static final String sourceUrlField = "sourceUrl";
private static final String lastQueriedField = "lastQueried";
private static final String lastWrittenField = "lastWritten";
private static final String lastQueriesPerSecondField = "lastQueriesPerSecond";
private static final String lastWritesPerSecondField = "lastWritesPerSecond";
// Job pause fields.
private static final String jobStatusField = "jobStatus";
private static final String jobTypeField = "jobType";
private static final String pausedUntilField = "pausedUntil";
// Cluster info fields.
private static final String clusterInfoField = "clusterInfo";
private static final String clusterInfoFlavorField = "flavor";
private static final String clusterInfoCostField = "cost";
private static final String clusterInfoCpuField = "flavorCpu";
private static final String clusterInfoMemField = "flavorMem";
private static final String clusterInfoDiskField = "flavorDisk";
private static final String clusterInfoTypeField = "clusterType";
private static final String clusterInfoHostnamesField = "hostnames";
// Deployment metrics fields.
private static final String deploymentMetricsField = "metrics";
private static final String deploymentMetricsQPSField = "queriesPerSecond";
private static final String deploymentMetricsWPSField = "writesPerSecond";
private static final String deploymentMetricsDocsField = "documentCount";
private static final String deploymentMetricsQueryLatencyField = "queryLatencyMillis";
private static final String deploymentMetricsWriteLatencyField = "writeLatencyMillis";
private static final String deploymentMetricsUpdateTime = "lastUpdated";
private static final String deploymentMetricsWarningsField = "warnings";
// Rotation status fields. NOTE(review): "rotationStatus2" — presumably a versioned
// replacement of an older field name; keep as-is.
private static final String rotationStatusField = "rotationStatus2";
private static final String rotationIdField = "rotationId";
private static final String lastUpdatedField = "lastUpdated";
private static final String rotationStateField = "state";
private static final String statusField = "status";
// Cache of deserialized applications, keyed by a hash of the raw data (see fromSlime(byte[])).
private final Cache<Long, Application> cache = CacheBuilder.newBuilder().maximumSize(1000).build();
// Serializes a complete application, including all its instances, to slime.
public Slime toSlime(Application application) {
Slime slime = new Slime();
Cursor root = slime.setObject();
root.setString(idField, application.id().serialized());
root.setLong(createdAtField, application.createdAt().toEpochMilli());
root.setString(deploymentSpecField, application.deploymentSpec().xmlForm());
root.setString(validationOverridesField, application.validationOverrides().xmlForm());
// Optional values are only written when present; they read back as empty.
application.projectId().ifPresent(projectId -> root.setLong(projectIdField, projectId));
application.deploymentIssueId().ifPresent(jiraIssueId -> root.setString(deploymentIssueField, jiraIssueId.value()));
application.ownershipIssueId().ifPresent(issueId -> root.setString(ownershipIssueIdField, issueId.value()));
application.owner().ifPresent(owner -> root.setString(ownerField, owner.username()));
application.majorVersion().ifPresent(majorVersion -> root.setLong(majorVersionField, majorVersion));
root.setDouble(queryQualityField, application.metrics().queryServiceQuality());
root.setDouble(writeQualityField, application.metrics().writeServiceQuality());
deployKeysToSlime(application.deployKeys(), root.setArray(pemDeployKeysField));
application.latestVersion().ifPresent(version -> toSlime(version, root.setObject(latestVersionField)));
instancesToSlime(application, root.setArray(instancesField));
return slime;
}
// Serializes every instance of the given application into the given array cursor.
private void instancesToSlime(Application application, Cursor array) {
for (Instance instance : application.instances().values()) {
Cursor instanceObject = array.addObject();
instanceObject.setString(instanceNameField, instance.name().value());
deploymentsToSlime(instance.deployments().values(), instanceObject.setArray(deploymentsField));
toSlime(instance.jobPauses(), instanceObject.setObject(deploymentJobsField));
assignedRotationsToSlime(instance.rotations(), instanceObject, assignedRotationsField);
toSlime(instance.rotationStatus(), instanceObject.setArray(rotationStatusField));
toSlime(instance.change(), instanceObject, deployingField);
}
}
// Adds each deploy key to the given array, PEM encoded.
private void deployKeysToSlime(Set<PublicKey> deployKeys, Cursor array) {
deployKeys.forEach(key -> array.addString(KeyUtils.toPem(key)));
}
// Serializes each deployment as a new object in the given array cursor.
private void deploymentsToSlime(Collection<Deployment> deployments, Cursor array) {
for (Deployment deployment : deployments)
deploymentToSlime(deployment, array.addObject());
}
// Serializes a single deployment into the given object cursor.
private void deploymentToSlime(Deployment deployment, Cursor object) {
zoneIdToSlime(deployment.zone(), object.setObject(zoneField));
object.setString(versionField, deployment.version().toString());
object.setLong(deployTimeField, deployment.at().toEpochMilli());
toSlime(deployment.applicationVersion(), object.setObject(applicationPackageRevisionField));
clusterInfoToSlime(deployment.clusterInfo(), object);
deploymentMetricsToSlime(deployment.metrics(), object);
// Activity values are optional: absent values are simply not written.
deployment.activity().lastQueried().ifPresent(instant -> object.setLong(lastQueriedField, instant.toEpochMilli()));
deployment.activity().lastWritten().ifPresent(instant -> object.setLong(lastWrittenField, instant.toEpochMilli()));
deployment.activity().lastQueriesPerSecond().ifPresent(value -> object.setDouble(lastQueriesPerSecondField, value));
deployment.activity().lastWritesPerSecond().ifPresent(value -> object.setDouble(lastWritesPerSecondField, value));
}
// Serializes deployment metrics under the metrics sub-object of the given cursor.
private void deploymentMetricsToSlime(DeploymentMetrics metrics, Cursor object) {
Cursor root = object.setObject(deploymentMetricsField);
root.setDouble(deploymentMetricsQPSField, metrics.queriesPerSecond());
root.setDouble(deploymentMetricsWPSField, metrics.writesPerSecond());
root.setDouble(deploymentMetricsDocsField, metrics.documentCount());
root.setDouble(deploymentMetricsQueryLatencyField, metrics.queryLatencyMillis());
root.setDouble(deploymentMetricsWriteLatencyField, metrics.writeLatencyMillis());
metrics.instant().ifPresent(instant -> root.setLong(deploymentMetricsUpdateTime, instant.toEpochMilli()));
// The warnings object is omitted entirely when there are none.
if (!metrics.warnings().isEmpty()) {
Cursor warningsObject = root.setObject(deploymentMetricsWarningsField);
metrics.warnings().forEach((warning, count) -> warningsObject.setLong(warning.name(), count));
}
}
// Serializes the cluster info map, one sub-object per cluster id.
private void clusterInfoToSlime(Map<ClusterSpec.Id, ClusterInfo> clusters, Cursor object) {
Cursor root = object.setObject(clusterInfoField);
for (Map.Entry<ClusterSpec.Id, ClusterInfo> entry : clusters.entrySet()) {
toSlime(entry.getValue(), root.setObject(entry.getKey().value()));
}
}
// Serializes a single cluster's info into the given object cursor.
private void toSlime(ClusterInfo info, Cursor object) {
object.setString(clusterInfoFlavorField, info.getFlavor());
object.setLong(clusterInfoCostField, info.getFlavorCost());
object.setDouble(clusterInfoCpuField, info.getFlavorCPU());
object.setDouble(clusterInfoMemField, info.getFlavorMem());
object.setDouble(clusterInfoDiskField, info.getFlavorDisk());
object.setString(clusterInfoTypeField, info.getClusterType().name());
Cursor array = object.setArray(clusterInfoHostnamesField);
for (String host : info.getHostnames()) {
array.addString(host);
}
}
// Serializes a zone id as its environment and region parts.
private void zoneIdToSlime(ZoneId zone, Cursor object) {
object.setString(environmentField, zone.environment().value());
object.setString(regionField, zone.region().value());
}
// Serializes a source revision's repository, branch and commit.
private void toSlime(SourceRevision sourceRevision, Cursor object) {
object.setString(repositoryField, sourceRevision.repository());
object.setString(branchField, sourceRevision.branch());
object.setString(commitField, sourceRevision.commit());
}
// Serializes job pauses as an array of { jobType, pausedUntil } objects.
private void toSlime(Map<JobType, Instant> jobPauses, Cursor cursor) {
Cursor jobStatusArray = cursor.setArray(jobStatusField);
jobPauses.forEach((type, until) -> {
Cursor jobPauseObject = jobStatusArray.addObject();
jobPauseObject.setString(jobTypeField, type.jobName());
jobPauseObject.setLong(pausedUntilField, until.toEpochMilli());
});
}
// Serializes a change, writing nothing at all when the change is empty.
private void toSlime(Change deploying, Cursor parentObject, String fieldName) {
if (deploying.isEmpty()) return;
Cursor object = parentObject.setObject(fieldName);
if (deploying.platform().isPresent())
object.setString(versionField, deploying.platform().get().toString());
if (deploying.application().isPresent())
toSlime(deploying.application().get(), object);
// Written only when pinned; absent reads back as false.
if (deploying.isPinned())
object.setBool(pinnedField, true);
}
// Serializes rotation status: one object per rotation, each with a per-zone status array.
private void toSlime(RotationStatus status, Cursor array) {
status.asMap().forEach((rotationId, targets) -> {
Cursor rotationObject = array.addObject();
rotationObject.setString(rotationIdField, rotationId.asString());
rotationObject.setLong(lastUpdatedField, targets.lastUpdated().toEpochMilli());
Cursor statusArray = rotationObject.setArray(statusField);
targets.asMap().forEach((zone, state) -> {
Cursor statusObject = statusArray.addObject();
zoneIdToSlime(zone, statusObject);
statusObject.setString(rotationStateField, state.name());
});
});
}
// Serializes assigned rotations as { endpointId, rotationId, clusterId } objects.
// Regions are intentionally not stored; they are re-derived on deserialization.
private void assignedRotationsToSlime(List<AssignedRotation> rotations, Cursor parent, String fieldName) {
var rotationsArray = parent.setArray(fieldName);
for (var rotation : rotations) {
var object = rotationsArray.addObject();
object.setString(assignedRotationEndpointField, rotation.endpointId().id());
object.setString(assignedRotationRotationField, rotation.rotationId().asString());
object.setString(assignedRotationClusterField, rotation.clusterId().value());
}
}
// Deserializes an application from raw JSON bytes, caching the result keyed on a
// 64-bit sipHash of the bytes so identical data is not re-parsed.
public Application fromSlime(byte[] data) {
var key = Hashing.sipHash24().hashBytes(data).asLong();
try {
return cache.get(key, () -> fromSlime(SlimeUtils.jsonToSlime(data)));
} catch (ExecutionException e) {
throw new UncheckedExecutionException(e);
}
}
// Deserializes an Application from its slime representation.
// Optional fields that were not written read back as empty via the Serializers helpers.
private Application fromSlime(Slime slime) {
Inspector root = slime.get();
TenantAndApplicationId id = TenantAndApplicationId.fromSerialized(root.field(idField).asString());
Instant createdAt = Instant.ofEpochMilli(root.field(createdAtField).asLong());
// NOTE(review): second argument 'false' — presumably skips xml validation; confirm against DeploymentSpec.fromXml.
DeploymentSpec deploymentSpec = DeploymentSpec.fromXml(root.field(deploymentSpecField).asString(), false);
ValidationOverrides validationOverrides = ValidationOverrides.fromXml(root.field(validationOverridesField).asString());
Optional<IssueId> deploymentIssueId = Serializers.optionalString(root.field(deploymentIssueField)).map(IssueId::from);
Optional<IssueId> ownershipIssueId = Serializers.optionalString(root.field(ownershipIssueIdField)).map(IssueId::from);
Optional<User> owner = Serializers.optionalString(root.field(ownerField)).map(User::from);
OptionalInt majorVersion = Serializers.optionalInteger(root.field(majorVersionField));
ApplicationMetrics metrics = new ApplicationMetrics(root.field(queryQualityField).asDouble(),
root.field(writeQualityField).asDouble());
Set<PublicKey> deployKeys = deployKeysFromSlime(root.field(pemDeployKeysField));
// Instance deserialization needs the deployment spec to resolve rotation regions.
List<Instance> instances = instancesFromSlime(id, deploymentSpec, root.field(instancesField));
OptionalLong projectId = Serializers.optionalLong(root.field(projectIdField));
Optional<ApplicationVersion> latestVersion = latestVersionFromSlime(root.field(latestVersionField));
return new Application(id, createdAt, deploymentSpec, validationOverrides,
deploymentIssueId, ownershipIssueId, owner, majorVersion, metrics,
deployKeys, projectId, latestVersion, instances);
}
// Reads the latest application version, or empty if the field is absent.
private Optional<ApplicationVersion> latestVersionFromSlime(Inspector latestVersionObject) {
if (latestVersionObject.valid())
return Optional.of(applicationVersionFromSlime(latestVersionObject));
return Optional.empty();
}
/** Deserializes the instances array of an application, in serialized order. */
private List<Instance> instancesFromSlime(TenantAndApplicationId id, DeploymentSpec deploymentSpec, Inspector field) {
    List<Instance> result = new ArrayList<>();
    field.traverse((ArrayTraverser) (idx, instanceObject) -> {
        InstanceName name = InstanceName.from(instanceObject.field(instanceNameField).asString());
        result.add(new Instance(id.instance(name),
                                deploymentsFromSlime(instanceObject.field(deploymentsField)),
                                jobPausesFromSlime(instanceObject.field(deploymentJobsField)),
                                assignedRotationsFromSlime(deploymentSpec, name, instanceObject),
                                rotationStatusFromSlime(instanceObject),
                                changeFromSlime(instanceObject.field(deployingField))));
    });
    return result;
}
/** Reads the PEM-encoded deploy keys, preserving serialized order. */
private Set<PublicKey> deployKeysFromSlime(Inspector array) {
    Set<PublicKey> deployKeys = new LinkedHashSet<>();
    array.traverse((ArrayTraverser) (idx, entry) -> deployKeys.add(KeyUtils.fromPemEncodedPublicKey(entry.asString())));
    return deployKeys;
}
/** Deserializes the deployments array of an instance. */
private List<Deployment> deploymentsFromSlime(Inspector array) {
    List<Deployment> result = new ArrayList<>();
    array.traverse((ArrayTraverser) (idx, entry) -> result.add(deploymentFromSlime(entry)));
    return result;
}
/** Deserializes a single deployment. */
private Deployment deploymentFromSlime(Inspector deploymentObject) {
    ZoneId zone = zoneIdFromSlime(deploymentObject.field(zoneField));
    ApplicationVersion revision = applicationVersionFromSlime(deploymentObject.field(applicationPackageRevisionField));
    Version platform = Version.fromString(deploymentObject.field(versionField).asString());
    Instant deployedAt = Instant.ofEpochMilli(deploymentObject.field(deployTimeField).asLong());
    // Activity fields are all optional; absent ones are handled by DeploymentActivity.create.
    DeploymentActivity activity =
            DeploymentActivity.create(Serializers.optionalInstant(deploymentObject.field(lastQueriedField)),
                                      Serializers.optionalInstant(deploymentObject.field(lastWrittenField)),
                                      Serializers.optionalDouble(deploymentObject.field(lastQueriesPerSecondField)),
                                      Serializers.optionalDouble(deploymentObject.field(lastWritesPerSecondField)));
    return new Deployment(zone, revision, platform, deployedAt,
                          clusterInfoMapFromSlime(deploymentObject.field(clusterInfoField)),
                          deploymentMetricsFromSlime(deploymentObject.field(deploymentMetricsField)),
                          activity);
}
/** Deserializes the stored metrics of a deployment. */
private DeploymentMetrics deploymentMetricsFromSlime(Inspector object) {
    // Consistency: read the optional timestamp through the shared Serializers helper,
    // as done for the other optional instants in this serializer (lastQueried, buildTime, ...),
    // instead of a hand-rolled valid()/asLong() ternary.
    Optional<Instant> updateTime = Serializers.optionalInstant(object.field(deploymentMetricsUpdateTime));
    return new DeploymentMetrics(object.field(deploymentMetricsQPSField).asDouble(),
                                 object.field(deploymentMetricsWPSField).asDouble(),
                                 object.field(deploymentMetricsDocsField).asDouble(),
                                 object.field(deploymentMetricsQueryLatencyField).asDouble(),
                                 object.field(deploymentMetricsWriteLatencyField).asDouble(),
                                 updateTime,
                                 deploymentWarningsFrom(object.field(deploymentMetricsWarningsField)));
}
/** Reads the per-warning counts as an unmodifiable map. */
private Map<DeploymentMetrics.Warning, Integer> deploymentWarningsFrom(Inspector object) {
    Map<DeploymentMetrics.Warning, Integer> counts = new HashMap<>();
    object.traverse((ObjectTraverser) (warningName, count) ->
            counts.put(DeploymentMetrics.Warning.valueOf(warningName), (int) count.asLong()));
    return Collections.unmodifiableMap(counts);
}
/** Deserializes the rotation status of an instance, preserving serialized order. */
private RotationStatus rotationStatusFromSlime(Inspector parentObject) {
    var statusByRotation = new LinkedHashMap<RotationId, RotationStatus.Targets>();
    parentObject.field(rotationStatusField).traverse((ArrayTraverser) (idx, statusObject) -> {
        var rotation = new RotationId(statusObject.field(rotationIdField).asString());
        var targets = new RotationStatus.Targets(singleRotationStatusFromSlime(statusObject.field(statusField)),
                                                 Instant.ofEpochMilli(statusObject.field(lastUpdatedField).asLong()));
        statusByRotation.put(rotation, targets);
    });
    return RotationStatus.from(statusByRotation);
}
/** Reads the per-zone state of one rotation; empty when the field is missing. */
private Map<ZoneId, RotationState> singleRotationStatusFromSlime(Inspector object) {
    if ( ! object.valid()) return Collections.emptyMap();
    Map<ZoneId, RotationState> stateByZone = new LinkedHashMap<>();
    object.traverse((ArrayTraverser) (idx, entry) ->
            stateByZone.put(zoneIdFromSlime(entry),
                            RotationState.valueOf(entry.field(rotationStateField).asString())));
    return Collections.unmodifiableMap(stateByZone);
}
/** Deserializes the per-cluster info map, keyed by cluster id. */
private Map<ClusterSpec.Id, ClusterInfo> clusterInfoMapFromSlime(Inspector object) {
    Map<ClusterSpec.Id, ClusterInfo> clusters = new HashMap<>();
    object.traverse((String clusterName, Inspector value) ->
            clusters.put(new ClusterSpec.Id(clusterName), clusterInfoFromSlime(value)));
    return clusters;
}
/** Deserializes the stored info for a single cluster. */
private ClusterInfo clusterInfoFromSlime(Inspector inspector) {
    List<String> hosts = new ArrayList<>();
    inspector.field(clusterInfoHostnamesField).traverse((ArrayTraverser) (idx, host) -> hosts.add(host.asString()));
    return new ClusterInfo(inspector.field(clusterInfoFlavorField).asString(),
                           (int) inspector.field(clusterInfoCostField).asLong(),
                           inspector.field(clusterInfoCpuField).asDouble(),
                           inspector.field(clusterInfoMemField).asDouble(),
                           inspector.field(clusterInfoDiskField).asDouble(),
                           ClusterSpec.Type.from(inspector.field(clusterInfoTypeField).asString()),
                           hosts);
}
/** Reads a zone id from its environment and region fields. */
private ZoneId zoneIdFromSlime(Inspector object) {
    String environment = object.field(environmentField).asString();
    String region = object.field(regionField).asString();
    return ZoneId.from(environment, region);
}
/**
 * Deserializes an application version.
 * Returns {@code ApplicationVersion.unknown} when the object is absent, or has no build number.
 */
private ApplicationVersion applicationVersionFromSlime(Inspector object) {
    if ( ! object.valid()) return ApplicationVersion.unknown;
    OptionalLong buildNumber = Serializers.optionalLong(object.field(applicationBuildNumberField));
    if (buildNumber.isEmpty()) return ApplicationVersion.unknown;
    return new ApplicationVersion(sourceRevisionFromSlime(object.field(sourceRevisionField)),
                                  buildNumber,
                                  Serializers.optionalString(object.field(authorEmailField)),
                                  Serializers.optionalString(object.field(compileVersionField)).map(Version::fromString),
                                  Serializers.optionalInstant(object.field(buildTimeField)),
                                  Serializers.optionalString(object.field(sourceUrlField)),
                                  Serializers.optionalString(object.field(commitField)));
}
/** Reads a source revision; empty when the object is absent or all of its parts are blank. */
private Optional<SourceRevision> sourceRevisionFromSlime(Inspector object) {
    if ( ! object.valid()) return Optional.empty();
    String repository = object.field(repositoryField).asString();
    String branch = object.field(branchField).asString();
    String commit = object.field(commitField).asString();
    boolean allBlank = repository.isBlank() && branch.isBlank() && commit.isBlank();
    return allBlank ? Optional.empty() : Optional.of(new SourceRevision(repository, branch, commit));
}
/** Reads the pause-until instant per job type; entries with unknown job names are skipped. */
private Map<JobType, Instant> jobPausesFromSlime(Inspector object) {
    Map<JobType, Instant> pausedUntilByType = new HashMap<>();
    object.field(jobStatusField).traverse((ArrayTraverser) (idx, pauseObject) ->
            JobType.fromOptionalJobName(pauseObject.field(jobTypeField).asString())
                   .ifPresent(type -> pausedUntilByType.put(
                           type, Instant.ofEpochMilli(pauseObject.field(pausedUntilField).asLong()))));
    return pausedUntilByType;
}
/** Deserializes a change: an optional platform version, an optional application version, and a pin flag. */
private Change changeFromSlime(Inspector object) {
    if ( ! object.valid()) return Change.empty();
    Change change = Change.empty();
    Inspector platform = object.field(versionField);
    if (platform.valid())
        change = Change.of(Version.fromString(platform.asString()));
    if (object.field(applicationBuildNumberField).valid())
        change = change.with(applicationVersionFromSlime(object));
    if (object.field(pinnedField).asBool())
        change = change.withPin();
    return change;
}
/** Deserializes the rotations assigned to an instance; the first entry per endpoint id wins. */
private List<AssignedRotation> assignedRotationsFromSlime(DeploymentSpec deploymentSpec, InstanceName instance, Inspector root) {
    var byEndpoint = new LinkedHashMap<EndpointId, AssignedRotation>();
    root.field(assignedRotationsField).traverse((ArrayTraverser) (idx, entry) -> {
        var endpointId = EndpointId.of(entry.field(assignedRotationEndpointField).asString());
        if (byEndpoint.containsKey(endpointId)) return; // keep the first assignment for this endpoint
        var cluster = new ClusterSpec.Id(entry.field(assignedRotationClusterField).asString());
        var rotation = new RotationId(entry.field(assignedRotationRotationField).asString());
        Set<RegionName> regions = deploymentSpec.instance(instance)
                                                .map(spec -> globalEndpointRegions(spec, endpointId))
                                                .orElse(Set.of());
        byEndpoint.put(endpointId, new AssignedRotation(cluster, endpointId, rotation, regions));
    });
    return List.copyOf(byEndpoint.values());
}
/**
 * Returns the regions covered by the given endpoint — or, when a global service id is declared,
 * the regions of all zones declared for the instance.
 */
private Set<RegionName> globalEndpointRegions(DeploymentInstanceSpec spec, EndpointId endpointId) {
    var regions = spec.globalServiceId().isPresent()
            ? spec.zones().stream()
                  .flatMap(zone -> zone.region().stream())
            : spec.endpoints().stream()
                  .filter(endpoint -> endpoint.endpointId().equals(endpointId.id()))
                  .flatMap(endpoint -> endpoint.regions().stream());
    return regions.collect(Collectors.toSet());
}
} | class ApplicationSerializer {
private static final String idField = "id";
private static final String createdAtField = "createdAt";
private static final String deploymentSpecField = "deploymentSpecField";
private static final String validationOverridesField = "validationOverrides";
private static final String instancesField = "instances";
private static final String deployingField = "deployingField";
private static final String projectIdField = "projectId";
private static final String latestVersionField = "latestVersion";
private static final String pinnedField = "pinned";
private static final String deploymentIssueField = "deploymentIssueId";
private static final String ownershipIssueIdField = "ownershipIssueId";
private static final String ownerField = "confirmedOwner";
private static final String majorVersionField = "majorVersion";
private static final String writeQualityField = "writeQuality";
private static final String queryQualityField = "queryQuality";
private static final String pemDeployKeysField = "pemDeployKeys";
private static final String assignedRotationClusterField = "clusterId";
private static final String assignedRotationRotationField = "rotationId";
private static final String versionField = "version";
private static final String instanceNameField = "instanceName";
private static final String deploymentsField = "deployments";
private static final String deploymentJobsField = "deploymentJobs";
private static final String assignedRotationsField = "assignedRotations";
private static final String assignedRotationEndpointField = "endpointId";
private static final String zoneField = "zone";
private static final String environmentField = "environment";
private static final String regionField = "region";
private static final String deployTimeField = "deployTime";
private static final String applicationBuildNumberField = "applicationBuildNumber";
private static final String applicationPackageRevisionField = "applicationPackageRevision";
private static final String sourceRevisionField = "sourceRevision";
private static final String repositoryField = "repositoryField";
private static final String branchField = "branchField";
private static final String commitField = "commitField";
private static final String authorEmailField = "authorEmailField";
private static final String compileVersionField = "compileVersion";
private static final String buildTimeField = "buildTime";
private static final String sourceUrlField = "sourceUrl";
private static final String lastQueriedField = "lastQueried";
private static final String lastWrittenField = "lastWritten";
private static final String lastQueriesPerSecondField = "lastQueriesPerSecond";
private static final String lastWritesPerSecondField = "lastWritesPerSecond";
private static final String jobStatusField = "jobStatus";
private static final String jobTypeField = "jobType";
private static final String pausedUntilField = "pausedUntil";
private static final String clusterInfoField = "clusterInfo";
private static final String clusterInfoFlavorField = "flavor";
private static final String clusterInfoCostField = "cost";
private static final String clusterInfoCpuField = "flavorCpu";
private static final String clusterInfoMemField = "flavorMem";
private static final String clusterInfoDiskField = "flavorDisk";
private static final String clusterInfoTypeField = "clusterType";
private static final String clusterInfoHostnamesField = "hostnames";
private static final String deploymentMetricsField = "metrics";
private static final String deploymentMetricsQPSField = "queriesPerSecond";
private static final String deploymentMetricsWPSField = "writesPerSecond";
private static final String deploymentMetricsDocsField = "documentCount";
private static final String deploymentMetricsQueryLatencyField = "queryLatencyMillis";
private static final String deploymentMetricsWriteLatencyField = "writeLatencyMillis";
private static final String deploymentMetricsUpdateTime = "lastUpdated";
private static final String deploymentMetricsWarningsField = "warnings";
private static final String rotationStatusField = "rotationStatus2";
private static final String rotationIdField = "rotationId";
private static final String lastUpdatedField = "lastUpdated";
private static final String rotationStateField = "state";
private static final String statusField = "status";
private final Cache<Long, Application> cache = CacheBuilder.newBuilder().maximumSize(1000).build();
public Slime toSlime(Application application) {
Slime slime = new Slime();
Cursor root = slime.setObject();
root.setString(idField, application.id().serialized());
root.setLong(createdAtField, application.createdAt().toEpochMilli());
root.setString(deploymentSpecField, application.deploymentSpec().xmlForm());
root.setString(validationOverridesField, application.validationOverrides().xmlForm());
application.projectId().ifPresent(projectId -> root.setLong(projectIdField, projectId));
application.deploymentIssueId().ifPresent(jiraIssueId -> root.setString(deploymentIssueField, jiraIssueId.value()));
application.ownershipIssueId().ifPresent(issueId -> root.setString(ownershipIssueIdField, issueId.value()));
application.owner().ifPresent(owner -> root.setString(ownerField, owner.username()));
application.majorVersion().ifPresent(majorVersion -> root.setLong(majorVersionField, majorVersion));
root.setDouble(queryQualityField, application.metrics().queryServiceQuality());
root.setDouble(writeQualityField, application.metrics().writeServiceQuality());
deployKeysToSlime(application.deployKeys(), root.setArray(pemDeployKeysField));
application.latestVersion().ifPresent(version -> toSlime(version, root.setObject(latestVersionField)));
instancesToSlime(application, root.setArray(instancesField));
return slime;
}
private void instancesToSlime(Application application, Cursor array) {
for (Instance instance : application.instances().values()) {
Cursor instanceObject = array.addObject();
instanceObject.setString(instanceNameField, instance.name().value());
deploymentsToSlime(instance.deployments().values(), instanceObject.setArray(deploymentsField));
toSlime(instance.jobPauses(), instanceObject.setObject(deploymentJobsField));
assignedRotationsToSlime(instance.rotations(), instanceObject, assignedRotationsField);
toSlime(instance.rotationStatus(), instanceObject.setArray(rotationStatusField));
toSlime(instance.change(), instanceObject, deployingField);
}
}
private void deployKeysToSlime(Set<PublicKey> deployKeys, Cursor array) {
deployKeys.forEach(key -> array.addString(KeyUtils.toPem(key)));
}
private void deploymentsToSlime(Collection<Deployment> deployments, Cursor array) {
for (Deployment deployment : deployments)
deploymentToSlime(deployment, array.addObject());
}
private void deploymentToSlime(Deployment deployment, Cursor object) {
zoneIdToSlime(deployment.zone(), object.setObject(zoneField));
object.setString(versionField, deployment.version().toString());
object.setLong(deployTimeField, deployment.at().toEpochMilli());
toSlime(deployment.applicationVersion(), object.setObject(applicationPackageRevisionField));
clusterInfoToSlime(deployment.clusterInfo(), object);
deploymentMetricsToSlime(deployment.metrics(), object);
deployment.activity().lastQueried().ifPresent(instant -> object.setLong(lastQueriedField, instant.toEpochMilli()));
deployment.activity().lastWritten().ifPresent(instant -> object.setLong(lastWrittenField, instant.toEpochMilli()));
deployment.activity().lastQueriesPerSecond().ifPresent(value -> object.setDouble(lastQueriesPerSecondField, value));
deployment.activity().lastWritesPerSecond().ifPresent(value -> object.setDouble(lastWritesPerSecondField, value));
}
private void deploymentMetricsToSlime(DeploymentMetrics metrics, Cursor object) {
Cursor root = object.setObject(deploymentMetricsField);
root.setDouble(deploymentMetricsQPSField, metrics.queriesPerSecond());
root.setDouble(deploymentMetricsWPSField, metrics.writesPerSecond());
root.setDouble(deploymentMetricsDocsField, metrics.documentCount());
root.setDouble(deploymentMetricsQueryLatencyField, metrics.queryLatencyMillis());
root.setDouble(deploymentMetricsWriteLatencyField, metrics.writeLatencyMillis());
metrics.instant().ifPresent(instant -> root.setLong(deploymentMetricsUpdateTime, instant.toEpochMilli()));
if (!metrics.warnings().isEmpty()) {
Cursor warningsObject = root.setObject(deploymentMetricsWarningsField);
metrics.warnings().forEach((warning, count) -> warningsObject.setLong(warning.name(), count));
}
}
private void clusterInfoToSlime(Map<ClusterSpec.Id, ClusterInfo> clusters, Cursor object) {
Cursor root = object.setObject(clusterInfoField);
for (Map.Entry<ClusterSpec.Id, ClusterInfo> entry : clusters.entrySet()) {
toSlime(entry.getValue(), root.setObject(entry.getKey().value()));
}
}
private void toSlime(ClusterInfo info, Cursor object) {
object.setString(clusterInfoFlavorField, info.getFlavor());
object.setLong(clusterInfoCostField, info.getFlavorCost());
object.setDouble(clusterInfoCpuField, info.getFlavorCPU());
object.setDouble(clusterInfoMemField, info.getFlavorMem());
object.setDouble(clusterInfoDiskField, info.getFlavorDisk());
object.setString(clusterInfoTypeField, info.getClusterType().name());
Cursor array = object.setArray(clusterInfoHostnamesField);
for (String host : info.getHostnames()) {
array.addString(host);
}
}
private void zoneIdToSlime(ZoneId zone, Cursor object) {
object.setString(environmentField, zone.environment().value());
object.setString(regionField, zone.region().value());
}
private void toSlime(SourceRevision sourceRevision, Cursor object) {
object.setString(repositoryField, sourceRevision.repository());
object.setString(branchField, sourceRevision.branch());
object.setString(commitField, sourceRevision.commit());
}
private void toSlime(Map<JobType, Instant> jobPauses, Cursor cursor) {
Cursor jobStatusArray = cursor.setArray(jobStatusField);
jobPauses.forEach((type, until) -> {
Cursor jobPauseObject = jobStatusArray.addObject();
jobPauseObject.setString(jobTypeField, type.jobName());
jobPauseObject.setLong(pausedUntilField, until.toEpochMilli());
});
}
private void toSlime(Change deploying, Cursor parentObject, String fieldName) {
if (deploying.isEmpty()) return;
Cursor object = parentObject.setObject(fieldName);
if (deploying.platform().isPresent())
object.setString(versionField, deploying.platform().get().toString());
if (deploying.application().isPresent())
toSlime(deploying.application().get(), object);
if (deploying.isPinned())
object.setBool(pinnedField, true);
}
private void toSlime(RotationStatus status, Cursor array) {
status.asMap().forEach((rotationId, targets) -> {
Cursor rotationObject = array.addObject();
rotationObject.setString(rotationIdField, rotationId.asString());
rotationObject.setLong(lastUpdatedField, targets.lastUpdated().toEpochMilli());
Cursor statusArray = rotationObject.setArray(statusField);
targets.asMap().forEach((zone, state) -> {
Cursor statusObject = statusArray.addObject();
zoneIdToSlime(zone, statusObject);
statusObject.setString(rotationStateField, state.name());
});
});
}
private void assignedRotationsToSlime(List<AssignedRotation> rotations, Cursor parent, String fieldName) {
var rotationsArray = parent.setArray(fieldName);
for (var rotation : rotations) {
var object = rotationsArray.addObject();
object.setString(assignedRotationEndpointField, rotation.endpointId().id());
object.setString(assignedRotationRotationField, rotation.rotationId().asString());
object.setString(assignedRotationClusterField, rotation.clusterId().value());
}
}
public Application fromSlime(byte[] data) {
var key = Hashing.sipHash24().hashBytes(data).asLong();
try {
return cache.get(key, () -> fromSlime(SlimeUtils.jsonToSlime(data)));
} catch (ExecutionException e) {
throw new UncheckedExecutionException(e);
}
}
private Application fromSlime(Slime slime) {
Inspector root = slime.get();
TenantAndApplicationId id = TenantAndApplicationId.fromSerialized(root.field(idField).asString());
Instant createdAt = Instant.ofEpochMilli(root.field(createdAtField).asLong());
DeploymentSpec deploymentSpec = DeploymentSpec.fromXml(root.field(deploymentSpecField).asString(), false);
ValidationOverrides validationOverrides = ValidationOverrides.fromXml(root.field(validationOverridesField).asString());
Optional<IssueId> deploymentIssueId = Serializers.optionalString(root.field(deploymentIssueField)).map(IssueId::from);
Optional<IssueId> ownershipIssueId = Serializers.optionalString(root.field(ownershipIssueIdField)).map(IssueId::from);
Optional<User> owner = Serializers.optionalString(root.field(ownerField)).map(User::from);
OptionalInt majorVersion = Serializers.optionalInteger(root.field(majorVersionField));
ApplicationMetrics metrics = new ApplicationMetrics(root.field(queryQualityField).asDouble(),
root.field(writeQualityField).asDouble());
Set<PublicKey> deployKeys = deployKeysFromSlime(root.field(pemDeployKeysField));
List<Instance> instances = instancesFromSlime(id, deploymentSpec, root.field(instancesField));
OptionalLong projectId = Serializers.optionalLong(root.field(projectIdField));
Optional<ApplicationVersion> latestVersion = latestVersionFromSlime(root.field(latestVersionField));
return new Application(id, createdAt, deploymentSpec, validationOverrides,
deploymentIssueId, ownershipIssueId, owner, majorVersion, metrics,
deployKeys, projectId, latestVersion, instances);
}
private Optional<ApplicationVersion> latestVersionFromSlime(Inspector latestVersionObject) {
if (latestVersionObject.valid())
return Optional.of(applicationVersionFromSlime(latestVersionObject));
return Optional.empty();
}
private List<Instance> instancesFromSlime(TenantAndApplicationId id, DeploymentSpec deploymentSpec, Inspector field) {
List<Instance> instances = new ArrayList<>();
field.traverse((ArrayTraverser) (name, object) -> {
InstanceName instanceName = InstanceName.from(object.field(instanceNameField).asString());
List<Deployment> deployments = deploymentsFromSlime(object.field(deploymentsField));
Map<JobType, Instant> jobPauses = jobPausesFromSlime(object.field(deploymentJobsField));
List<AssignedRotation> assignedRotations = assignedRotationsFromSlime(deploymentSpec, instanceName, object);
RotationStatus rotationStatus = rotationStatusFromSlime(object);
Change change = changeFromSlime(object.field(deployingField));
instances.add(new Instance(id.instance(instanceName),
deployments,
jobPauses,
assignedRotations,
rotationStatus,
change));
});
return instances;
}
private Set<PublicKey> deployKeysFromSlime(Inspector array) {
Set<PublicKey> keys = new LinkedHashSet<>();
array.traverse((ArrayTraverser) (__, key) -> keys.add(KeyUtils.fromPemEncodedPublicKey(key.asString())));
return keys;
}
private List<Deployment> deploymentsFromSlime(Inspector array) {
List<Deployment> deployments = new ArrayList<>();
array.traverse((ArrayTraverser) (int i, Inspector item) -> deployments.add(deploymentFromSlime(item)));
return deployments;
}
private Deployment deploymentFromSlime(Inspector deploymentObject) {
return new Deployment(zoneIdFromSlime(deploymentObject.field(zoneField)),
applicationVersionFromSlime(deploymentObject.field(applicationPackageRevisionField)),
Version.fromString(deploymentObject.field(versionField).asString()),
Instant.ofEpochMilli(deploymentObject.field(deployTimeField).asLong()),
clusterInfoMapFromSlime(deploymentObject.field(clusterInfoField)),
deploymentMetricsFromSlime(deploymentObject.field(deploymentMetricsField)),
DeploymentActivity.create(Serializers.optionalInstant(deploymentObject.field(lastQueriedField)),
Serializers.optionalInstant(deploymentObject.field(lastWrittenField)),
Serializers.optionalDouble(deploymentObject.field(lastQueriesPerSecondField)),
Serializers.optionalDouble(deploymentObject.field(lastWritesPerSecondField))));
}
private DeploymentMetrics deploymentMetricsFromSlime(Inspector object) {
Optional<Instant> instant = object.field(deploymentMetricsUpdateTime).valid() ?
Optional.of(Instant.ofEpochMilli(object.field(deploymentMetricsUpdateTime).asLong())) :
Optional.empty();
return new DeploymentMetrics(object.field(deploymentMetricsQPSField).asDouble(),
object.field(deploymentMetricsWPSField).asDouble(),
object.field(deploymentMetricsDocsField).asDouble(),
object.field(deploymentMetricsQueryLatencyField).asDouble(),
object.field(deploymentMetricsWriteLatencyField).asDouble(),
instant,
deploymentWarningsFrom(object.field(deploymentMetricsWarningsField)));
}
private Map<DeploymentMetrics.Warning, Integer> deploymentWarningsFrom(Inspector object) {
Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>();
object.traverse((ObjectTraverser) (name, value) -> warnings.put(DeploymentMetrics.Warning.valueOf(name),
(int) value.asLong()));
return Collections.unmodifiableMap(warnings);
}
private RotationStatus rotationStatusFromSlime(Inspector parentObject) {
var object = parentObject.field(rotationStatusField);
var statusMap = new LinkedHashMap<RotationId, RotationStatus.Targets>();
object.traverse((ArrayTraverser) (idx, statusObject) -> statusMap.put(new RotationId(statusObject.field(rotationIdField).asString()),
new RotationStatus.Targets(
singleRotationStatusFromSlime(statusObject.field(statusField)),
Instant.ofEpochMilli(statusObject.field(lastUpdatedField).asLong()))));
return RotationStatus.from(statusMap);
}
private Map<ZoneId, RotationState> singleRotationStatusFromSlime(Inspector object) {
if (!object.valid()) {
return Collections.emptyMap();
}
Map<ZoneId, RotationState> rotationStatus = new LinkedHashMap<>();
object.traverse((ArrayTraverser) (idx, statusObject) -> {
var zone = zoneIdFromSlime(statusObject);
var status = RotationState.valueOf(statusObject.field(rotationStateField).asString());
rotationStatus.put(zone, status);
});
return Collections.unmodifiableMap(rotationStatus);
}
private Map<ClusterSpec.Id, ClusterInfo> clusterInfoMapFromSlime (Inspector object) {
Map<ClusterSpec.Id, ClusterInfo> map = new HashMap<>();
object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterInfoFromSlime(value)));
return map;
}
private ClusterInfo clusterInfoFromSlime(Inspector inspector) {
String flavor = inspector.field(clusterInfoFlavorField).asString();
int cost = (int)inspector.field(clusterInfoCostField).asLong();
String type = inspector.field(clusterInfoTypeField).asString();
double flavorCpu = inspector.field(clusterInfoCpuField).asDouble();
double flavorMem = inspector.field(clusterInfoMemField).asDouble();
double flavorDisk = inspector.field(clusterInfoDiskField).asDouble();
List<String> hostnames = new ArrayList<>();
inspector.field(clusterInfoHostnamesField).traverse((ArrayTraverser)(int index, Inspector value) -> hostnames.add(value.asString()));
return new ClusterInfo(flavor, cost, flavorCpu, flavorMem, flavorDisk, ClusterSpec.Type.from(type), hostnames);
}
private ZoneId zoneIdFromSlime(Inspector object) {
return ZoneId.from(object.field(environmentField).asString(), object.field(regionField).asString());
}
private ApplicationVersion applicationVersionFromSlime(Inspector object) {
if ( ! object.valid()) return ApplicationVersion.unknown;
OptionalLong applicationBuildNumber = Serializers.optionalLong(object.field(applicationBuildNumberField));
if (applicationBuildNumber.isEmpty())
return ApplicationVersion.unknown;
Optional<SourceRevision> sourceRevision = sourceRevisionFromSlime(object.field(sourceRevisionField));
Optional<String> authorEmail = Serializers.optionalString(object.field(authorEmailField));
Optional<Version> compileVersion = Serializers.optionalString(object.field(compileVersionField)).map(Version::fromString);
Optional<Instant> buildTime = Serializers.optionalInstant(object.field(buildTimeField));
Optional<String> sourceUrl = Serializers.optionalString(object.field(sourceUrlField));
Optional<String> commit = Serializers.optionalString(object.field(commitField));
return new ApplicationVersion(sourceRevision, applicationBuildNumber, authorEmail, compileVersion, buildTime, sourceUrl, commit);
}
private Optional<SourceRevision> sourceRevisionFromSlime(Inspector object) {
if ( ! object.valid()) return Optional.empty();
var repository = object.field(repositoryField).asString();
var branch = object.field(branchField).asString();
var commit = object.field(commitField).asString();
if (repository.isBlank() && branch.isBlank() && commit.isBlank()) return Optional.empty();
return Optional.of(new SourceRevision(repository, branch, commit));
}
private Map<JobType, Instant> jobPausesFromSlime(Inspector object) {
Map<JobType, Instant> jobPauses = new HashMap<>();
object.field(jobStatusField).traverse((ArrayTraverser) (__, jobPauseObject) ->
JobType.fromOptionalJobName(jobPauseObject.field(jobTypeField).asString())
.ifPresent(jobType -> jobPauses.put(jobType,
Instant.ofEpochMilli(jobPauseObject.field(pausedUntilField).asLong()))));
return jobPauses;
}
private Change changeFromSlime(Inspector object) {
if ( ! object.valid()) return Change.empty();
Inspector versionFieldValue = object.field(versionField);
Change change = Change.empty();
if (versionFieldValue.valid())
change = Change.of(Version.fromString(versionFieldValue.asString()));
if (object.field(applicationBuildNumberField).valid())
change = change.with(applicationVersionFromSlime(object));
if (object.field(pinnedField).asBool())
change = change.withPin();
return change;
}
private List<AssignedRotation> assignedRotationsFromSlime(DeploymentSpec deploymentSpec, InstanceName instance, Inspector root) {
var assignedRotations = new LinkedHashMap<EndpointId, AssignedRotation>();
root.field(assignedRotationsField).traverse((ArrayTraverser) (idx, inspector) -> {
var clusterId = new ClusterSpec.Id(inspector.field(assignedRotationClusterField).asString());
var endpointId = EndpointId.of(inspector.field(assignedRotationEndpointField).asString());
var rotationId = new RotationId(inspector.field(assignedRotationRotationField).asString());
var regions = deploymentSpec.instance(instance)
.map(spec -> globalEndpointRegions(spec, endpointId))
.orElse(Set.of());
assignedRotations.putIfAbsent(endpointId, new AssignedRotation(clusterId, endpointId, rotationId, regions));
});
return List.copyOf(assignedRotations.values());
}
// Returns the regions a global endpoint covers: with the legacy globalServiceId
// syntax, the regions of all declared zones; otherwise the regions of the
// declared endpoint with the matching id.
private Set<RegionName> globalEndpointRegions(DeploymentInstanceSpec spec, EndpointId endpointId) {
if (spec.globalServiceId().isPresent())
return spec.zones().stream()
.flatMap(zone -> zone.region().stream())
.collect(Collectors.toSet());
return spec.endpoints().stream()
.filter(endpoint -> endpoint.endpointId().equals(endpointId.id()))
.flatMap(endpoint -> endpoint.regions().stream())
.collect(Collectors.toSet());
}
} |
> The plan was to remove SourceRevision. Please do! 🔥 | private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
applicationVersion.buildNumber().ifPresent(buildNumber -> object.setLong(applicationBuildNumberField, buildNumber));
applicationVersion.source().ifPresent(source -> toSlime(source, object.setObject(sourceRevisionField)));
applicationVersion.authorEmail().ifPresent(email -> object.setString(authorEmailField, email));
applicationVersion.compileVersion().ifPresent(version -> object.setString(compileVersionField, version.toString()));
applicationVersion.buildTime().ifPresent(time -> object.setLong(buildTimeField, time.toEpochMilli()));
applicationVersion.sourceUrl().ifPresent(url -> object.setString(sourceUrlField, url));
applicationVersion.commit().ifPresent(commit -> object.setString(commitField, commit));
} | applicationVersion.buildNumber().ifPresent(buildNumber -> object.setLong(applicationBuildNumberField, buildNumber)); | private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
applicationVersion.buildNumber().ifPresent(buildNumber -> object.setLong(applicationBuildNumberField, buildNumber));
applicationVersion.source().ifPresent(source -> toSlime(source, object.setObject(sourceRevisionField)));
applicationVersion.authorEmail().ifPresent(email -> object.setString(authorEmailField, email));
applicationVersion.compileVersion().ifPresent(version -> object.setString(compileVersionField, version.toString()));
applicationVersion.buildTime().ifPresent(time -> object.setLong(buildTimeField, time.toEpochMilli()));
applicationVersion.sourceUrl().ifPresent(url -> object.setString(sourceUrlField, url));
applicationVersion.commit().ifPresent(commit -> object.setString(commitField, commit));
} | class ApplicationSerializer {
// Field name constants for the persisted Slime (JSON) form of an Application.
// NOTE: these values are the stored wire format — never change an existing one.
// Some accidentally include a "Field" suffix in the value (e.g. "deploymentSpecField",
// "deployingField", "repositoryField"); they are kept as-is for compatibility.

// Application-level fields
private static final String idField = "id";
private static final String createdAtField = "createdAt";
private static final String deploymentSpecField = "deploymentSpecField";
private static final String validationOverridesField = "validationOverrides";
private static final String instancesField = "instances";
private static final String deployingField = "deployingField";
private static final String projectIdField = "projectId";
private static final String latestVersionField = "latestVersion";
private static final String pinnedField = "pinned";
private static final String deploymentIssueField = "deploymentIssueId";
private static final String ownershipIssueIdField = "ownershipIssueId";
private static final String ownerField = "confirmedOwner";
private static final String majorVersionField = "majorVersion";
private static final String writeQualityField = "writeQuality";
private static final String queryQualityField = "queryQuality";
private static final String pemDeployKeysField = "pemDeployKeys";
private static final String assignedRotationClusterField = "clusterId";
private static final String assignedRotationRotationField = "rotationId";
private static final String versionField = "version";

// Instance-level fields
private static final String instanceNameField = "instanceName";
private static final String deploymentsField = "deployments";
private static final String deploymentJobsField = "deploymentJobs";
private static final String assignedRotationsField = "assignedRotations";
private static final String assignedRotationEndpointField = "endpointId";

// Deployment-level fields
private static final String zoneField = "zone";
private static final String environmentField = "environment";
private static final String regionField = "region";
private static final String deployTimeField = "deployTime";
private static final String applicationBuildNumberField = "applicationBuildNumber";
private static final String applicationPackageRevisionField = "applicationPackageRevision";
private static final String sourceRevisionField = "sourceRevision";
private static final String repositoryField = "repositoryField";
private static final String branchField = "branchField";
private static final String commitField = "commitField";
private static final String authorEmailField = "authorEmailField";
private static final String compileVersionField = "compileVersion";
private static final String buildTimeField = "buildTime";
private static final String sourceUrlField = "sourceUrl";
private static final String lastQueriedField = "lastQueried";
private static final String lastWrittenField = "lastWritten";
private static final String lastQueriesPerSecondField = "lastQueriesPerSecond";
private static final String lastWritesPerSecondField = "lastWritesPerSecond";

// Job pause fields
private static final String jobStatusField = "jobStatus";
private static final String jobTypeField = "jobType";
private static final String pausedUntilField = "pausedUntil";

// Cluster info fields
private static final String clusterInfoField = "clusterInfo";
private static final String clusterInfoFlavorField = "flavor";
private static final String clusterInfoCostField = "cost";
private static final String clusterInfoCpuField = "flavorCpu";
private static final String clusterInfoMemField = "flavorMem";
private static final String clusterInfoDiskField = "flavorDisk";
private static final String clusterInfoTypeField = "clusterType";
private static final String clusterInfoHostnamesField = "hostnames";

// Deployment metrics fields
private static final String deploymentMetricsField = "metrics";
private static final String deploymentMetricsQPSField = "queriesPerSecond";
private static final String deploymentMetricsWPSField = "writesPerSecond";
private static final String deploymentMetricsDocsField = "documentCount";
private static final String deploymentMetricsQueryLatencyField = "queryLatencyMillis";
private static final String deploymentMetricsWriteLatencyField = "writeLatencyMillis";
private static final String deploymentMetricsUpdateTime = "lastUpdated";
private static final String deploymentMetricsWarningsField = "warnings";

// Rotation status fields ("rotationStatus2" suggests a format bump — confirm history before reuse)
private static final String rotationStatusField = "rotationStatus2";
private static final String rotationIdField = "rotationId";
private static final String lastUpdatedField = "lastUpdated";
private static final String rotationStateField = "state";
private static final String statusField = "status";

// Deserialization cache, keyed by a hash of the raw byte content (see fromSlime(byte[])).
private final Cache<Long, Application> cache = CacheBuilder.newBuilder().maximumSize(1000).build();
// Serializes the given application, including all its instances, to a new Slime tree.
// Optional fields are only written when present.
public Slime toSlime(Application application) {
Slime slime = new Slime();
Cursor root = slime.setObject();
root.setString(idField, application.id().serialized());
root.setLong(createdAtField, application.createdAt().toEpochMilli());
root.setString(deploymentSpecField, application.deploymentSpec().xmlForm());
root.setString(validationOverridesField, application.validationOverrides().xmlForm());
application.projectId().ifPresent(projectId -> root.setLong(projectIdField, projectId));
application.deploymentIssueId().ifPresent(jiraIssueId -> root.setString(deploymentIssueField, jiraIssueId.value()));
application.ownershipIssueId().ifPresent(issueId -> root.setString(ownershipIssueIdField, issueId.value()));
application.owner().ifPresent(owner -> root.setString(ownerField, owner.username()));
application.majorVersion().ifPresent(majorVersion -> root.setLong(majorVersionField, majorVersion));
root.setDouble(queryQualityField, application.metrics().queryServiceQuality());
root.setDouble(writeQualityField, application.metrics().writeServiceQuality());
deployKeysToSlime(application.deployKeys(), root.setArray(pemDeployKeysField));
application.latestVersion().ifPresent(version -> toSlime(version, root.setObject(latestVersionField)));
instancesToSlime(application, root.setArray(instancesField));
return slime;
}
// Serializes each of the application's instances as one object in the given array.
private void instancesToSlime(Application application, Cursor array) {
    application.instances().values().forEach(instance -> {
        Cursor instanceObject = array.addObject();
        instanceObject.setString(instanceNameField, instance.name().value());
        deploymentsToSlime(instance.deployments().values(), instanceObject.setArray(deploymentsField));
        toSlime(instance.jobPauses(), instanceObject.setObject(deploymentJobsField));
        assignedRotationsToSlime(instance.rotations(), instanceObject, assignedRotationsField);
        toSlime(instance.rotationStatus(), instanceObject.setArray(rotationStatusField));
        toSlime(instance.change(), instanceObject, deployingField);
    });
}
// Writes each deploy key, in PEM form, as one entry of the given array.
private void deployKeysToSlime(Set<PublicKey> deployKeys, Cursor array) {
    for (PublicKey key : deployKeys)
        array.addString(KeyUtils.toPem(key));
}
// Serializes each deployment as one object in the given array.
private void deploymentsToSlime(Collection<Deployment> deployments, Cursor array) {
    deployments.forEach(deployment -> deploymentToSlime(deployment, array.addObject()));
}
// Serializes one deployment: zone, versions, timestamps, cluster info, metrics
// and activity. Activity fields are only written when present.
private void deploymentToSlime(Deployment deployment, Cursor object) {
zoneIdToSlime(deployment.zone(), object.setObject(zoneField));
object.setString(versionField, deployment.version().toString());
object.setLong(deployTimeField, deployment.at().toEpochMilli());
toSlime(deployment.applicationVersion(), object.setObject(applicationPackageRevisionField));
clusterInfoToSlime(deployment.clusterInfo(), object);
deploymentMetricsToSlime(deployment.metrics(), object);
deployment.activity().lastQueried().ifPresent(instant -> object.setLong(lastQueriedField, instant.toEpochMilli()));
deployment.activity().lastWritten().ifPresent(instant -> object.setLong(lastWrittenField, instant.toEpochMilli()));
deployment.activity().lastQueriesPerSecond().ifPresent(value -> object.setDouble(lastQueriesPerSecondField, value));
deployment.activity().lastWritesPerSecond().ifPresent(value -> object.setDouble(lastWritesPerSecondField, value));
}
// Serializes deployment metrics under the "metrics" object; the warnings object
// is only written when there is at least one warning.
private void deploymentMetricsToSlime(DeploymentMetrics metrics, Cursor object) {
Cursor root = object.setObject(deploymentMetricsField);
root.setDouble(deploymentMetricsQPSField, metrics.queriesPerSecond());
root.setDouble(deploymentMetricsWPSField, metrics.writesPerSecond());
root.setDouble(deploymentMetricsDocsField, metrics.documentCount());
root.setDouble(deploymentMetricsQueryLatencyField, metrics.queryLatencyMillis());
root.setDouble(deploymentMetricsWriteLatencyField, metrics.writeLatencyMillis());
metrics.instant().ifPresent(instant -> root.setLong(deploymentMetricsUpdateTime, instant.toEpochMilli()));
if (!metrics.warnings().isEmpty()) {
Cursor warningsObject = root.setObject(deploymentMetricsWarningsField);
metrics.warnings().forEach((warning, count) -> warningsObject.setLong(warning.name(), count));
}
}
// Serializes cluster info under the "clusterInfo" object, keyed by cluster id.
private void clusterInfoToSlime(Map<ClusterSpec.Id, ClusterInfo> clusters, Cursor object) {
    Cursor clustersObject = object.setObject(clusterInfoField);
    clusters.forEach((id, info) -> toSlime(info, clustersObject.setObject(id.value())));
}
// Serializes one cluster's info: flavor data, cluster type and hostnames.
private void toSlime(ClusterInfo info, Cursor object) {
    object.setString(clusterInfoFlavorField, info.getFlavor());
    object.setLong(clusterInfoCostField, info.getFlavorCost());
    object.setDouble(clusterInfoCpuField, info.getFlavorCPU());
    object.setDouble(clusterInfoMemField, info.getFlavorMem());
    object.setDouble(clusterInfoDiskField, info.getFlavorDisk());
    object.setString(clusterInfoTypeField, info.getClusterType().name());
    Cursor hostnamesArray = object.setArray(clusterInfoHostnamesField);
    info.getHostnames().forEach(hostnamesArray::addString);
}
// Writes a zone id as separate environment and region fields.
private void zoneIdToSlime(ZoneId zone, Cursor object) {
object.setString(environmentField, zone.environment().value());
object.setString(regionField, zone.region().value());
}
// Serializes a source revision (repository, branch, commit).
private void toSlime(SourceRevision sourceRevision, Cursor object) {
object.setString(repositoryField, sourceRevision.repository());
object.setString(branchField, sourceRevision.branch());
object.setString(commitField, sourceRevision.commit());
}
// Serializes job pauses as an array of { jobType, pausedUntil } objects.
private void toSlime(Map<JobType, Instant> jobPauses, Cursor cursor) {
Cursor jobStatusArray = cursor.setArray(jobStatusField);
jobPauses.forEach((type, until) -> {
Cursor jobPauseObject = jobStatusArray.addObject();
jobPauseObject.setString(jobTypeField, type.jobName());
jobPauseObject.setLong(pausedUntilField, until.toEpochMilli());
});
}
// Writes the given change as an object under fieldName on parentObject;
// nothing is written for the empty change.
private void toSlime(Change deploying, Cursor parentObject, String fieldName) {
    if (deploying.isEmpty()) return;
    Cursor object = parentObject.setObject(fieldName);
    deploying.platform().ifPresent(platform -> object.setString(versionField, platform.toString()));
    deploying.application().ifPresent(version -> toSlime(version, object));
    if (deploying.isPinned())
        object.setBool(pinnedField, true);
}
// Serializes rotation status: one object per rotation, each carrying its last
// update time and a per-zone status array.
private void toSlime(RotationStatus status, Cursor array) {
status.asMap().forEach((rotationId, targets) -> {
Cursor rotationObject = array.addObject();
rotationObject.setString(rotationIdField, rotationId.asString());
rotationObject.setLong(lastUpdatedField, targets.lastUpdated().toEpochMilli());
Cursor statusArray = rotationObject.setArray(statusField);
targets.asMap().forEach((zone, state) -> {
Cursor statusObject = statusArray.addObject();
zoneIdToSlime(zone, statusObject);
statusObject.setString(rotationStateField, state.name());
});
});
}
// Serializes the assigned rotations as an array of { endpointId, rotationId, clusterId }.
private void assignedRotationsToSlime(List<AssignedRotation> rotations, Cursor parent, String fieldName) {
    Cursor rotationsArray = parent.setArray(fieldName);
    rotations.forEach(rotation -> {
        Cursor rotationObject = rotationsArray.addObject();
        rotationObject.setString(assignedRotationEndpointField, rotation.endpointId().id());
        rotationObject.setString(assignedRotationRotationField, rotation.rotationId().asString());
        rotationObject.setString(assignedRotationClusterField, rotation.clusterId().value());
    });
}
// Deserializes an application from raw JSON bytes, memoizing by a sipHash24 of
// the content so byte-identical snapshots are not re-parsed.
public Application fromSlime(byte[] data) {
var key = Hashing.sipHash24().hashBytes(data).asLong();
try {
return cache.get(key, () -> fromSlime(SlimeUtils.jsonToSlime(data)));
} catch (ExecutionException e) {
// Parsing failures surface as unchecked, preserving the cause.
throw new UncheckedExecutionException(e);
}
}
// Deserializes an application from its Slime representation.
private Application fromSlime(Slime slime) {
Inspector root = slime.get();
TenantAndApplicationId id = TenantAndApplicationId.fromSerialized(root.field(idField).asString());
Instant createdAt = Instant.ofEpochMilli(root.field(createdAtField).asLong());
// NOTE(review): the boolean flag to DeploymentSpec.fromXml presumably skips
// validation of already-stored specs — confirm against DeploymentSpec.
DeploymentSpec deploymentSpec = DeploymentSpec.fromXml(root.field(deploymentSpecField).asString(), false);
ValidationOverrides validationOverrides = ValidationOverrides.fromXml(root.field(validationOverridesField).asString());
Optional<IssueId> deploymentIssueId = Serializers.optionalString(root.field(deploymentIssueField)).map(IssueId::from);
Optional<IssueId> ownershipIssueId = Serializers.optionalString(root.field(ownershipIssueIdField)).map(IssueId::from);
Optional<User> owner = Serializers.optionalString(root.field(ownerField)).map(User::from);
OptionalInt majorVersion = Serializers.optionalInteger(root.field(majorVersionField));
ApplicationMetrics metrics = new ApplicationMetrics(root.field(queryQualityField).asDouble(),
root.field(writeQualityField).asDouble());
Set<PublicKey> deployKeys = deployKeysFromSlime(root.field(pemDeployKeysField));
List<Instance> instances = instancesFromSlime(id, deploymentSpec, root.field(instancesField));
OptionalLong projectId = Serializers.optionalLong(root.field(projectIdField));
Optional<ApplicationVersion> latestVersion = latestVersionFromSlime(root.field(latestVersionField));
return new Application(id, createdAt, deploymentSpec, validationOverrides,
deploymentIssueId, ownershipIssueId, owner, majorVersion, metrics,
deployKeys, projectId, latestVersion, instances);
}
// Returns the stored latest application version, or empty if none is stored.
private Optional<ApplicationVersion> latestVersionFromSlime(Inspector latestVersionObject) {
    return latestVersionObject.valid() ? Optional.of(applicationVersionFromSlime(latestVersionObject))
                                       : Optional.empty();
}
// Deserializes the instances array into Instance objects; the rotation regions
// are derived from the given (current) deployment spec.
private List<Instance> instancesFromSlime(TenantAndApplicationId id, DeploymentSpec deploymentSpec, Inspector field) {
List<Instance> instances = new ArrayList<>();
field.traverse((ArrayTraverser) (name, object) -> {
InstanceName instanceName = InstanceName.from(object.field(instanceNameField).asString());
List<Deployment> deployments = deploymentsFromSlime(object.field(deploymentsField));
Map<JobType, Instant> jobPauses = jobPausesFromSlime(object.field(deploymentJobsField));
List<AssignedRotation> assignedRotations = assignedRotationsFromSlime(deploymentSpec, instanceName, object);
RotationStatus rotationStatus = rotationStatusFromSlime(object);
Change change = changeFromSlime(object.field(deployingField));
instances.add(new Instance(id.instance(instanceName),
deployments,
jobPauses,
assignedRotations,
rotationStatus,
change));
});
return instances;
}
// Reads the PEM-encoded deploy keys, preserving stored order.
private Set<PublicKey> deployKeysFromSlime(Inspector array) {
    Set<PublicKey> deployKeys = new LinkedHashSet<>();
    array.traverse((ArrayTraverser) (idx, keyEntry) -> deployKeys.add(KeyUtils.fromPemEncodedPublicKey(keyEntry.asString())));
    return deployKeys;
}
// Deserializes the deployments array, one Deployment per element.
private List<Deployment> deploymentsFromSlime(Inspector array) {
    List<Deployment> deployments = new ArrayList<>();
    array.traverse((ArrayTraverser) (__, deploymentObject) -> deployments.add(deploymentFromSlime(deploymentObject)));
    return deployments;
}
// Deserializes a single deployment, including cluster info, metrics and activity.
private Deployment deploymentFromSlime(Inspector deploymentObject) {
return new Deployment(zoneIdFromSlime(deploymentObject.field(zoneField)),
applicationVersionFromSlime(deploymentObject.field(applicationPackageRevisionField)),
Version.fromString(deploymentObject.field(versionField).asString()),
Instant.ofEpochMilli(deploymentObject.field(deployTimeField).asLong()),
clusterInfoMapFromSlime(deploymentObject.field(clusterInfoField)),
deploymentMetricsFromSlime(deploymentObject.field(deploymentMetricsField)),
DeploymentActivity.create(Serializers.optionalInstant(deploymentObject.field(lastQueriedField)),
Serializers.optionalInstant(deploymentObject.field(lastWrittenField)),
Serializers.optionalDouble(deploymentObject.field(lastQueriesPerSecondField)),
Serializers.optionalDouble(deploymentObject.field(lastWritesPerSecondField))));
}
// Deserializes deployment metrics. The optional update timestamp is read with
// Serializers.optionalInstant, consistent with the other optional instant
// fields in this class (previously a hand-rolled valid()/ternary).
private DeploymentMetrics deploymentMetricsFromSlime(Inspector object) {
    Optional<Instant> instant = Serializers.optionalInstant(object.field(deploymentMetricsUpdateTime));
    return new DeploymentMetrics(object.field(deploymentMetricsQPSField).asDouble(),
                                 object.field(deploymentMetricsWPSField).asDouble(),
                                 object.field(deploymentMetricsDocsField).asDouble(),
                                 object.field(deploymentMetricsQueryLatencyField).asDouble(),
                                 object.field(deploymentMetricsWriteLatencyField).asDouble(),
                                 instant,
                                 deploymentWarningsFrom(object.field(deploymentMetricsWarningsField)));
}
// Reads warning counts, keyed by warning name, into an unmodifiable map.
private Map<DeploymentMetrics.Warning, Integer> deploymentWarningsFrom(Inspector object) {
Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>();
object.traverse((ObjectTraverser) (name, value) -> warnings.put(DeploymentMetrics.Warning.valueOf(name),
(int) value.asLong()));
return Collections.unmodifiableMap(warnings);
}
// Reads rotation status (the "rotationStatus2" field) into per-rotation targets,
// preserving stored order.
private RotationStatus rotationStatusFromSlime(Inspector parentObject) {
var object = parentObject.field(rotationStatusField);
var statusMap = new LinkedHashMap<RotationId, RotationStatus.Targets>();
object.traverse((ArrayTraverser) (idx, statusObject) -> statusMap.put(new RotationId(statusObject.field(rotationIdField).asString()),
new RotationStatus.Targets(
singleRotationStatusFromSlime(statusObject.field(statusField)),
Instant.ofEpochMilli(statusObject.field(lastUpdatedField).asLong()))));
return RotationStatus.from(statusMap);
}
// Reads per-zone rotation states; an invalid (absent) array yields an empty map.
private Map<ZoneId, RotationState> singleRotationStatusFromSlime(Inspector object) {
if (!object.valid()) {
return Collections.emptyMap();
}
Map<ZoneId, RotationState> rotationStatus = new LinkedHashMap<>();
object.traverse((ArrayTraverser) (idx, statusObject) -> {
var zone = zoneIdFromSlime(statusObject);
var status = RotationState.valueOf(statusObject.field(rotationStateField).asString());
rotationStatus.put(zone, status);
});
return Collections.unmodifiableMap(rotationStatus);
}
// Reads cluster info objects keyed by cluster id.
private Map<ClusterSpec.Id, ClusterInfo> clusterInfoMapFromSlime(Inspector object) {
    Map<ClusterSpec.Id, ClusterInfo> clusterInfoMap = new HashMap<>();
    object.traverse((String clusterId, Inspector value) -> clusterInfoMap.put(new ClusterSpec.Id(clusterId), clusterInfoFromSlime(value)));
    return clusterInfoMap;
}
// Deserializes one cluster's info: flavor data, cluster type and hostnames.
private ClusterInfo clusterInfoFromSlime(Inspector inspector) {
String flavor = inspector.field(clusterInfoFlavorField).asString();
int cost = (int)inspector.field(clusterInfoCostField).asLong();
String type = inspector.field(clusterInfoTypeField).asString();
double flavorCpu = inspector.field(clusterInfoCpuField).asDouble();
double flavorMem = inspector.field(clusterInfoMemField).asDouble();
double flavorDisk = inspector.field(clusterInfoDiskField).asDouble();
List<String> hostnames = new ArrayList<>();
inspector.field(clusterInfoHostnamesField).traverse((ArrayTraverser)(int index, Inspector value) -> hostnames.add(value.asString()));
return new ClusterInfo(flavor, cost, flavorCpu, flavorMem, flavorDisk, ClusterSpec.Type.from(type), hostnames);
}
// Reads a zone id from its environment and region fields.
private ZoneId zoneIdFromSlime(Inspector object) {
return ZoneId.from(object.field(environmentField).asString(), object.field(regionField).asString());
}
// Deserializes an application version; an absent object or a missing build
// number yields ApplicationVersion.unknown.
private ApplicationVersion applicationVersionFromSlime(Inspector object) {
if ( ! object.valid()) return ApplicationVersion.unknown;
OptionalLong applicationBuildNumber = Serializers.optionalLong(object.field(applicationBuildNumberField));
if (applicationBuildNumber.isEmpty())
return ApplicationVersion.unknown;
Optional<SourceRevision> sourceRevision = sourceRevisionFromSlime(object.field(sourceRevisionField));
Optional<String> authorEmail = Serializers.optionalString(object.field(authorEmailField));
Optional<Version> compileVersion = Serializers.optionalString(object.field(compileVersionField)).map(Version::fromString);
Optional<Instant> buildTime = Serializers.optionalInstant(object.field(buildTimeField));
Optional<String> sourceUrl = Serializers.optionalString(object.field(sourceUrlField));
Optional<String> commit = Serializers.optionalString(object.field(commitField));
return new ApplicationVersion(sourceRevision, applicationBuildNumber, authorEmail, compileVersion, buildTime, sourceUrl, commit);
}
// Reads a source revision; an absent object, or one with all fields blank,
// yields empty.
private Optional<SourceRevision> sourceRevisionFromSlime(Inspector object) {
if ( ! object.valid()) return Optional.empty();
var repository = object.field(repositoryField).asString();
var branch = object.field(branchField).asString();
var commit = object.field(commitField).asString();
if (repository.isBlank() && branch.isBlank() && commit.isBlank()) return Optional.empty();
return Optional.of(new SourceRevision(repository, branch, commit));
}
// Reads the per-job pause deadlines for an instance; entries whose job name is
// no longer a known JobType are silently dropped.
private Map<JobType, Instant> jobPausesFromSlime(Inspector object) {
Map<JobType, Instant> jobPauses = new HashMap<>();
object.field(jobStatusField).traverse((ArrayTraverser) (__, jobPauseObject) ->
JobType.fromOptionalJobName(jobPauseObject.field(jobTypeField).asString())
.ifPresent(jobType -> jobPauses.put(jobType,
Instant.ofEpochMilli(jobPauseObject.field(pausedUntilField).asLong()))));
return jobPauses;
}
// Reads a Change (platform version and/or application version, optionally pinned);
// an invalid (absent) object deserializes to the empty change.
private Change changeFromSlime(Inspector object) {
if ( ! object.valid()) return Change.empty();
Inspector versionFieldValue = object.field(versionField);
Change change = Change.empty();
if (versionFieldValue.valid())
change = Change.of(Version.fromString(versionFieldValue.asString()));
if (object.field(applicationBuildNumberField).valid())
change = change.with(applicationVersionFromSlime(object));
if (object.field(pinnedField).asBool())
change = change.withPin();
return change;
}
// Reads the assigned rotations of an instance. The covered regions are recomputed
// from the current deployment spec rather than read from storage; duplicate
// endpoint ids keep the first occurrence (putIfAbsent).
private List<AssignedRotation> assignedRotationsFromSlime(DeploymentSpec deploymentSpec, InstanceName instance, Inspector root) {
var assignedRotations = new LinkedHashMap<EndpointId, AssignedRotation>();
root.field(assignedRotationsField).traverse((ArrayTraverser) (idx, inspector) -> {
var clusterId = new ClusterSpec.Id(inspector.field(assignedRotationClusterField).asString());
var endpointId = EndpointId.of(inspector.field(assignedRotationEndpointField).asString());
var rotationId = new RotationId(inspector.field(assignedRotationRotationField).asString());
var regions = deploymentSpec.instance(instance)
.map(spec -> globalEndpointRegions(spec, endpointId))
.orElse(Set.of());
assignedRotations.putIfAbsent(endpointId, new AssignedRotation(clusterId, endpointId, rotationId, regions));
});
return List.copyOf(assignedRotations.values());
}
// Returns the regions a global endpoint covers: without the legacy globalServiceId
// syntax, the regions of the declared endpoint with the matching id; with it,
// the regions of all declared zones.
private Set<RegionName> globalEndpointRegions(DeploymentInstanceSpec spec, EndpointId endpointId) {
    if ( ! spec.globalServiceId().isPresent())
        return spec.endpoints().stream()
                   .filter(endpoint -> endpoint.endpointId().equals(endpointId.id()))
                   .flatMap(endpoint -> endpoint.regions().stream())
                   .collect(Collectors.toSet());
    return spec.zones().stream()
               .flatMap(zone -> zone.region().stream())
               .collect(Collectors.toSet());
}
} | class ApplicationSerializer {
// Field name constants for the persisted Slime (JSON) form of an Application;
// these values are the stored wire format — never change an existing one
// (several accidentally contain a "Field" suffix; kept for compatibility).
private static final String idField = "id";
private static final String createdAtField = "createdAt";
private static final String deploymentSpecField = "deploymentSpecField";
private static final String validationOverridesField = "validationOverrides";
private static final String instancesField = "instances";
private static final String deployingField = "deployingField";
private static final String projectIdField = "projectId";
private static final String latestVersionField = "latestVersion";
private static final String pinnedField = "pinned";
private static final String deploymentIssueField = "deploymentIssueId";
private static final String ownershipIssueIdField = "ownershipIssueId";
private static final String ownerField = "confirmedOwner";
private static final String majorVersionField = "majorVersion";
private static final String writeQualityField = "writeQuality";
private static final String queryQualityField = "queryQuality";
private static final String pemDeployKeysField = "pemDeployKeys";
private static final String assignedRotationClusterField = "clusterId";
private static final String assignedRotationRotationField = "rotationId";
private static final String versionField = "version";
private static final String instanceNameField = "instanceName";
private static final String deploymentsField = "deployments";
private static final String deploymentJobsField = "deploymentJobs";
private static final String assignedRotationsField = "assignedRotations";
private static final String assignedRotationEndpointField = "endpointId";
private static final String zoneField = "zone";
private static final String environmentField = "environment";
private static final String regionField = "region";
private static final String deployTimeField = "deployTime";
private static final String applicationBuildNumberField = "applicationBuildNumber";
private static final String applicationPackageRevisionField = "applicationPackageRevision";
private static final String sourceRevisionField = "sourceRevision";
private static final String repositoryField = "repositoryField";
private static final String branchField = "branchField";
private static final String commitField = "commitField";
private static final String authorEmailField = "authorEmailField";
private static final String compileVersionField = "compileVersion";
private static final String buildTimeField = "buildTime";
private static final String sourceUrlField = "sourceUrl";
private static final String lastQueriedField = "lastQueried";
private static final String lastWrittenField = "lastWritten";
private static final String lastQueriesPerSecondField = "lastQueriesPerSecond";
private static final String lastWritesPerSecondField = "lastWritesPerSecond";
private static final String jobStatusField = "jobStatus";
private static final String jobTypeField = "jobType";
private static final String pausedUntilField = "pausedUntil";
private static final String clusterInfoField = "clusterInfo";
private static final String clusterInfoFlavorField = "flavor";
private static final String clusterInfoCostField = "cost";
private static final String clusterInfoCpuField = "flavorCpu";
private static final String clusterInfoMemField = "flavorMem";
private static final String clusterInfoDiskField = "flavorDisk";
private static final String clusterInfoTypeField = "clusterType";
private static final String clusterInfoHostnamesField = "hostnames";
private static final String deploymentMetricsField = "metrics";
private static final String deploymentMetricsQPSField = "queriesPerSecond";
private static final String deploymentMetricsWPSField = "writesPerSecond";
private static final String deploymentMetricsDocsField = "documentCount";
private static final String deploymentMetricsQueryLatencyField = "queryLatencyMillis";
private static final String deploymentMetricsWriteLatencyField = "writeLatencyMillis";
private static final String deploymentMetricsUpdateTime = "lastUpdated";
private static final String deploymentMetricsWarningsField = "warnings";
private static final String rotationStatusField = "rotationStatus2";
private static final String rotationIdField = "rotationId";
private static final String lastUpdatedField = "lastUpdated";
private static final String rotationStateField = "state";
private static final String statusField = "status";
// Deserialization cache, keyed by a hash of the raw byte content (see fromSlime(byte[])).
private final Cache<Long, Application> cache = CacheBuilder.newBuilder().maximumSize(1000).build();
// Serializes the given application, including all its instances, to a new Slime tree.
public Slime toSlime(Application application) {
Slime slime = new Slime();
Cursor root = slime.setObject();
root.setString(idField, application.id().serialized());
root.setLong(createdAtField, application.createdAt().toEpochMilli());
root.setString(deploymentSpecField, application.deploymentSpec().xmlForm());
root.setString(validationOverridesField, application.validationOverrides().xmlForm());
application.projectId().ifPresent(projectId -> root.setLong(projectIdField, projectId));
application.deploymentIssueId().ifPresent(jiraIssueId -> root.setString(deploymentIssueField, jiraIssueId.value()));
application.ownershipIssueId().ifPresent(issueId -> root.setString(ownershipIssueIdField, issueId.value()));
application.owner().ifPresent(owner -> root.setString(ownerField, owner.username()));
application.majorVersion().ifPresent(majorVersion -> root.setLong(majorVersionField, majorVersion));
root.setDouble(queryQualityField, application.metrics().queryServiceQuality());
root.setDouble(writeQualityField, application.metrics().writeServiceQuality());
deployKeysToSlime(application.deployKeys(), root.setArray(pemDeployKeysField));
application.latestVersion().ifPresent(version -> toSlime(version, root.setObject(latestVersionField)));
instancesToSlime(application, root.setArray(instancesField));
return slime;
}
// Serializes each of the application's instances as one object in the given array.
private void instancesToSlime(Application application, Cursor array) {
for (Instance instance : application.instances().values()) {
Cursor instanceObject = array.addObject();
instanceObject.setString(instanceNameField, instance.name().value());
deploymentsToSlime(instance.deployments().values(), instanceObject.setArray(deploymentsField));
toSlime(instance.jobPauses(), instanceObject.setObject(deploymentJobsField));
assignedRotationsToSlime(instance.rotations(), instanceObject, assignedRotationsField);
toSlime(instance.rotationStatus(), instanceObject.setArray(rotationStatusField));
toSlime(instance.change(), instanceObject, deployingField);
}
}
// Writes each deploy key, in PEM form, as one entry of the given array.
private void deployKeysToSlime(Set<PublicKey> deployKeys, Cursor array) {
deployKeys.forEach(key -> array.addString(KeyUtils.toPem(key)));
}
// Serializes each deployment as one object in the given array.
private void deploymentsToSlime(Collection<Deployment> deployments, Cursor array) {
for (Deployment deployment : deployments)
deploymentToSlime(deployment, array.addObject());
}
// Serializes one deployment: zone, versions, timestamps, cluster info, metrics
// and activity. Activity fields are only written when present.
private void deploymentToSlime(Deployment deployment, Cursor object) {
zoneIdToSlime(deployment.zone(), object.setObject(zoneField));
object.setString(versionField, deployment.version().toString());
object.setLong(deployTimeField, deployment.at().toEpochMilli());
toSlime(deployment.applicationVersion(), object.setObject(applicationPackageRevisionField));
clusterInfoToSlime(deployment.clusterInfo(), object);
deploymentMetricsToSlime(deployment.metrics(), object);
deployment.activity().lastQueried().ifPresent(instant -> object.setLong(lastQueriedField, instant.toEpochMilli()));
deployment.activity().lastWritten().ifPresent(instant -> object.setLong(lastWrittenField, instant.toEpochMilli()));
deployment.activity().lastQueriesPerSecond().ifPresent(value -> object.setDouble(lastQueriesPerSecondField, value));
deployment.activity().lastWritesPerSecond().ifPresent(value -> object.setDouble(lastWritesPerSecondField, value));
}
private void deploymentMetricsToSlime(DeploymentMetrics metrics, Cursor object) {
Cursor root = object.setObject(deploymentMetricsField);
root.setDouble(deploymentMetricsQPSField, metrics.queriesPerSecond());
root.setDouble(deploymentMetricsWPSField, metrics.writesPerSecond());
root.setDouble(deploymentMetricsDocsField, metrics.documentCount());
root.setDouble(deploymentMetricsQueryLatencyField, metrics.queryLatencyMillis());
root.setDouble(deploymentMetricsWriteLatencyField, metrics.writeLatencyMillis());
metrics.instant().ifPresent(instant -> root.setLong(deploymentMetricsUpdateTime, instant.toEpochMilli()));
if (!metrics.warnings().isEmpty()) {
Cursor warningsObject = root.setObject(deploymentMetricsWarningsField);
metrics.warnings().forEach((warning, count) -> warningsObject.setLong(warning.name(), count));
}
}
private void clusterInfoToSlime(Map<ClusterSpec.Id, ClusterInfo> clusters, Cursor object) {
Cursor root = object.setObject(clusterInfoField);
for (Map.Entry<ClusterSpec.Id, ClusterInfo> entry : clusters.entrySet()) {
toSlime(entry.getValue(), root.setObject(entry.getKey().value()));
}
}
private void toSlime(ClusterInfo info, Cursor object) {
object.setString(clusterInfoFlavorField, info.getFlavor());
object.setLong(clusterInfoCostField, info.getFlavorCost());
object.setDouble(clusterInfoCpuField, info.getFlavorCPU());
object.setDouble(clusterInfoMemField, info.getFlavorMem());
object.setDouble(clusterInfoDiskField, info.getFlavorDisk());
object.setString(clusterInfoTypeField, info.getClusterType().name());
Cursor array = object.setArray(clusterInfoHostnamesField);
for (String host : info.getHostnames()) {
array.addString(host);
}
}
private void zoneIdToSlime(ZoneId zone, Cursor object) {
object.setString(environmentField, zone.environment().value());
object.setString(regionField, zone.region().value());
}
private void toSlime(SourceRevision sourceRevision, Cursor object) {
object.setString(repositoryField, sourceRevision.repository());
object.setString(branchField, sourceRevision.branch());
object.setString(commitField, sourceRevision.commit());
}
private void toSlime(Map<JobType, Instant> jobPauses, Cursor cursor) {
Cursor jobStatusArray = cursor.setArray(jobStatusField);
jobPauses.forEach((type, until) -> {
Cursor jobPauseObject = jobStatusArray.addObject();
jobPauseObject.setString(jobTypeField, type.jobName());
jobPauseObject.setLong(pausedUntilField, until.toEpochMilli());
});
}
private void toSlime(Change deploying, Cursor parentObject, String fieldName) {
if (deploying.isEmpty()) return;
Cursor object = parentObject.setObject(fieldName);
if (deploying.platform().isPresent())
object.setString(versionField, deploying.platform().get().toString());
if (deploying.application().isPresent())
toSlime(deploying.application().get(), object);
if (deploying.isPinned())
object.setBool(pinnedField, true);
}
private void toSlime(RotationStatus status, Cursor array) {
status.asMap().forEach((rotationId, targets) -> {
Cursor rotationObject = array.addObject();
rotationObject.setString(rotationIdField, rotationId.asString());
rotationObject.setLong(lastUpdatedField, targets.lastUpdated().toEpochMilli());
Cursor statusArray = rotationObject.setArray(statusField);
targets.asMap().forEach((zone, state) -> {
Cursor statusObject = statusArray.addObject();
zoneIdToSlime(zone, statusObject);
statusObject.setString(rotationStateField, state.name());
});
});
}
private void assignedRotationsToSlime(List<AssignedRotation> rotations, Cursor parent, String fieldName) {
var rotationsArray = parent.setArray(fieldName);
for (var rotation : rotations) {
var object = rotationsArray.addObject();
object.setString(assignedRotationEndpointField, rotation.endpointId().id());
object.setString(assignedRotationRotationField, rotation.rotationId().asString());
object.setString(assignedRotationClusterField, rotation.clusterId().value());
}
}
public Application fromSlime(byte[] data) {
var key = Hashing.sipHash24().hashBytes(data).asLong();
try {
return cache.get(key, () -> fromSlime(SlimeUtils.jsonToSlime(data)));
} catch (ExecutionException e) {
throw new UncheckedExecutionException(e);
}
}
private Application fromSlime(Slime slime) {
Inspector root = slime.get();
TenantAndApplicationId id = TenantAndApplicationId.fromSerialized(root.field(idField).asString());
Instant createdAt = Instant.ofEpochMilli(root.field(createdAtField).asLong());
DeploymentSpec deploymentSpec = DeploymentSpec.fromXml(root.field(deploymentSpecField).asString(), false);
ValidationOverrides validationOverrides = ValidationOverrides.fromXml(root.field(validationOverridesField).asString());
Optional<IssueId> deploymentIssueId = Serializers.optionalString(root.field(deploymentIssueField)).map(IssueId::from);
Optional<IssueId> ownershipIssueId = Serializers.optionalString(root.field(ownershipIssueIdField)).map(IssueId::from);
Optional<User> owner = Serializers.optionalString(root.field(ownerField)).map(User::from);
OptionalInt majorVersion = Serializers.optionalInteger(root.field(majorVersionField));
ApplicationMetrics metrics = new ApplicationMetrics(root.field(queryQualityField).asDouble(),
root.field(writeQualityField).asDouble());
Set<PublicKey> deployKeys = deployKeysFromSlime(root.field(pemDeployKeysField));
List<Instance> instances = instancesFromSlime(id, deploymentSpec, root.field(instancesField));
OptionalLong projectId = Serializers.optionalLong(root.field(projectIdField));
Optional<ApplicationVersion> latestVersion = latestVersionFromSlime(root.field(latestVersionField));
return new Application(id, createdAt, deploymentSpec, validationOverrides,
deploymentIssueId, ownershipIssueId, owner, majorVersion, metrics,
deployKeys, projectId, latestVersion, instances);
}
private Optional<ApplicationVersion> latestVersionFromSlime(Inspector latestVersionObject) {
if (latestVersionObject.valid())
return Optional.of(applicationVersionFromSlime(latestVersionObject));
return Optional.empty();
}
private List<Instance> instancesFromSlime(TenantAndApplicationId id, DeploymentSpec deploymentSpec, Inspector field) {
List<Instance> instances = new ArrayList<>();
field.traverse((ArrayTraverser) (name, object) -> {
InstanceName instanceName = InstanceName.from(object.field(instanceNameField).asString());
List<Deployment> deployments = deploymentsFromSlime(object.field(deploymentsField));
Map<JobType, Instant> jobPauses = jobPausesFromSlime(object.field(deploymentJobsField));
List<AssignedRotation> assignedRotations = assignedRotationsFromSlime(deploymentSpec, instanceName, object);
RotationStatus rotationStatus = rotationStatusFromSlime(object);
Change change = changeFromSlime(object.field(deployingField));
instances.add(new Instance(id.instance(instanceName),
deployments,
jobPauses,
assignedRotations,
rotationStatus,
change));
});
return instances;
}
private Set<PublicKey> deployKeysFromSlime(Inspector array) {
Set<PublicKey> keys = new LinkedHashSet<>();
array.traverse((ArrayTraverser) (__, key) -> keys.add(KeyUtils.fromPemEncodedPublicKey(key.asString())));
return keys;
}
private List<Deployment> deploymentsFromSlime(Inspector array) {
List<Deployment> deployments = new ArrayList<>();
array.traverse((ArrayTraverser) (int i, Inspector item) -> deployments.add(deploymentFromSlime(item)));
return deployments;
}
private Deployment deploymentFromSlime(Inspector deploymentObject) {
return new Deployment(zoneIdFromSlime(deploymentObject.field(zoneField)),
applicationVersionFromSlime(deploymentObject.field(applicationPackageRevisionField)),
Version.fromString(deploymentObject.field(versionField).asString()),
Instant.ofEpochMilli(deploymentObject.field(deployTimeField).asLong()),
clusterInfoMapFromSlime(deploymentObject.field(clusterInfoField)),
deploymentMetricsFromSlime(deploymentObject.field(deploymentMetricsField)),
DeploymentActivity.create(Serializers.optionalInstant(deploymentObject.field(lastQueriedField)),
Serializers.optionalInstant(deploymentObject.field(lastWrittenField)),
Serializers.optionalDouble(deploymentObject.field(lastQueriesPerSecondField)),
Serializers.optionalDouble(deploymentObject.field(lastWritesPerSecondField))));
}
private DeploymentMetrics deploymentMetricsFromSlime(Inspector object) {
Optional<Instant> instant = object.field(deploymentMetricsUpdateTime).valid() ?
Optional.of(Instant.ofEpochMilli(object.field(deploymentMetricsUpdateTime).asLong())) :
Optional.empty();
return new DeploymentMetrics(object.field(deploymentMetricsQPSField).asDouble(),
object.field(deploymentMetricsWPSField).asDouble(),
object.field(deploymentMetricsDocsField).asDouble(),
object.field(deploymentMetricsQueryLatencyField).asDouble(),
object.field(deploymentMetricsWriteLatencyField).asDouble(),
instant,
deploymentWarningsFrom(object.field(deploymentMetricsWarningsField)));
}
private Map<DeploymentMetrics.Warning, Integer> deploymentWarningsFrom(Inspector object) {
Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>();
object.traverse((ObjectTraverser) (name, value) -> warnings.put(DeploymentMetrics.Warning.valueOf(name),
(int) value.asLong()));
return Collections.unmodifiableMap(warnings);
}
private RotationStatus rotationStatusFromSlime(Inspector parentObject) {
var object = parentObject.field(rotationStatusField);
var statusMap = new LinkedHashMap<RotationId, RotationStatus.Targets>();
object.traverse((ArrayTraverser) (idx, statusObject) -> statusMap.put(new RotationId(statusObject.field(rotationIdField).asString()),
new RotationStatus.Targets(
singleRotationStatusFromSlime(statusObject.field(statusField)),
Instant.ofEpochMilli(statusObject.field(lastUpdatedField).asLong()))));
return RotationStatus.from(statusMap);
}
private Map<ZoneId, RotationState> singleRotationStatusFromSlime(Inspector object) {
if (!object.valid()) {
return Collections.emptyMap();
}
Map<ZoneId, RotationState> rotationStatus = new LinkedHashMap<>();
object.traverse((ArrayTraverser) (idx, statusObject) -> {
var zone = zoneIdFromSlime(statusObject);
var status = RotationState.valueOf(statusObject.field(rotationStateField).asString());
rotationStatus.put(zone, status);
});
return Collections.unmodifiableMap(rotationStatus);
}
private Map<ClusterSpec.Id, ClusterInfo> clusterInfoMapFromSlime (Inspector object) {
Map<ClusterSpec.Id, ClusterInfo> map = new HashMap<>();
object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterInfoFromSlime(value)));
return map;
}
private ClusterInfo clusterInfoFromSlime(Inspector inspector) {
String flavor = inspector.field(clusterInfoFlavorField).asString();
int cost = (int)inspector.field(clusterInfoCostField).asLong();
String type = inspector.field(clusterInfoTypeField).asString();
double flavorCpu = inspector.field(clusterInfoCpuField).asDouble();
double flavorMem = inspector.field(clusterInfoMemField).asDouble();
double flavorDisk = inspector.field(clusterInfoDiskField).asDouble();
List<String> hostnames = new ArrayList<>();
inspector.field(clusterInfoHostnamesField).traverse((ArrayTraverser)(int index, Inspector value) -> hostnames.add(value.asString()));
return new ClusterInfo(flavor, cost, flavorCpu, flavorMem, flavorDisk, ClusterSpec.Type.from(type), hostnames);
}
private ZoneId zoneIdFromSlime(Inspector object) {
return ZoneId.from(object.field(environmentField).asString(), object.field(regionField).asString());
}
private ApplicationVersion applicationVersionFromSlime(Inspector object) {
if ( ! object.valid()) return ApplicationVersion.unknown;
OptionalLong applicationBuildNumber = Serializers.optionalLong(object.field(applicationBuildNumberField));
if (applicationBuildNumber.isEmpty())
return ApplicationVersion.unknown;
Optional<SourceRevision> sourceRevision = sourceRevisionFromSlime(object.field(sourceRevisionField));
Optional<String> authorEmail = Serializers.optionalString(object.field(authorEmailField));
Optional<Version> compileVersion = Serializers.optionalString(object.field(compileVersionField)).map(Version::fromString);
Optional<Instant> buildTime = Serializers.optionalInstant(object.field(buildTimeField));
Optional<String> sourceUrl = Serializers.optionalString(object.field(sourceUrlField));
Optional<String> commit = Serializers.optionalString(object.field(commitField));
return new ApplicationVersion(sourceRevision, applicationBuildNumber, authorEmail, compileVersion, buildTime, sourceUrl, commit);
}
private Optional<SourceRevision> sourceRevisionFromSlime(Inspector object) {
if ( ! object.valid()) return Optional.empty();
var repository = object.field(repositoryField).asString();
var branch = object.field(branchField).asString();
var commit = object.field(commitField).asString();
if (repository.isBlank() && branch.isBlank() && commit.isBlank()) return Optional.empty();
return Optional.of(new SourceRevision(repository, branch, commit));
}
private Map<JobType, Instant> jobPausesFromSlime(Inspector object) {
Map<JobType, Instant> jobPauses = new HashMap<>();
object.field(jobStatusField).traverse((ArrayTraverser) (__, jobPauseObject) ->
JobType.fromOptionalJobName(jobPauseObject.field(jobTypeField).asString())
.ifPresent(jobType -> jobPauses.put(jobType,
Instant.ofEpochMilli(jobPauseObject.field(pausedUntilField).asLong()))));
return jobPauses;
}
private Change changeFromSlime(Inspector object) {
if ( ! object.valid()) return Change.empty();
Inspector versionFieldValue = object.field(versionField);
Change change = Change.empty();
if (versionFieldValue.valid())
change = Change.of(Version.fromString(versionFieldValue.asString()));
if (object.field(applicationBuildNumberField).valid())
change = change.with(applicationVersionFromSlime(object));
if (object.field(pinnedField).asBool())
change = change.withPin();
return change;
}
private List<AssignedRotation> assignedRotationsFromSlime(DeploymentSpec deploymentSpec, InstanceName instance, Inspector root) {
var assignedRotations = new LinkedHashMap<EndpointId, AssignedRotation>();
root.field(assignedRotationsField).traverse((ArrayTraverser) (idx, inspector) -> {
var clusterId = new ClusterSpec.Id(inspector.field(assignedRotationClusterField).asString());
var endpointId = EndpointId.of(inspector.field(assignedRotationEndpointField).asString());
var rotationId = new RotationId(inspector.field(assignedRotationRotationField).asString());
var regions = deploymentSpec.instance(instance)
.map(spec -> globalEndpointRegions(spec, endpointId))
.orElse(Set.of());
assignedRotations.putIfAbsent(endpointId, new AssignedRotation(clusterId, endpointId, rotationId, regions));
});
return List.copyOf(assignedRotations.values());
}
private Set<RegionName> globalEndpointRegions(DeploymentInstanceSpec spec, EndpointId endpointId) {
if (spec.globalServiceId().isPresent())
return spec.zones().stream()
.flatMap(zone -> zone.region().stream())
.collect(Collectors.toSet());
return spec.endpoints().stream()
.filter(endpoint -> endpoint.endpointId().equals(endpointId.id()))
.flatMap(endpoint -> endpoint.regions().stream())
.collect(Collectors.toSet());
}
} |
mixing sent and received ping sequence number here seems like a bad idea | public boolean isLastReceivedPong(long pingId ) {
long last = lastPing.get();
while ((pingId > last) && ! lastPing.compareAndSet(last, pingId)) {
last = pingSequence.get();
}
return last < pingId;
} | last = pingSequence.get(); | public boolean isLastReceivedPong(long pingId ) {
long last = lastPong.get();
while ((pingId > last) && ! lastPong.compareAndSet(last, pingId)) {
last = lastPong.get();
}
return last < pingId;
} | class Node {
private final int key;
private int pathIndex;
private final String hostname;
private final int group;
private final AtomicBoolean statusIsKnown = new AtomicBoolean(false);
private final AtomicBoolean working = new AtomicBoolean(true);
private final AtomicLong activeDocuments = new AtomicLong(0);
private final AtomicLong pingSequence = new AtomicLong(0);
private final AtomicLong lastPing = new AtomicLong(0);
public Node(int key, String hostname, int group) {
this.key = key;
this.hostname = hostname;
this.group = group;
}
/** Give a monotonically increasing sequence number.*/
public long createPingSequenceId() { return pingSequence.incrementAndGet(); }
/** Checks if this pong is received in line and accepted, or out of band and should be ignored..*/
public long getLastReceivedPongId() { return lastPing.get(); }
/** Returns the unique and stable distribution key of this node */
public int key() { return key; }
public int pathIndex() { return pathIndex; }
void setPathIndex(int index) {
pathIndex = index;
}
public String hostname() { return hostname; }
/** Returns the id of this group this node belongs to */
public int group() { return group; }
public void setWorking(boolean working) {
this.statusIsKnown.lazySet(true);
this.working.lazySet(working);
if ( ! working ) {
activeDocuments.set(0);
}
}
/** Returns whether this node is currently responding to requests, or null if status is not known */
public Boolean isWorking() {
return statusIsKnown.get() ? working.get() : null;
}
/** Updates the active documents on this node */
void setActiveDocuments(long activeDocuments) {
this.activeDocuments.set(activeDocuments);
}
/** Returns the active documents on this node. If unknown, 0 is returned. */
long getActiveDocuments() {
return activeDocuments.get();
}
@Override
public int hashCode() { return Objects.hash(hostname, key, pathIndex, group); }
@Override
public boolean equals(Object o) {
if (o == this) return true;
if ( ! (o instanceof Node)) return false;
Node other = (Node)o;
if ( ! Objects.equals(this.hostname, other.hostname)) return false;
if ( ! Objects.equals(this.key, other.key)) return false;
if ( ! Objects.equals(this.pathIndex, other.pathIndex)) return false;
if ( ! Objects.equals(this.group, other.group)) return false;
return true;
}
@Override
public String toString() {
return "search node key = " + key + " hostname = "+ hostname + " path = " + pathIndex + " in group " + group +
" statusIsKnown = " + statusIsKnown.get() + " working = " + working.get() +
" activeDocs = " + activeDocuments.get();
}
} | class Node {
private final int key;
private int pathIndex;
private final String hostname;
private final int group;
private final AtomicBoolean statusIsKnown = new AtomicBoolean(false);
private final AtomicBoolean working = new AtomicBoolean(true);
private final AtomicLong activeDocuments = new AtomicLong(0);
private final AtomicLong pingSequence = new AtomicLong(0);
private final AtomicLong lastPong = new AtomicLong(0);
public Node(int key, String hostname, int group) {
this.key = key;
this.hostname = hostname;
this.group = group;
}
/** Give a monotonically increasing sequence number.*/
public long createPingSequenceId() { return pingSequence.incrementAndGet(); }
/** Checks if this pong is received in line and accepted, or out of band and should be ignored..*/
public long getLastReceivedPongId() { return lastPong.get(); }
/** Returns the unique and stable distribution key of this node */
public int key() { return key; }
public int pathIndex() { return pathIndex; }
void setPathIndex(int index) {
pathIndex = index;
}
public String hostname() { return hostname; }
/** Returns the id of this group this node belongs to */
public int group() { return group; }
public void setWorking(boolean working) {
this.statusIsKnown.lazySet(true);
this.working.lazySet(working);
if ( ! working ) {
activeDocuments.set(0);
}
}
/** Returns whether this node is currently responding to requests, or null if status is not known */
public Boolean isWorking() {
return statusIsKnown.get() ? working.get() : null;
}
/** Updates the active documents on this node */
void setActiveDocuments(long activeDocuments) {
this.activeDocuments.set(activeDocuments);
}
/** Returns the active documents on this node. If unknown, 0 is returned. */
long getActiveDocuments() {
return activeDocuments.get();
}
@Override
public int hashCode() { return Objects.hash(hostname, key, pathIndex, group); }
@Override
public boolean equals(Object o) {
if (o == this) return true;
if ( ! (o instanceof Node)) return false;
Node other = (Node)o;
if ( ! Objects.equals(this.hostname, other.hostname)) return false;
if ( ! Objects.equals(this.key, other.key)) return false;
if ( ! Objects.equals(this.pathIndex, other.pathIndex)) return false;
if ( ! Objects.equals(this.group, other.group)) return false;
return true;
}
@Override
public String toString() {
return "search node key = " + key + " hostname = "+ hostname + " path = " + pathIndex + " in group " + group +
" statusIsKnown = " + statusIsKnown.get() + " working = " + working.get() +
" activeDocs = " + activeDocuments.get();
}
} |
Fixed. | public boolean isLastReceivedPong(long pingId ) {
long last = lastPing.get();
while ((pingId > last) && ! lastPing.compareAndSet(last, pingId)) {
last = pingSequence.get();
}
return last < pingId;
} | last = pingSequence.get(); | public boolean isLastReceivedPong(long pingId ) {
long last = lastPong.get();
while ((pingId > last) && ! lastPong.compareAndSet(last, pingId)) {
last = lastPong.get();
}
return last < pingId;
} | class Node {
private final int key;
private int pathIndex;
private final String hostname;
private final int group;
private final AtomicBoolean statusIsKnown = new AtomicBoolean(false);
private final AtomicBoolean working = new AtomicBoolean(true);
private final AtomicLong activeDocuments = new AtomicLong(0);
private final AtomicLong pingSequence = new AtomicLong(0);
private final AtomicLong lastPing = new AtomicLong(0);
public Node(int key, String hostname, int group) {
this.key = key;
this.hostname = hostname;
this.group = group;
}
/** Give a monotonically increasing sequence number.*/
public long createPingSequenceId() { return pingSequence.incrementAndGet(); }
/** Checks if this pong is received in line and accepted, or out of band and should be ignored..*/
public long getLastReceivedPongId() { return lastPing.get(); }
/** Returns the unique and stable distribution key of this node */
public int key() { return key; }
public int pathIndex() { return pathIndex; }
void setPathIndex(int index) {
pathIndex = index;
}
public String hostname() { return hostname; }
/** Returns the id of this group this node belongs to */
public int group() { return group; }
public void setWorking(boolean working) {
this.statusIsKnown.lazySet(true);
this.working.lazySet(working);
if ( ! working ) {
activeDocuments.set(0);
}
}
/** Returns whether this node is currently responding to requests, or null if status is not known */
public Boolean isWorking() {
return statusIsKnown.get() ? working.get() : null;
}
/** Updates the active documents on this node */
void setActiveDocuments(long activeDocuments) {
this.activeDocuments.set(activeDocuments);
}
/** Returns the active documents on this node. If unknown, 0 is returned. */
long getActiveDocuments() {
return activeDocuments.get();
}
@Override
public int hashCode() { return Objects.hash(hostname, key, pathIndex, group); }
@Override
public boolean equals(Object o) {
if (o == this) return true;
if ( ! (o instanceof Node)) return false;
Node other = (Node)o;
if ( ! Objects.equals(this.hostname, other.hostname)) return false;
if ( ! Objects.equals(this.key, other.key)) return false;
if ( ! Objects.equals(this.pathIndex, other.pathIndex)) return false;
if ( ! Objects.equals(this.group, other.group)) return false;
return true;
}
@Override
public String toString() {
return "search node key = " + key + " hostname = "+ hostname + " path = " + pathIndex + " in group " + group +
" statusIsKnown = " + statusIsKnown.get() + " working = " + working.get() +
" activeDocs = " + activeDocuments.get();
}
} | class Node {
private final int key;
private int pathIndex;
private final String hostname;
private final int group;
private final AtomicBoolean statusIsKnown = new AtomicBoolean(false);
private final AtomicBoolean working = new AtomicBoolean(true);
private final AtomicLong activeDocuments = new AtomicLong(0);
private final AtomicLong pingSequence = new AtomicLong(0);
private final AtomicLong lastPong = new AtomicLong(0);
public Node(int key, String hostname, int group) {
this.key = key;
this.hostname = hostname;
this.group = group;
}
/** Give a monotonically increasing sequence number.*/
public long createPingSequenceId() { return pingSequence.incrementAndGet(); }
/** Checks if this pong is received in line and accepted, or out of band and should be ignored..*/
public long getLastReceivedPongId() { return lastPong.get(); }
/** Returns the unique and stable distribution key of this node */
public int key() { return key; }
public int pathIndex() { return pathIndex; }
void setPathIndex(int index) {
pathIndex = index;
}
public String hostname() { return hostname; }
/** Returns the id of this group this node belongs to */
public int group() { return group; }
public void setWorking(boolean working) {
this.statusIsKnown.lazySet(true);
this.working.lazySet(working);
if ( ! working ) {
activeDocuments.set(0);
}
}
/** Returns whether this node is currently responding to requests, or null if status is not known */
public Boolean isWorking() {
return statusIsKnown.get() ? working.get() : null;
}
/** Updates the active documents on this node */
void setActiveDocuments(long activeDocuments) {
this.activeDocuments.set(activeDocuments);
}
/** Returns the active documents on this node. If unknown, 0 is returned. */
long getActiveDocuments() {
return activeDocuments.get();
}
@Override
public int hashCode() { return Objects.hash(hostname, key, pathIndex, group); }
@Override
public boolean equals(Object o) {
if (o == this) return true;
if ( ! (o instanceof Node)) return false;
Node other = (Node)o;
if ( ! Objects.equals(this.hostname, other.hostname)) return false;
if ( ! Objects.equals(this.key, other.key)) return false;
if ( ! Objects.equals(this.pathIndex, other.pathIndex)) return false;
if ( ! Objects.equals(this.group, other.group)) return false;
return true;
}
@Override
public String toString() {
return "search node key = " + key + " hostname = "+ hostname + " path = " + pathIndex + " in group " + group +
" statusIsKnown = " + statusIsKnown.get() + " working = " + working.get() +
" activeDocs = " + activeDocuments.get();
}
} |
if (isRanked(child)) return true; No need to iterate all children if one is unhappy. | private static boolean isRanked(Item item) {
if (item instanceof CompositeItem) {
boolean isRanked = false;
for (Item child : ((CompositeItem)item).items())
isRanked |= isRanked(child);
return isRanked;
}
else if (item instanceof HasIndexItem && Hit.SDDOCNAME_FIELD.equals(((HasIndexItem)item).getIndexName())) {
return false;
}
else {
return item.isRanked();
}
} | isRanked |= isRanked(child); | private static boolean isRanked(Item item) {
if (item instanceof CompositeItem) {
for (Item child : ((CompositeItem)item).items())
if (isRanked(child)) return true;
return false;
}
else if (item instanceof HasIndexItem && Hit.SDDOCNAME_FIELD.equals(((HasIndexItem)item).getIndexName())) {
return false;
}
else {
return item.isRanked();
}
} | class QueryRewrite {
private enum Recall { RECALLS_EVERYTHING, RECALLS_NOTHING, UNKNOWN_RECALL }
/**
* Optimize multiple NotItems under and or by collapsing them in to one and leaving
* the positive ones behind in its place and moving itself with the original and as its positive item
* and the union of all the negative items of all the original NotItems as its negative items.
*/
public static void optimizeAndNot(Query query) {
Item root = query.getModel().getQueryTree().getRoot();
Item possibleNewRoot = optimizeAndNot(root);
if (root != possibleNewRoot) {
query.getModel().getQueryTree().setRoot(possibleNewRoot);
}
}
/**
* Optimizes the given query tree based on its {@link Model
*/
public static void optimizeByRestrict(Query query) {
if (query.getModel().getRestrict().size() != 1) {
return;
}
Item root = query.getModel().getQueryTree().getRoot();
if (optimizeByRestrict(root, query.getModel().getRestrict().iterator().next()) == Recall.RECALLS_NOTHING) {
query.getModel().getQueryTree().setRoot(new NullItem());
}
}
/**
* Collapses all single-child {@link CompositeItem}s into their parent item.
*/
public static void collapseSingleComposites(Query query) {
Item oldRoot = query.getModel().getQueryTree().getRoot();
Item newRoot = collapseSingleComposites(oldRoot);
if (oldRoot != newRoot) {
query.getModel().getQueryTree().setRoot(newRoot);
}
}
/**
* Replaces and {@link SimpleIndexedItem} searching in the {@link Hit
* appropriate for the search node.
*/
public static void rewriteSddocname(Query query) {
Item oldRoot = query.getModel().getQueryTree().getRoot();
Item newRoot = rewriteSddocname(oldRoot);
if (oldRoot != newRoot) {
query.getModel().getQueryTree().setRoot(newRoot);
}
}
private static Item optimizeAndNot(Item node) {
if (node instanceof CompositeItem) {
return extractAndNotRecursively((CompositeItem) node);
}
return node;
}
private static CompositeItem extractAndNotRecursively(CompositeItem parent) {
for (int i = 0; i < parent.getItemCount(); i++) {
Item child = parent.getItem(i);
Item possibleNewChild = optimizeAndNot(child);
if (child != possibleNewChild) {
parent.setItem(i, possibleNewChild);
}
}
if (parent instanceof AndItem) {
return extractAndNot((AndItem) parent);
}
return parent;
}
private static CompositeItem extractAndNot(AndItem parent) {
NotItem theOnlyNot = null;
for (int i = 0; i < parent.getItemCount(); i++) {
Item child = parent.getItem(i);
if (child instanceof NotItem) {
NotItem thisNot = (NotItem) child;
parent.setItem(i, thisNot.getPositiveItem());
if (theOnlyNot == null) {
theOnlyNot = thisNot;
theOnlyNot.setPositiveItem(parent);
} else {
for (int j=1; j < thisNot.getItemCount(); j++) {
theOnlyNot.addNegativeItem(thisNot.getItem(j));
}
}
}
}
return (theOnlyNot != null) ? theOnlyNot : parent;
}
private static Recall optimizeByRestrict(Item item, String restrictParam) {
if (item instanceof SimpleIndexedItem) {
return optimizeIndexedItemByRestrict((SimpleIndexedItem)item, restrictParam);
} else if (item instanceof NotItem) {
return optimizeNotItemByRestrict((NotItem)item, restrictParam);
} else if (item instanceof CompositeItem) {
return optimizeCompositeItemByRestrict((CompositeItem)item, restrictParam);
} else {
return Recall.UNKNOWN_RECALL;
}
}
private static Recall optimizeIndexedItemByRestrict(SimpleIndexedItem item, String restrictParam) {
if (!Hit.SDDOCNAME_FIELD.equals(item.getIndexName())) {
return Recall.UNKNOWN_RECALL;
}
return restrictParam.equals(item.getIndexedString())
? Recall.RECALLS_EVERYTHING
: Recall.RECALLS_NOTHING;
}
private static Recall optimizeNotItemByRestrict(NotItem item, String restrictParam) {
if (optimizeByRestrict(item.getItem(0), restrictParam) == Recall.RECALLS_NOTHING) {
return Recall.RECALLS_NOTHING;
}
for (int i = item.getItemCount(); --i >= 1; ) {
Item child = item.getItem(i);
switch (optimizeByRestrict(child, restrictParam)) {
case RECALLS_EVERYTHING:
return Recall.RECALLS_NOTHING;
case RECALLS_NOTHING:
item.removeItem(i);
break;
}
}
return Recall.UNKNOWN_RECALL;
}
private static Recall optimizeCompositeItemByRestrict(CompositeItem item, String restrictParam) {
Recall recall = Recall.UNKNOWN_RECALL;
for (int i = item.getItemCount(); --i >= 0; ) {
switch (optimizeByRestrict(item.getItem(i), restrictParam)) {
case RECALLS_EVERYTHING:
if ((item instanceof OrItem) || (item instanceof EquivItem)) {
removeOtherNonrankedChildren(item, i);
recall = Recall.RECALLS_EVERYTHING;
} else if ((item instanceof AndItem) || (item instanceof NearItem)) {
if ( ! isRanked(item.getItem(i))) {
item.removeItem(i);
}
} else if (item instanceof RankItem) {
} else {
throw new UnsupportedOperationException(item.getClass().getName());
}
break;
case RECALLS_NOTHING:
if ((item instanceof OrItem) || (item instanceof EquivItem)) {
item.removeItem(i);
} else if ((item instanceof AndItem) || (item instanceof NearItem)) {
return Recall.RECALLS_NOTHING;
} else if (item instanceof RankItem) {
item.removeItem(i);
} else {
throw new UnsupportedOperationException(item.getClass().getName());
}
break;
}
}
return recall;
}
/**
 * Removes every child of the given parent except the child at the given index
 * and any children that contribute to ranking.
 */
private static void removeOtherNonrankedChildren(CompositeItem parent, int indexOfChildToKeep) {
    Item keep = parent.getItem(indexOfChildToKeep);
    // Walk backwards so removals do not disturb the indices still to visit.
    for (int i = parent.getItemCount() - 1; i >= 0; i--) {
        Item candidate = parent.getItem(i);
        if (candidate == keep) continue;        // always retain the designated child
        if (candidate.isRanked()) continue;     // retain ranking contributors
        parent.removeItem(i);
    }
}
// Recursively collapses composites with exactly one child into that child,
// returning the (possibly new) item to use in place of the argument.
private static Item collapseSingleComposites(Item item) {
if (!(item instanceof CompositeItem)) {
return item;
}
CompositeItem parent = (CompositeItem)item;
int numChildren = parent.getItemCount();
// Collapse bottom-up: fix the children first, then this node itself.
for (int i = 0; i < numChildren; ++i) {
Item oldChild = parent.getItem(i);
Item newChild = collapseSingleComposites(oldChild);
if (oldChild != newChild) {
parent.setItem(i, newChild);
}
}
// If this composite ended up with a single child, replace it by that child;
// otherwise keep the original item.
return parent.extractSingleChild().orElse(item);
}
// Recursively replaces sddocname terms with an equivalent substring search in
// the "[documentmetastore]" index, returning the item to use in place of the argument.
private static Item rewriteSddocname(Item item) {
if (item instanceof CompositeItem) {
CompositeItem parent = (CompositeItem)item;
// Rewrite children in place; only setItem when the child actually changed.
for (int i = 0, len = parent.getItemCount(); i < len; ++i) {
Item oldChild = parent.getItem(i);
Item newChild = rewriteSddocname(oldChild);
if (oldChild != newChild) {
parent.setItem(i, newChild);
}
}
} else if (item instanceof SimpleIndexedItem) {
SimpleIndexedItem oldItem = (SimpleIndexedItem)item;
if (Hit.SDDOCNAME_FIELD.equals(oldItem.getIndexName())) {
// Search the same string, but against the document meta store index.
SubstringItem newItem = new SubstringItem(oldItem.getIndexedString());
newItem.setIndexName("[documentmetastore]");
return newItem;
}
}
return item;
}
} | class QueryRewrite {
private enum Recall { RECALLS_EVERYTHING, RECALLS_NOTHING, UNKNOWN_RECALL }
/**
* Optimize multiple NotItems under and or by collapsing them in to one and leaving
* the positive ones behind in its place and moving itself with the original and as its positive item
* and the union of all the negative items of all the original NotItems as its negative items.
*/
/**
 * Rewrites the query tree of the given query by collapsing NOT items under AND
 * (see the class comment), installing a new root only when the rewrite
 * produced a different item.
 */
public static void optimizeAndNot(Query query) {
    Item oldRoot = query.getModel().getQueryTree().getRoot();
    Item newRoot = optimizeAndNot(oldRoot);
    if (oldRoot != newRoot)
        query.getModel().getQueryTree().setRoot(newRoot);
}
/**
* Optimizes the given query tree based on its {@link Model
*/
public static void optimizeByRestrict(Query query) {
if (query.getModel().getRestrict().size() != 1) {
return;
}
Item root = query.getModel().getQueryTree().getRoot();
if (optimizeByRestrict(root, query.getModel().getRestrict().iterator().next()) == Recall.RECALLS_NOTHING) {
query.getModel().getQueryTree().setRoot(new NullItem());
}
}
/**
* Collapses all single-child {@link CompositeItem}s into their parent item.
*/
public static void collapseSingleComposites(Query query) {
Item oldRoot = query.getModel().getQueryTree().getRoot();
Item newRoot = collapseSingleComposites(oldRoot);
if (oldRoot != newRoot) {
query.getModel().getQueryTree().setRoot(newRoot);
}
}
/**
* Replaces and {@link SimpleIndexedItem} searching in the {@link Hit
* appropriate for the search node.
*/
public static void rewriteSddocname(Query query) {
Item oldRoot = query.getModel().getQueryTree().getRoot();
Item newRoot = rewriteSddocname(oldRoot);
if (oldRoot != newRoot) {
query.getModel().getQueryTree().setRoot(newRoot);
}
}
private static Item optimizeAndNot(Item node) {
if (node instanceof CompositeItem) {
return extractAndNotRecursively((CompositeItem) node);
}
return node;
}
// Applies the AND/NOT optimization bottom-up: first rewrites each child in
// place, then merges NOT children if this node is an AND.
private static CompositeItem extractAndNotRecursively(CompositeItem parent) {
for (int i = 0; i < parent.getItemCount(); i++) {
Item child = parent.getItem(i);
Item possibleNewChild = optimizeAndNot(child);
// Only mutate the parent when the child was actually replaced.
if (child != possibleNewChild) {
parent.setItem(i, possibleNewChild);
}
}
if (parent instanceof AndItem) {
return extractAndNot((AndItem) parent);
}
return parent;
}
// Collapses all NOT children of the given AND into a single NOT: each NOT's
// positive item replaces it inside the AND, the first NOT found becomes the
// surviving one (with the AND as its positive item), and the negative items of
// any further NOTs are merged into it. Returns the surviving NOT, or the AND
// itself if it contained no NOT children.
private static CompositeItem extractAndNot(AndItem parent) {
NotItem theOnlyNot = null;
for (int i = 0; i < parent.getItemCount(); i++) {
Item child = parent.getItem(i);
if (child instanceof NotItem) {
NotItem thisNot = (NotItem) child;
// Leave this NOT's positive part behind in the AND, in its place.
parent.setItem(i, thisNot.getPositiveItem());
if (theOnlyNot == null) {
// First NOT found: it survives, wrapping the whole AND as its positive item.
theOnlyNot = thisNot;
theOnlyNot.setPositiveItem(parent);
} else {
// Merge the negatives of later NOTs into the survivor.
// j starts at 1 — index 0 appears to be the positive item; confirm against NotItem.
for (int j=1; j < thisNot.getItemCount(); j++) {
theOnlyNot.addNegativeItem(thisNot.getItem(j));
}
}
}
}
return (theOnlyNot != null) ? theOnlyNot : parent;
}
/**
 * Dispatches restrict-based optimization to the handler for the item's type.
 * The NOT check precedes the general composite check so NOT items get their
 * specialized handling; items of other types yield no conclusion.
 */
private static Recall optimizeByRestrict(Item item, String restrictParam) {
    if (item instanceof SimpleIndexedItem)
        return optimizeIndexedItemByRestrict((SimpleIndexedItem) item, restrictParam);
    if (item instanceof NotItem)
        return optimizeNotItemByRestrict((NotItem) item, restrictParam);
    if (item instanceof CompositeItem)
        return optimizeCompositeItemByRestrict((CompositeItem) item, restrictParam);
    return Recall.UNKNOWN_RECALL;
}
private static Recall optimizeIndexedItemByRestrict(SimpleIndexedItem item, String restrictParam) {
if (!Hit.SDDOCNAME_FIELD.equals(item.getIndexName())) {
return Recall.UNKNOWN_RECALL;
}
return restrictParam.equals(item.getIndexedString())
? Recall.RECALLS_EVERYTHING
: Recall.RECALLS_NOTHING;
}
private static Recall optimizeNotItemByRestrict(NotItem item, String restrictParam) {
if (optimizeByRestrict(item.getItem(0), restrictParam) == Recall.RECALLS_NOTHING) {
return Recall.RECALLS_NOTHING;
}
for (int i = item.getItemCount(); --i >= 1; ) {
Item child = item.getItem(i);
switch (optimizeByRestrict(child, restrictParam)) {
case RECALLS_EVERYTHING:
return Recall.RECALLS_NOTHING;
case RECALLS_NOTHING:
item.removeItem(i);
break;
}
}
return Recall.UNKNOWN_RECALL;
}
private static Recall optimizeCompositeItemByRestrict(CompositeItem item, String restrictParam) {
Recall recall = Recall.UNKNOWN_RECALL;
for (int i = item.getItemCount(); --i >= 0; ) {
switch (optimizeByRestrict(item.getItem(i), restrictParam)) {
case RECALLS_EVERYTHING:
if ((item instanceof OrItem) || (item instanceof EquivItem)) {
removeOtherNonrankedChildren(item, i);
recall = Recall.RECALLS_EVERYTHING;
} else if ((item instanceof AndItem) || (item instanceof NearItem)) {
if ( ! isRanked(item.getItem(i))) {
item.removeItem(i);
}
} else if (item instanceof RankItem) {
} else {
throw new UnsupportedOperationException(item.getClass().getName());
}
break;
case RECALLS_NOTHING:
if ((item instanceof OrItem) || (item instanceof EquivItem)) {
item.removeItem(i);
} else if ((item instanceof AndItem) || (item instanceof NearItem)) {
return Recall.RECALLS_NOTHING;
} else if (item instanceof RankItem) {
item.removeItem(i);
} else {
throw new UnsupportedOperationException(item.getClass().getName());
}
break;
}
}
return recall;
}
private static void removeOtherNonrankedChildren(CompositeItem parent, int indexOfChildToKeep) {
Item childToKeep = parent.getItem(indexOfChildToKeep);
for (int i = parent.getItemCount(); --i >= 0; ) {
Item child = parent.getItem(i);
if ( child != childToKeep && ! parent.getItem(i).isRanked())
parent.removeItem(i);
}
}
private static Item collapseSingleComposites(Item item) {
if (!(item instanceof CompositeItem)) {
return item;
}
CompositeItem parent = (CompositeItem)item;
int numChildren = parent.getItemCount();
for (int i = 0; i < numChildren; ++i) {
Item oldChild = parent.getItem(i);
Item newChild = collapseSingleComposites(oldChild);
if (oldChild != newChild) {
parent.setItem(i, newChild);
}
}
return parent.extractSingleChild().orElse(item);
}
private static Item rewriteSddocname(Item item) {
if (item instanceof CompositeItem) {
CompositeItem parent = (CompositeItem)item;
for (int i = 0, len = parent.getItemCount(); i < len; ++i) {
Item oldChild = parent.getItem(i);
Item newChild = rewriteSddocname(oldChild);
if (oldChild != newChild) {
parent.setItem(i, newChild);
}
}
} else if (item instanceof SimpleIndexedItem) {
SimpleIndexedItem oldItem = (SimpleIndexedItem)item;
if (Hit.SDDOCNAME_FIELD.equals(oldItem.getIndexName())) {
SubstringItem newItem = new SubstringItem(oldItem.getIndexedString());
newItem.setIndexName("[documentmetastore]");
return newItem;
}
}
return item;
}
} |
By returning existing here we will modify the original instance | public RevisionHistory with(ApplicationVersion revision, JobId job) {
NavigableMap<JobId, NavigableMap<RevisionId, ApplicationVersion>> development = new TreeMap<>(this.development);
NavigableMap<RevisionId, ApplicationVersion> revisions = development.computeIfAbsent(job, __ -> new TreeMap<>());
if ( ! revisions.isEmpty()) revisions.compute(revisions.lastKey(), (__, last) -> last.withoutPackage());
revisions.put(revision.id(), revision);
return new RevisionHistory(production, development);
} | NavigableMap<RevisionId, ApplicationVersion> revisions = development.computeIfAbsent(job, __ -> new TreeMap<>()); | public RevisionHistory with(ApplicationVersion revision, JobId job) {
NavigableMap<JobId, NavigableMap<RevisionId, ApplicationVersion>> development = new TreeMap<>(this.development);
NavigableMap<RevisionId, ApplicationVersion> revisions = development.computeIfAbsent(job, __ -> new TreeMap<>());
if ( ! revisions.isEmpty()) revisions.compute(revisions.lastKey(), (__, last) -> last.withoutPackage());
revisions.put(revision.id(), revision);
return new RevisionHistory(production, development);
} | class RevisionHistory {
private static final Comparator<JobId> comparator = Comparator.comparing(JobId::application).thenComparing(JobId::type);
private final NavigableMap<RevisionId, ApplicationVersion> production;
private final NavigableMap<JobId, NavigableMap<RevisionId, ApplicationVersion>> development;
private RevisionHistory(NavigableMap<RevisionId, ApplicationVersion> production,
NavigableMap<JobId, NavigableMap<RevisionId, ApplicationVersion>> development) {
this.production = production;
this.development = development;
}
public static RevisionHistory empty() {
return ofRevisions(List.of(), Map.of());
}
// Builds a RevisionHistory from the given production revisions and the given
// development revisions per job, keyed and sorted by revision id.
public static RevisionHistory ofRevisions(Collection<ApplicationVersion> productionRevisions,
Map<JobId, ? extends Collection<ApplicationVersion>> developmentRevisions) {
NavigableMap<RevisionId, ApplicationVersion> production = new TreeMap<>();
for (ApplicationVersion revision : productionRevisions)
production.put(revision.id(), revision);
// Development revisions are grouped per job; job order is defined by the class comparator.
NavigableMap<JobId, NavigableMap<RevisionId, ApplicationVersion>> development = new TreeMap<>(comparator);
developmentRevisions.forEach((job, jobRevisions) -> {
NavigableMap<RevisionId, ApplicationVersion> revisions = development.computeIfAbsent(job, __ -> new TreeMap<>());
for (ApplicationVersion revision : jobRevisions)
revisions.put(revision.id(), revision);
});
return new RevisionHistory(production, development);
}
/**
 * Returns a copy of this with the given production revision forgotten,
 * or this unchanged if the revision is not known.
 *
 * @param id the production revision to remove
 * @return a new RevisionHistory without the given revision, or this if absent
 */
public RevisionHistory without(RevisionId id) {
    if ( ! production.containsKey(id)) return this; // nothing to forget: avoid a needless copy
    // Program to the interface, consistent with the sibling with(...) and
    // ofRevisions(...) methods, which declare their copies as NavigableMap.
    NavigableMap<RevisionId, ApplicationVersion> production = new TreeMap<>(this.production);
    production.remove(id);
    return new RevisionHistory(production, development);
}
/** Returns a copy of this with the given development revision forgotten. */
public RevisionHistory without(RevisionId id, JobId job) {
if ( ! development.containsKey(job) || ! development.get(job).containsKey(id)) return this;
NavigableMap<JobId, NavigableMap<RevisionId, ApplicationVersion>> development = new TreeMap<>(this.development);
development.get(job).remove(id);
return new RevisionHistory(production, development);
}
/** Returns a copy of this with the production revision added or updated */
public RevisionHistory with(ApplicationVersion revision) {
NavigableMap<RevisionId, ApplicationVersion> production = new TreeMap<>(this.production);
production.put(revision.id(), revision);
return new RevisionHistory(production, development);
}
/** Returns a copy of this with the new development revision added, and the previous version without a package. */
private static ApplicationVersion revisionOf(RevisionId id, boolean production) {
return new ApplicationVersion(Optional.empty(), OptionalLong.of(id.number()), Optional.empty(),
Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(),
! production, Optional.empty(), false, false);
}
/** Returns the production {@link ApplicationVersion} with this revision ID. */
public ApplicationVersion get(RevisionId id) {
return production.getOrDefault(id, revisionOf(id, true));
}
/** Returns the development {@link ApplicationVersion} for the give job, with this revision ID. */
public ApplicationVersion get(RevisionId id, JobId job) {
return development.getOrDefault(job, Collections.emptyNavigableMap())
.getOrDefault(id, revisionOf(id, false));
}
/** Returns the last submitted production build. */
public Optional<ApplicationVersion> last() {
return Optional.ofNullable(production.lastEntry()).map(Map.Entry::getValue);
}
/** Returns all known production revisions we still have the package for, from oldest to newest. */
public List<ApplicationVersion> withPackage() {
return production.values().stream()
.filter(ApplicationVersion::hasPackage)
.collect(toList());
}
/** Returns the currently deployable revisions of the application. */
public Deque<ApplicationVersion> deployable(boolean ascending) {
// Collect deployable revisions from the packaged production revisions,
// skipping a revision whose bundle hash equals that of its predecessor
// (i.e., consecutive duplicates of the same bundle).
Deque<ApplicationVersion> versions = new ArrayDeque<>();
String previousHash = "";
for (ApplicationVersion version : withPackage()) {
if (version.isDeployable() && (version.bundleHash().isEmpty() || ! previousHash.equals(version.bundleHash().get()))) {
// addLast preserves the oldest-to-newest order; addFirst reverses it.
if (ascending) versions.addLast(version);
else versions.addFirst(version);
}
previousHash = version.bundleHash().orElse("");
}
return versions;
}
/** All known production revisions, in ascending order. */
public List<ApplicationVersion> production() {
return List.copyOf(production.values());
}
/* All known development revisions, in ascending order, per job. */
public NavigableMap<JobId, List<ApplicationVersion>> development() {
NavigableMap<JobId, List<ApplicationVersion>> copy = new TreeMap<>(comparator);
development.forEach((job, revisions) -> copy.put(job, List.copyOf(revisions.values())));
return Collections.unmodifiableNavigableMap(copy);
}
} | class RevisionHistory {
private static final Comparator<JobId> comparator = Comparator.comparing(JobId::application).thenComparing(JobId::type);
private final NavigableMap<RevisionId, ApplicationVersion> production;
private final NavigableMap<JobId, NavigableMap<RevisionId, ApplicationVersion>> development;
private RevisionHistory(NavigableMap<RevisionId, ApplicationVersion> production,
NavigableMap<JobId, NavigableMap<RevisionId, ApplicationVersion>> development) {
this.production = production;
this.development = development;
}
public static RevisionHistory empty() {
return ofRevisions(List.of(), Map.of());
}
public static RevisionHistory ofRevisions(Collection<ApplicationVersion> productionRevisions,
Map<JobId, ? extends Collection<ApplicationVersion>> developmentRevisions) {
NavigableMap<RevisionId, ApplicationVersion> production = new TreeMap<>();
for (ApplicationVersion revision : productionRevisions)
production.put(revision.id(), revision);
NavigableMap<JobId, NavigableMap<RevisionId, ApplicationVersion>> development = new TreeMap<>(comparator);
developmentRevisions.forEach((job, jobRevisions) -> {
NavigableMap<RevisionId, ApplicationVersion> revisions = development.computeIfAbsent(job, __ -> new TreeMap<>());
for (ApplicationVersion revision : jobRevisions)
revisions.put(revision.id(), revision);
});
return new RevisionHistory(production, development);
}
/** Returns a copy of this without any production revisions older than the given. */
public RevisionHistory withoutOlderThan(RevisionId id) {
if (production.headMap(id).isEmpty()) return this;
return new RevisionHistory(production.tailMap(id, true), development);
}
/** Returns a copy of this without any development revisions older than the given. */
public RevisionHistory withoutOlderThan(RevisionId id, JobId job) {
if ( ! development.containsKey(job) || development.get(job).headMap(id).isEmpty()) return this;
NavigableMap<JobId, NavigableMap<RevisionId, ApplicationVersion>> development = new TreeMap<>(this.development);
development.compute(job, (__, revisions) -> revisions.tailMap(id, true));
return new RevisionHistory(production, development);
}
/** Returns a copy of this with the production revision added or updated */
public RevisionHistory with(ApplicationVersion revision) {
NavigableMap<RevisionId, ApplicationVersion> production = new TreeMap<>(this.production);
production.put(revision.id(), revision);
return new RevisionHistory(production, development);
}
/** Returns a copy of this with the new development revision added, and the previous version without a package. */
private static ApplicationVersion revisionOf(RevisionId id, boolean production) {
return new ApplicationVersion(Optional.empty(), OptionalLong.of(id.number()), Optional.empty(),
Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(),
! production, Optional.empty(), false, false);
}
/** Returns the production {@link ApplicationVersion} with this revision ID. */
public ApplicationVersion get(RevisionId id) {
return production.getOrDefault(id, revisionOf(id, true));
}
/** Returns the development {@link ApplicationVersion} for the give job, with this revision ID. */
public ApplicationVersion get(RevisionId id, JobId job) {
return development.getOrDefault(job, Collections.emptyNavigableMap())
.getOrDefault(id, revisionOf(id, false));
}
/** Returns the last submitted production build. */
public Optional<ApplicationVersion> last() {
return Optional.ofNullable(production.lastEntry()).map(Map.Entry::getValue);
}
/** Returns all known production revisions we still have the package for, from oldest to newest. */
public List<ApplicationVersion> withPackage() {
return production.values().stream()
.filter(ApplicationVersion::hasPackage)
.collect(toList());
}
/** Returns the currently deployable revisions of the application. */
public Deque<ApplicationVersion> deployable(boolean ascending) {
Deque<ApplicationVersion> versions = new ArrayDeque<>();
String previousHash = "";
for (ApplicationVersion version : withPackage()) {
if (version.isDeployable() && (version.bundleHash().isEmpty() || ! previousHash.equals(version.bundleHash().get()))) {
if (ascending) versions.addLast(version);
else versions.addFirst(version);
}
previousHash = version.bundleHash().orElse("");
}
return versions;
}
/** All known production revisions, in ascending order. */
public List<ApplicationVersion> production() {
return List.copyOf(production.values());
}
/* All known development revisions, in ascending order, per job. */
public NavigableMap<JobId, List<ApplicationVersion>> development() {
NavigableMap<JobId, List<ApplicationVersion>> copy = new TreeMap<>(comparator);
development.forEach((job, revisions) -> copy.put(job, List.copyOf(revisions.values())));
return Collections.unmodifiableNavigableMap(copy);
}
} |
return false; | private static boolean isRanked(Item item) {
if (item instanceof CompositeItem) {
boolean isRanked = false;
for (Item child : ((CompositeItem)item).items())
isRanked |= isRanked(child);
return isRanked;
}
else if (item instanceof HasIndexItem && Hit.SDDOCNAME_FIELD.equals(((HasIndexItem)item).getIndexName())) {
return false;
}
else {
return item.isRanked();
}
} | return isRanked; | private static boolean isRanked(Item item) {
if (item instanceof CompositeItem) {
for (Item child : ((CompositeItem)item).items())
if (isRanked(child)) return true;
return false;
}
else if (item instanceof HasIndexItem && Hit.SDDOCNAME_FIELD.equals(((HasIndexItem)item).getIndexName())) {
return false;
}
else {
return item.isRanked();
}
} | class QueryRewrite {
private enum Recall { RECALLS_EVERYTHING, RECALLS_NOTHING, UNKNOWN_RECALL }
/**
* Optimize multiple NotItems under and or by collapsing them in to one and leaving
* the positive ones behind in its place and moving itself with the original and as its positive item
* and the union of all the negative items of all the original NotItems as its negative items.
*/
public static void optimizeAndNot(Query query) {
Item root = query.getModel().getQueryTree().getRoot();
Item possibleNewRoot = optimizeAndNot(root);
if (root != possibleNewRoot) {
query.getModel().getQueryTree().setRoot(possibleNewRoot);
}
}
/**
* Optimizes the given query tree based on its {@link Model
*/
public static void optimizeByRestrict(Query query) {
if (query.getModel().getRestrict().size() != 1) {
return;
}
Item root = query.getModel().getQueryTree().getRoot();
if (optimizeByRestrict(root, query.getModel().getRestrict().iterator().next()) == Recall.RECALLS_NOTHING) {
query.getModel().getQueryTree().setRoot(new NullItem());
}
}
/**
* Collapses all single-child {@link CompositeItem}s into their parent item.
*/
public static void collapseSingleComposites(Query query) {
Item oldRoot = query.getModel().getQueryTree().getRoot();
Item newRoot = collapseSingleComposites(oldRoot);
if (oldRoot != newRoot) {
query.getModel().getQueryTree().setRoot(newRoot);
}
}
/**
* Replaces and {@link SimpleIndexedItem} searching in the {@link Hit
* appropriate for the search node.
*/
public static void rewriteSddocname(Query query) {
Item oldRoot = query.getModel().getQueryTree().getRoot();
Item newRoot = rewriteSddocname(oldRoot);
if (oldRoot != newRoot) {
query.getModel().getQueryTree().setRoot(newRoot);
}
}
private static Item optimizeAndNot(Item node) {
if (node instanceof CompositeItem) {
return extractAndNotRecursively((CompositeItem) node);
}
return node;
}
private static CompositeItem extractAndNotRecursively(CompositeItem parent) {
for (int i = 0; i < parent.getItemCount(); i++) {
Item child = parent.getItem(i);
Item possibleNewChild = optimizeAndNot(child);
if (child != possibleNewChild) {
parent.setItem(i, possibleNewChild);
}
}
if (parent instanceof AndItem) {
return extractAndNot((AndItem) parent);
}
return parent;
}
private static CompositeItem extractAndNot(AndItem parent) {
NotItem theOnlyNot = null;
for (int i = 0; i < parent.getItemCount(); i++) {
Item child = parent.getItem(i);
if (child instanceof NotItem) {
NotItem thisNot = (NotItem) child;
parent.setItem(i, thisNot.getPositiveItem());
if (theOnlyNot == null) {
theOnlyNot = thisNot;
theOnlyNot.setPositiveItem(parent);
} else {
for (int j=1; j < thisNot.getItemCount(); j++) {
theOnlyNot.addNegativeItem(thisNot.getItem(j));
}
}
}
}
return (theOnlyNot != null) ? theOnlyNot : parent;
}
private static Recall optimizeByRestrict(Item item, String restrictParam) {
if (item instanceof SimpleIndexedItem) {
return optimizeIndexedItemByRestrict((SimpleIndexedItem)item, restrictParam);
} else if (item instanceof NotItem) {
return optimizeNotItemByRestrict((NotItem)item, restrictParam);
} else if (item instanceof CompositeItem) {
return optimizeCompositeItemByRestrict((CompositeItem)item, restrictParam);
} else {
return Recall.UNKNOWN_RECALL;
}
}
private static Recall optimizeIndexedItemByRestrict(SimpleIndexedItem item, String restrictParam) {
if (!Hit.SDDOCNAME_FIELD.equals(item.getIndexName())) {
return Recall.UNKNOWN_RECALL;
}
return restrictParam.equals(item.getIndexedString())
? Recall.RECALLS_EVERYTHING
: Recall.RECALLS_NOTHING;
}
private static Recall optimizeNotItemByRestrict(NotItem item, String restrictParam) {
if (optimizeByRestrict(item.getItem(0), restrictParam) == Recall.RECALLS_NOTHING) {
return Recall.RECALLS_NOTHING;
}
for (int i = item.getItemCount(); --i >= 1; ) {
Item child = item.getItem(i);
switch (optimizeByRestrict(child, restrictParam)) {
case RECALLS_EVERYTHING:
return Recall.RECALLS_NOTHING;
case RECALLS_NOTHING:
item.removeItem(i);
break;
}
}
return Recall.UNKNOWN_RECALL;
}
private static Recall optimizeCompositeItemByRestrict(CompositeItem item, String restrictParam) {
Recall recall = Recall.UNKNOWN_RECALL;
for (int i = item.getItemCount(); --i >= 0; ) {
switch (optimizeByRestrict(item.getItem(i), restrictParam)) {
case RECALLS_EVERYTHING:
if ((item instanceof OrItem) || (item instanceof EquivItem)) {
removeOtherNonrankedChildren(item, i);
recall = Recall.RECALLS_EVERYTHING;
} else if ((item instanceof AndItem) || (item instanceof NearItem)) {
if ( ! isRanked(item.getItem(i))) {
item.removeItem(i);
}
} else if (item instanceof RankItem) {
} else {
throw new UnsupportedOperationException(item.getClass().getName());
}
break;
case RECALLS_NOTHING:
if ((item instanceof OrItem) || (item instanceof EquivItem)) {
item.removeItem(i);
} else if ((item instanceof AndItem) || (item instanceof NearItem)) {
return Recall.RECALLS_NOTHING;
} else if (item instanceof RankItem) {
item.removeItem(i);
} else {
throw new UnsupportedOperationException(item.getClass().getName());
}
break;
}
}
return recall;
}
private static void removeOtherNonrankedChildren(CompositeItem parent, int indexOfChildToKeep) {
Item childToKeep = parent.getItem(indexOfChildToKeep);
for (int i = parent.getItemCount(); --i >= 0; ) {
Item child = parent.getItem(i);
if ( child != childToKeep && ! parent.getItem(i).isRanked())
parent.removeItem(i);
}
}
private static Item collapseSingleComposites(Item item) {
if (!(item instanceof CompositeItem)) {
return item;
}
CompositeItem parent = (CompositeItem)item;
int numChildren = parent.getItemCount();
for (int i = 0; i < numChildren; ++i) {
Item oldChild = parent.getItem(i);
Item newChild = collapseSingleComposites(oldChild);
if (oldChild != newChild) {
parent.setItem(i, newChild);
}
}
return parent.extractSingleChild().orElse(item);
}
private static Item rewriteSddocname(Item item) {
if (item instanceof CompositeItem) {
CompositeItem parent = (CompositeItem)item;
for (int i = 0, len = parent.getItemCount(); i < len; ++i) {
Item oldChild = parent.getItem(i);
Item newChild = rewriteSddocname(oldChild);
if (oldChild != newChild) {
parent.setItem(i, newChild);
}
}
} else if (item instanceof SimpleIndexedItem) {
SimpleIndexedItem oldItem = (SimpleIndexedItem)item;
if (Hit.SDDOCNAME_FIELD.equals(oldItem.getIndexName())) {
SubstringItem newItem = new SubstringItem(oldItem.getIndexedString());
newItem.setIndexName("[documentmetastore]");
return newItem;
}
}
return item;
}
} | class QueryRewrite {
private enum Recall { RECALLS_EVERYTHING, RECALLS_NOTHING, UNKNOWN_RECALL }
/**
* Optimize multiple NotItems under and or by collapsing them in to one and leaving
* the positive ones behind in its place and moving itself with the original and as its positive item
* and the union of all the negative items of all the original NotItems as its negative items.
*/
public static void optimizeAndNot(Query query) {
Item root = query.getModel().getQueryTree().getRoot();
Item possibleNewRoot = optimizeAndNot(root);
if (root != possibleNewRoot) {
query.getModel().getQueryTree().setRoot(possibleNewRoot);
}
}
/**
* Optimizes the given query tree based on its {@link Model
*/
public static void optimizeByRestrict(Query query) {
if (query.getModel().getRestrict().size() != 1) {
return;
}
Item root = query.getModel().getQueryTree().getRoot();
if (optimizeByRestrict(root, query.getModel().getRestrict().iterator().next()) == Recall.RECALLS_NOTHING) {
query.getModel().getQueryTree().setRoot(new NullItem());
}
}
/**
* Collapses all single-child {@link CompositeItem}s into their parent item.
*/
public static void collapseSingleComposites(Query query) {
Item oldRoot = query.getModel().getQueryTree().getRoot();
Item newRoot = collapseSingleComposites(oldRoot);
if (oldRoot != newRoot) {
query.getModel().getQueryTree().setRoot(newRoot);
}
}
/**
* Replaces and {@link SimpleIndexedItem} searching in the {@link Hit
* appropriate for the search node.
*/
public static void rewriteSddocname(Query query) {
Item oldRoot = query.getModel().getQueryTree().getRoot();
Item newRoot = rewriteSddocname(oldRoot);
if (oldRoot != newRoot) {
query.getModel().getQueryTree().setRoot(newRoot);
}
}
private static Item optimizeAndNot(Item node) {
if (node instanceof CompositeItem) {
return extractAndNotRecursively((CompositeItem) node);
}
return node;
}
private static CompositeItem extractAndNotRecursively(CompositeItem parent) {
for (int i = 0; i < parent.getItemCount(); i++) {
Item child = parent.getItem(i);
Item possibleNewChild = optimizeAndNot(child);
if (child != possibleNewChild) {
parent.setItem(i, possibleNewChild);
}
}
if (parent instanceof AndItem) {
return extractAndNot((AndItem) parent);
}
return parent;
}
private static CompositeItem extractAndNot(AndItem parent) {
NotItem theOnlyNot = null;
for (int i = 0; i < parent.getItemCount(); i++) {
Item child = parent.getItem(i);
if (child instanceof NotItem) {
NotItem thisNot = (NotItem) child;
parent.setItem(i, thisNot.getPositiveItem());
if (theOnlyNot == null) {
theOnlyNot = thisNot;
theOnlyNot.setPositiveItem(parent);
} else {
for (int j=1; j < thisNot.getItemCount(); j++) {
theOnlyNot.addNegativeItem(thisNot.getItem(j));
}
}
}
}
return (theOnlyNot != null) ? theOnlyNot : parent;
}
private static Recall optimizeByRestrict(Item item, String restrictParam) {
if (item instanceof SimpleIndexedItem) {
return optimizeIndexedItemByRestrict((SimpleIndexedItem)item, restrictParam);
} else if (item instanceof NotItem) {
return optimizeNotItemByRestrict((NotItem)item, restrictParam);
} else if (item instanceof CompositeItem) {
return optimizeCompositeItemByRestrict((CompositeItem)item, restrictParam);
} else {
return Recall.UNKNOWN_RECALL;
}
}
private static Recall optimizeIndexedItemByRestrict(SimpleIndexedItem item, String restrictParam) {
if (!Hit.SDDOCNAME_FIELD.equals(item.getIndexName())) {
return Recall.UNKNOWN_RECALL;
}
return restrictParam.equals(item.getIndexedString())
? Recall.RECALLS_EVERYTHING
: Recall.RECALLS_NOTHING;
}
private static Recall optimizeNotItemByRestrict(NotItem item, String restrictParam) {
if (optimizeByRestrict(item.getItem(0), restrictParam) == Recall.RECALLS_NOTHING) {
return Recall.RECALLS_NOTHING;
}
for (int i = item.getItemCount(); --i >= 1; ) {
Item child = item.getItem(i);
switch (optimizeByRestrict(child, restrictParam)) {
case RECALLS_EVERYTHING:
return Recall.RECALLS_NOTHING;
case RECALLS_NOTHING:
item.removeItem(i);
break;
}
}
return Recall.UNKNOWN_RECALL;
}
private static Recall optimizeCompositeItemByRestrict(CompositeItem item, String restrictParam) {
Recall recall = Recall.UNKNOWN_RECALL;
for (int i = item.getItemCount(); --i >= 0; ) {
switch (optimizeByRestrict(item.getItem(i), restrictParam)) {
case RECALLS_EVERYTHING:
if ((item instanceof OrItem) || (item instanceof EquivItem)) {
removeOtherNonrankedChildren(item, i);
recall = Recall.RECALLS_EVERYTHING;
} else if ((item instanceof AndItem) || (item instanceof NearItem)) {
if ( ! isRanked(item.getItem(i))) {
item.removeItem(i);
}
} else if (item instanceof RankItem) {
} else {
throw new UnsupportedOperationException(item.getClass().getName());
}
break;
case RECALLS_NOTHING:
if ((item instanceof OrItem) || (item instanceof EquivItem)) {
item.removeItem(i);
} else if ((item instanceof AndItem) || (item instanceof NearItem)) {
return Recall.RECALLS_NOTHING;
} else if (item instanceof RankItem) {
item.removeItem(i);
} else {
throw new UnsupportedOperationException(item.getClass().getName());
}
break;
}
}
return recall;
}
private static void removeOtherNonrankedChildren(CompositeItem parent, int indexOfChildToKeep) {
Item childToKeep = parent.getItem(indexOfChildToKeep);
for (int i = parent.getItemCount(); --i >= 0; ) {
Item child = parent.getItem(i);
if ( child != childToKeep && ! parent.getItem(i).isRanked())
parent.removeItem(i);
}
}
private static Item collapseSingleComposites(Item item) {
if (!(item instanceof CompositeItem)) {
return item;
}
CompositeItem parent = (CompositeItem)item;
int numChildren = parent.getItemCount();
for (int i = 0; i < numChildren; ++i) {
Item oldChild = parent.getItem(i);
Item newChild = collapseSingleComposites(oldChild);
if (oldChild != newChild) {
parent.setItem(i, newChild);
}
}
return parent.extractSingleChild().orElse(item);
}
private static Item rewriteSddocname(Item item) {
if (item instanceof CompositeItem) {
CompositeItem parent = (CompositeItem)item;
for (int i = 0, len = parent.getItemCount(); i < len; ++i) {
Item oldChild = parent.getItem(i);
Item newChild = rewriteSddocname(oldChild);
if (oldChild != newChild) {
parent.setItem(i, newChild);
}
}
} else if (item instanceof SimpleIndexedItem) {
SimpleIndexedItem oldItem = (SimpleIndexedItem)item;
if (Hit.SDDOCNAME_FIELD.equals(oldItem.getIndexName())) {
SubstringItem newItem = new SubstringItem(oldItem.getIndexedString());
newItem.setIndexName("[documentmetastore]");
return newItem;
}
}
return item;
}
} |
ok, done | private static boolean isRanked(Item item) {
if (item instanceof CompositeItem) {
boolean isRanked = false;
for (Item child : ((CompositeItem)item).items())
isRanked |= isRanked(child);
return isRanked;
}
else if (item instanceof HasIndexItem && Hit.SDDOCNAME_FIELD.equals(((HasIndexItem)item).getIndexName())) {
return false;
}
else {
return item.isRanked();
}
} | return isRanked; | private static boolean isRanked(Item item) {
if (item instanceof CompositeItem) {
for (Item child : ((CompositeItem)item).items())
if (isRanked(child)) return true;
return false;
}
else if (item instanceof HasIndexItem && Hit.SDDOCNAME_FIELD.equals(((HasIndexItem)item).getIndexName())) {
return false;
}
else {
return item.isRanked();
}
} | class QueryRewrite {
private enum Recall { RECALLS_EVERYTHING, RECALLS_NOTHING, UNKNOWN_RECALL }
/**
* Optimize multiple NotItems under and or by collapsing them in to one and leaving
* the positive ones behind in its place and moving itself with the original and as its positive item
* and the union of all the negative items of all the original NotItems as its negative items.
*/
public static void optimizeAndNot(Query query) {
Item root = query.getModel().getQueryTree().getRoot();
Item possibleNewRoot = optimizeAndNot(root);
if (root != possibleNewRoot) {
query.getModel().getQueryTree().setRoot(possibleNewRoot);
}
}
/**
* Optimizes the given query tree based on its {@link Model
*/
public static void optimizeByRestrict(Query query) {
if (query.getModel().getRestrict().size() != 1) {
return;
}
Item root = query.getModel().getQueryTree().getRoot();
if (optimizeByRestrict(root, query.getModel().getRestrict().iterator().next()) == Recall.RECALLS_NOTHING) {
query.getModel().getQueryTree().setRoot(new NullItem());
}
}
/**
* Collapses all single-child {@link CompositeItem}s into their parent item.
*/
public static void collapseSingleComposites(Query query) {
Item oldRoot = query.getModel().getQueryTree().getRoot();
Item newRoot = collapseSingleComposites(oldRoot);
if (oldRoot != newRoot) {
query.getModel().getQueryTree().setRoot(newRoot);
}
}
/**
* Replaces and {@link SimpleIndexedItem} searching in the {@link Hit
* appropriate for the search node.
*/
public static void rewriteSddocname(Query query) {
Item oldRoot = query.getModel().getQueryTree().getRoot();
Item newRoot = rewriteSddocname(oldRoot);
if (oldRoot != newRoot) {
query.getModel().getQueryTree().setRoot(newRoot);
}
}
private static Item optimizeAndNot(Item node) {
if (node instanceof CompositeItem) {
return extractAndNotRecursively((CompositeItem) node);
}
return node;
}
private static CompositeItem extractAndNotRecursively(CompositeItem parent) {
for (int i = 0; i < parent.getItemCount(); i++) {
Item child = parent.getItem(i);
Item possibleNewChild = optimizeAndNot(child);
if (child != possibleNewChild) {
parent.setItem(i, possibleNewChild);
}
}
if (parent instanceof AndItem) {
return extractAndNot((AndItem) parent);
}
return parent;
}
private static CompositeItem extractAndNot(AndItem parent) {
NotItem theOnlyNot = null;
for (int i = 0; i < parent.getItemCount(); i++) {
Item child = parent.getItem(i);
if (child instanceof NotItem) {
NotItem thisNot = (NotItem) child;
parent.setItem(i, thisNot.getPositiveItem());
if (theOnlyNot == null) {
theOnlyNot = thisNot;
theOnlyNot.setPositiveItem(parent);
} else {
for (int j=1; j < thisNot.getItemCount(); j++) {
theOnlyNot.addNegativeItem(thisNot.getItem(j));
}
}
}
}
return (theOnlyNot != null) ? theOnlyNot : parent;
}
private static Recall optimizeByRestrict(Item item, String restrictParam) {
if (item instanceof SimpleIndexedItem) {
return optimizeIndexedItemByRestrict((SimpleIndexedItem)item, restrictParam);
} else if (item instanceof NotItem) {
return optimizeNotItemByRestrict((NotItem)item, restrictParam);
} else if (item instanceof CompositeItem) {
return optimizeCompositeItemByRestrict((CompositeItem)item, restrictParam);
} else {
return Recall.UNKNOWN_RECALL;
}
}
private static Recall optimizeIndexedItemByRestrict(SimpleIndexedItem item, String restrictParam) {
if (!Hit.SDDOCNAME_FIELD.equals(item.getIndexName())) {
return Recall.UNKNOWN_RECALL;
}
return restrictParam.equals(item.getIndexedString())
? Recall.RECALLS_EVERYTHING
: Recall.RECALLS_NOTHING;
}
private static Recall optimizeNotItemByRestrict(NotItem item, String restrictParam) {
if (optimizeByRestrict(item.getItem(0), restrictParam) == Recall.RECALLS_NOTHING) {
return Recall.RECALLS_NOTHING;
}
for (int i = item.getItemCount(); --i >= 1; ) {
Item child = item.getItem(i);
switch (optimizeByRestrict(child, restrictParam)) {
case RECALLS_EVERYTHING:
return Recall.RECALLS_NOTHING;
case RECALLS_NOTHING:
item.removeItem(i);
break;
}
}
return Recall.UNKNOWN_RECALL;
}
private static Recall optimizeCompositeItemByRestrict(CompositeItem item, String restrictParam) {
Recall recall = Recall.UNKNOWN_RECALL;
for (int i = item.getItemCount(); --i >= 0; ) {
switch (optimizeByRestrict(item.getItem(i), restrictParam)) {
case RECALLS_EVERYTHING:
if ((item instanceof OrItem) || (item instanceof EquivItem)) {
removeOtherNonrankedChildren(item, i);
recall = Recall.RECALLS_EVERYTHING;
} else if ((item instanceof AndItem) || (item instanceof NearItem)) {
if ( ! isRanked(item.getItem(i))) {
item.removeItem(i);
}
} else if (item instanceof RankItem) {
} else {
throw new UnsupportedOperationException(item.getClass().getName());
}
break;
case RECALLS_NOTHING:
if ((item instanceof OrItem) || (item instanceof EquivItem)) {
item.removeItem(i);
} else if ((item instanceof AndItem) || (item instanceof NearItem)) {
return Recall.RECALLS_NOTHING;
} else if (item instanceof RankItem) {
item.removeItem(i);
} else {
throw new UnsupportedOperationException(item.getClass().getName());
}
break;
}
}
return recall;
}
private static void removeOtherNonrankedChildren(CompositeItem parent, int indexOfChildToKeep) {
Item childToKeep = parent.getItem(indexOfChildToKeep);
for (int i = parent.getItemCount(); --i >= 0; ) {
Item child = parent.getItem(i);
if ( child != childToKeep && ! parent.getItem(i).isRanked())
parent.removeItem(i);
}
}
private static Item collapseSingleComposites(Item item) {
if (!(item instanceof CompositeItem)) {
return item;
}
CompositeItem parent = (CompositeItem)item;
int numChildren = parent.getItemCount();
for (int i = 0; i < numChildren; ++i) {
Item oldChild = parent.getItem(i);
Item newChild = collapseSingleComposites(oldChild);
if (oldChild != newChild) {
parent.setItem(i, newChild);
}
}
return parent.extractSingleChild().orElse(item);
}
private static Item rewriteSddocname(Item item) {
if (item instanceof CompositeItem) {
CompositeItem parent = (CompositeItem)item;
for (int i = 0, len = parent.getItemCount(); i < len; ++i) {
Item oldChild = parent.getItem(i);
Item newChild = rewriteSddocname(oldChild);
if (oldChild != newChild) {
parent.setItem(i, newChild);
}
}
} else if (item instanceof SimpleIndexedItem) {
SimpleIndexedItem oldItem = (SimpleIndexedItem)item;
if (Hit.SDDOCNAME_FIELD.equals(oldItem.getIndexName())) {
SubstringItem newItem = new SubstringItem(oldItem.getIndexedString());
newItem.setIndexName("[documentmetastore]");
return newItem;
}
}
return item;
}
} | class QueryRewrite {
private enum Recall { RECALLS_EVERYTHING, RECALLS_NOTHING, UNKNOWN_RECALL }
/**
* Optimize multiple NotItems under and or by collapsing them in to one and leaving
* the positive ones behind in its place and moving itself with the original and as its positive item
* and the union of all the negative items of all the original NotItems as its negative items.
*/
public static void optimizeAndNot(Query query) {
Item root = query.getModel().getQueryTree().getRoot();
Item possibleNewRoot = optimizeAndNot(root);
if (root != possibleNewRoot) {
query.getModel().getQueryTree().setRoot(possibleNewRoot);
}
}
/**
* Optimizes the given query tree based on its {@link Model
*/
public static void optimizeByRestrict(Query query) {
if (query.getModel().getRestrict().size() != 1) {
return;
}
Item root = query.getModel().getQueryTree().getRoot();
if (optimizeByRestrict(root, query.getModel().getRestrict().iterator().next()) == Recall.RECALLS_NOTHING) {
query.getModel().getQueryTree().setRoot(new NullItem());
}
}
/**
* Collapses all single-child {@link CompositeItem}s into their parent item.
*/
public static void collapseSingleComposites(Query query) {
Item oldRoot = query.getModel().getQueryTree().getRoot();
Item newRoot = collapseSingleComposites(oldRoot);
if (oldRoot != newRoot) {
query.getModel().getQueryTree().setRoot(newRoot);
}
}
/**
* Replaces and {@link SimpleIndexedItem} searching in the {@link Hit
* appropriate for the search node.
*/
public static void rewriteSddocname(Query query) {
Item oldRoot = query.getModel().getQueryTree().getRoot();
Item newRoot = rewriteSddocname(oldRoot);
if (oldRoot != newRoot) {
query.getModel().getQueryTree().setRoot(newRoot);
}
}
private static Item optimizeAndNot(Item node) {
if (node instanceof CompositeItem) {
return extractAndNotRecursively((CompositeItem) node);
}
return node;
}
private static CompositeItem extractAndNotRecursively(CompositeItem parent) {
for (int i = 0; i < parent.getItemCount(); i++) {
Item child = parent.getItem(i);
Item possibleNewChild = optimizeAndNot(child);
if (child != possibleNewChild) {
parent.setItem(i, possibleNewChild);
}
}
if (parent instanceof AndItem) {
return extractAndNot((AndItem) parent);
}
return parent;
}
private static CompositeItem extractAndNot(AndItem parent) {
NotItem theOnlyNot = null;
for (int i = 0; i < parent.getItemCount(); i++) {
Item child = parent.getItem(i);
if (child instanceof NotItem) {
NotItem thisNot = (NotItem) child;
parent.setItem(i, thisNot.getPositiveItem());
if (theOnlyNot == null) {
theOnlyNot = thisNot;
theOnlyNot.setPositiveItem(parent);
} else {
for (int j=1; j < thisNot.getItemCount(); j++) {
theOnlyNot.addNegativeItem(thisNot.getItem(j));
}
}
}
}
return (theOnlyNot != null) ? theOnlyNot : parent;
}
private static Recall optimizeByRestrict(Item item, String restrictParam) {
if (item instanceof SimpleIndexedItem) {
return optimizeIndexedItemByRestrict((SimpleIndexedItem)item, restrictParam);
} else if (item instanceof NotItem) {
return optimizeNotItemByRestrict((NotItem)item, restrictParam);
} else if (item instanceof CompositeItem) {
return optimizeCompositeItemByRestrict((CompositeItem)item, restrictParam);
} else {
return Recall.UNKNOWN_RECALL;
}
}
private static Recall optimizeIndexedItemByRestrict(SimpleIndexedItem item, String restrictParam) {
if (!Hit.SDDOCNAME_FIELD.equals(item.getIndexName())) {
return Recall.UNKNOWN_RECALL;
}
return restrictParam.equals(item.getIndexedString())
? Recall.RECALLS_EVERYTHING
: Recall.RECALLS_NOTHING;
}
private static Recall optimizeNotItemByRestrict(NotItem item, String restrictParam) {
if (optimizeByRestrict(item.getItem(0), restrictParam) == Recall.RECALLS_NOTHING) {
return Recall.RECALLS_NOTHING;
}
for (int i = item.getItemCount(); --i >= 1; ) {
Item child = item.getItem(i);
switch (optimizeByRestrict(child, restrictParam)) {
case RECALLS_EVERYTHING:
return Recall.RECALLS_NOTHING;
case RECALLS_NOTHING:
item.removeItem(i);
break;
}
}
return Recall.UNKNOWN_RECALL;
}
private static Recall optimizeCompositeItemByRestrict(CompositeItem item, String restrictParam) {
Recall recall = Recall.UNKNOWN_RECALL;
for (int i = item.getItemCount(); --i >= 0; ) {
switch (optimizeByRestrict(item.getItem(i), restrictParam)) {
case RECALLS_EVERYTHING:
if ((item instanceof OrItem) || (item instanceof EquivItem)) {
removeOtherNonrankedChildren(item, i);
recall = Recall.RECALLS_EVERYTHING;
} else if ((item instanceof AndItem) || (item instanceof NearItem)) {
if ( ! isRanked(item.getItem(i))) {
item.removeItem(i);
}
} else if (item instanceof RankItem) {
} else {
throw new UnsupportedOperationException(item.getClass().getName());
}
break;
case RECALLS_NOTHING:
if ((item instanceof OrItem) || (item instanceof EquivItem)) {
item.removeItem(i);
} else if ((item instanceof AndItem) || (item instanceof NearItem)) {
return Recall.RECALLS_NOTHING;
} else if (item instanceof RankItem) {
item.removeItem(i);
} else {
throw new UnsupportedOperationException(item.getClass().getName());
}
break;
}
}
return recall;
}
private static void removeOtherNonrankedChildren(CompositeItem parent, int indexOfChildToKeep) {
Item childToKeep = parent.getItem(indexOfChildToKeep);
for (int i = parent.getItemCount(); --i >= 0; ) {
Item child = parent.getItem(i);
if ( child != childToKeep && ! parent.getItem(i).isRanked())
parent.removeItem(i);
}
}
private static Item collapseSingleComposites(Item item) {
if (!(item instanceof CompositeItem)) {
return item;
}
CompositeItem parent = (CompositeItem)item;
int numChildren = parent.getItemCount();
for (int i = 0; i < numChildren; ++i) {
Item oldChild = parent.getItem(i);
Item newChild = collapseSingleComposites(oldChild);
if (oldChild != newChild) {
parent.setItem(i, newChild);
}
}
return parent.extractSingleChild().orElse(item);
}
private static Item rewriteSddocname(Item item) {
if (item instanceof CompositeItem) {
CompositeItem parent = (CompositeItem)item;
for (int i = 0, len = parent.getItemCount(); i < len; ++i) {
Item oldChild = parent.getItem(i);
Item newChild = rewriteSddocname(oldChild);
if (oldChild != newChild) {
parent.setItem(i, newChild);
}
}
} else if (item instanceof SimpleIndexedItem) {
SimpleIndexedItem oldItem = (SimpleIndexedItem)item;
if (Hit.SDDOCNAME_FIELD.equals(oldItem.getIndexName())) {
SubstringItem newItem = new SubstringItem(oldItem.getIndexedString());
newItem.setIndexName("[documentmetastore]");
return newItem;
}
}
return item;
}
} |
Log the expected set and actual set? | private boolean verifyEndpointCertificate(EndpointCertificateMetadata endpointCertificateMetadata, Instance instance, ZoneId zone, String warningPrefix) {
try {
var pemEncodedEndpointCertificate = secretStore.getSecret(endpointCertificateMetadata.certName(), endpointCertificateMetadata.version());
if (pemEncodedEndpointCertificate == null)
return logWarning(warningPrefix, "Secret store returned null for certificate");
List<X509Certificate> x509CertificateList = X509CertificateUtils.certificateListFromPem(pemEncodedEndpointCertificate);
if (x509CertificateList.isEmpty()) return logWarning(warningPrefix, "Empty certificate list");
if (x509CertificateList.size() < 2)
return logWarning(warningPrefix, "Only a single certificate found in chain - intermediate certificates likely missing");
Instant now = clock.instant();
Instant firstExpiry = Instant.MAX;
for (X509Certificate x509Certificate : x509CertificateList) {
Instant notBefore = x509Certificate.getNotBefore().toInstant();
Instant notAfter = x509Certificate.getNotAfter().toInstant();
if (now.isBefore(notBefore)) return logWarning(warningPrefix, "Certificate is not yet valid");
if (now.isAfter(notAfter)) return logWarning(warningPrefix, "Certificate has expired");
if (notAfter.isBefore(firstExpiry)) firstExpiry = notAfter;
}
X509Certificate endEntityCertificate = x509CertificateList.get(0);
Set<String> subjectAlternativeNames = X509CertificateUtils.getSubjectAlternativeNames(endEntityCertificate).stream()
.filter(san -> san.getType().equals(SubjectAlternativeName.Type.DNS_NAME))
.map(SubjectAlternativeName::getValue).collect(Collectors.toSet());
if(Sets.intersection(subjectAlternativeNames, Set.copyOf(dnsNamesOf(instance.id(), List.of(zone)))).isEmpty()) {
return logWarning(warningPrefix, "No overlap between SANs in certificate and expected SANs");
}
return true;
} catch (SecretNotFoundException s) {
return logWarning(warningPrefix, "Certificate not found in secret store");
} catch (Exception e) {
log.log(LogLevel.WARNING, "Exception thrown when verifying endpoint certificate", e);
return false;
}
} | return logWarning(warningPrefix, "No overlap between SANs in certificate and expected SANs"); | private boolean verifyEndpointCertificate(EndpointCertificateMetadata endpointCertificateMetadata, Instance instance, ZoneId zone, String warningPrefix) {
try {
var pemEncodedEndpointCertificate = secretStore.getSecret(endpointCertificateMetadata.certName(), endpointCertificateMetadata.version());
if (pemEncodedEndpointCertificate == null)
return logWarning(warningPrefix, "Secret store returned null for certificate");
List<X509Certificate> x509CertificateList = X509CertificateUtils.certificateListFromPem(pemEncodedEndpointCertificate);
if (x509CertificateList.isEmpty()) return logWarning(warningPrefix, "Empty certificate list");
if (x509CertificateList.size() < 2)
return logWarning(warningPrefix, "Only a single certificate found in chain - intermediate certificates likely missing");
Instant now = clock.instant();
Instant firstExpiry = Instant.MAX;
for (X509Certificate x509Certificate : x509CertificateList) {
Instant notBefore = x509Certificate.getNotBefore().toInstant();
Instant notAfter = x509Certificate.getNotAfter().toInstant();
if (now.isBefore(notBefore)) return logWarning(warningPrefix, "Certificate is not yet valid");
if (now.isAfter(notAfter)) return logWarning(warningPrefix, "Certificate has expired");
if (notAfter.isBefore(firstExpiry)) firstExpiry = notAfter;
}
X509Certificate endEntityCertificate = x509CertificateList.get(0);
Set<String> subjectAlternativeNames = X509CertificateUtils.getSubjectAlternativeNames(endEntityCertificate).stream()
.filter(san -> san.getType().equals(SubjectAlternativeName.Type.DNS_NAME))
.map(SubjectAlternativeName::getValue).collect(Collectors.toSet());
if(Sets.intersection(subjectAlternativeNames, Set.copyOf(dnsNamesOf(instance.id(), List.of(zone)))).isEmpty()) {
return logWarning(warningPrefix, "No overlap between SANs in certificate and expected SANs");
}
return true;
} catch (SecretNotFoundException s) {
return logWarning(warningPrefix, "Certificate not found in secret store");
} catch (Exception e) {
log.log(LogLevel.WARNING, "Exception thrown when verifying endpoint certificate", e);
return false;
}
} | class EndpointCertificateManager {
private static final Logger log = Logger.getLogger(EndpointCertificateManager.class.getName());
private final ZoneRegistry zoneRegistry;
private final CuratorDb curator;
private final SecretStore secretStore;
private final EndpointCertificateProvider endpointCertificateProvider;
private final Clock clock;
private final BooleanFlag useRefreshedEndpointCertificate;
public EndpointCertificateManager(ZoneRegistry zoneRegistry,
CuratorDb curator,
SecretStore secretStore,
EndpointCertificateProvider endpointCertificateProvider,
Clock clock, FlagSource flagSource) {
this.zoneRegistry = zoneRegistry;
this.curator = curator;
this.secretStore = secretStore;
this.endpointCertificateProvider = endpointCertificateProvider;
this.clock = clock;
this.useRefreshedEndpointCertificate = Flags.USE_REFRESHED_ENDPOINT_CERTIFICATE.bindTo(flagSource);
}
public Optional<EndpointCertificateMetadata> getEndpointCertificateMetadata(Instance instance, ZoneId zone) {
if (!zoneRegistry.zones().directlyRouted().ids().contains(zone)) return Optional.empty();
var endpointCertificateMetadata =
curator.readEndpointCertificateMetadata(instance.id())
.orElseGet(() -> provisionEndpointCertificate(instance));
if (useRefreshedEndpointCertificate.with(FetchVector.Dimension.APPLICATION_ID, instance.id().serializedForm()).value()) {
var latestAvailableVersion = latestVersionInSecretStore(endpointCertificateMetadata);
if (latestAvailableVersion.isPresent() && latestAvailableVersion.getAsInt() > endpointCertificateMetadata.version()) {
var refreshedCertificateMetadata = new EndpointCertificateMetadata(
endpointCertificateMetadata.keyName(),
endpointCertificateMetadata.certName(),
latestAvailableVersion.getAsInt()
);
if (verifyEndpointCertificate(refreshedCertificateMetadata, instance, zone, "Did not refresh, problems with refreshed certificate: "))
return Optional.of(refreshedCertificateMetadata);
}
}
verifyEndpointCertificate(endpointCertificateMetadata, instance, zone, "Problems while verifying certificate: ");
return Optional.of(endpointCertificateMetadata);
}
private OptionalInt latestVersionInSecretStore(EndpointCertificateMetadata originalCertificateMetadata) {
var certVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.certName()));
var keyVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.keyName()));
return Sets.intersection(certVersions, keyVersions).stream().mapToInt(Integer::intValue).max();
}
private EndpointCertificateMetadata provisionEndpointCertificate(Instance instance) {
List<ZoneId> directlyRoutedZones = zoneRegistry.zones().directlyRouted().zones().stream().map(ZoneApi::getId).collect(Collectors.toUnmodifiableList());
EndpointCertificateMetadata provisionedCertificateMetadata = endpointCertificateProvider
.requestCaSignedCertificate(instance.id(), dnsNamesOf(instance.id(), directlyRoutedZones));
curator.writeEndpointCertificateMetadata(instance.id(), provisionedCertificateMetadata);
return provisionedCertificateMetadata;
}
private static boolean logWarning(String warningPrefix, String message) {
log.log(LogLevel.WARNING, warningPrefix + message);
return false;
}
private List<String> dnsNamesOf(ApplicationId applicationId, List<ZoneId> zones) {
List<String> endpointDnsNames = new ArrayList<>();
endpointDnsNames.add(Endpoint.createHashedCn(applicationId, zoneRegistry.system()));
var globalDefaultEndpoint = Endpoint.of(applicationId).named(EndpointId.defaultId());
var rotationEndpoints = Endpoint.of(applicationId).wildcard();
var zoneLocalEndpoints = zones.stream().flatMap(zone -> Stream.of(
Endpoint.of(applicationId).target(ClusterSpec.Id.from("default"), zone),
Endpoint.of(applicationId).wildcard(zone)
));
Stream.concat(Stream.of(globalDefaultEndpoint, rotationEndpoints), zoneLocalEndpoints)
.map(Endpoint.EndpointBuilder::directRouting)
.map(endpoint -> endpoint.on(Endpoint.Port.tls()))
.map(endpointBuilder -> endpointBuilder.in(zoneRegistry.system()))
.map(Endpoint::dnsName).forEach(endpointDnsNames::add);
return Collections.unmodifiableList(endpointDnsNames);
}
} | class EndpointCertificateManager {
private static final Logger log = Logger.getLogger(EndpointCertificateManager.class.getName());
private final ZoneRegistry zoneRegistry;
private final CuratorDb curator;
private final SecretStore secretStore;
private final EndpointCertificateProvider endpointCertificateProvider;
private final Clock clock;
private final BooleanFlag useRefreshedEndpointCertificate;
public EndpointCertificateManager(ZoneRegistry zoneRegistry,
CuratorDb curator,
SecretStore secretStore,
EndpointCertificateProvider endpointCertificateProvider,
Clock clock, FlagSource flagSource) {
this.zoneRegistry = zoneRegistry;
this.curator = curator;
this.secretStore = secretStore;
this.endpointCertificateProvider = endpointCertificateProvider;
this.clock = clock;
this.useRefreshedEndpointCertificate = Flags.USE_REFRESHED_ENDPOINT_CERTIFICATE.bindTo(flagSource);
}
public Optional<EndpointCertificateMetadata> getEndpointCertificateMetadata(Instance instance, ZoneId zone) {
if (!zoneRegistry.zones().directlyRouted().ids().contains(zone)) return Optional.empty();
var endpointCertificateMetadata =
curator.readEndpointCertificateMetadata(instance.id())
.orElseGet(() -> provisionEndpointCertificate(instance));
if (useRefreshedEndpointCertificate.with(FetchVector.Dimension.APPLICATION_ID, instance.id().serializedForm()).value()) {
var latestAvailableVersion = latestVersionInSecretStore(endpointCertificateMetadata);
if (latestAvailableVersion.isPresent() && latestAvailableVersion.getAsInt() > endpointCertificateMetadata.version()) {
var refreshedCertificateMetadata = new EndpointCertificateMetadata(
endpointCertificateMetadata.keyName(),
endpointCertificateMetadata.certName(),
latestAvailableVersion.getAsInt()
);
if (verifyEndpointCertificate(refreshedCertificateMetadata, instance, zone, "Did not refresh, problems with refreshed certificate: "))
return Optional.of(refreshedCertificateMetadata);
}
}
verifyEndpointCertificate(endpointCertificateMetadata, instance, zone, "Problems while verifying certificate: ");
return Optional.of(endpointCertificateMetadata);
}
private OptionalInt latestVersionInSecretStore(EndpointCertificateMetadata originalCertificateMetadata) {
var certVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.certName()));
var keyVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.keyName()));
return Sets.intersection(certVersions, keyVersions).stream().mapToInt(Integer::intValue).max();
}
private EndpointCertificateMetadata provisionEndpointCertificate(Instance instance) {
List<ZoneId> directlyRoutedZones = zoneRegistry.zones().directlyRouted().zones().stream().map(ZoneApi::getId).collect(Collectors.toUnmodifiableList());
EndpointCertificateMetadata provisionedCertificateMetadata = endpointCertificateProvider
.requestCaSignedCertificate(instance.id(), dnsNamesOf(instance.id(), directlyRoutedZones));
curator.writeEndpointCertificateMetadata(instance.id(), provisionedCertificateMetadata);
return provisionedCertificateMetadata;
}
private static boolean logWarning(String warningPrefix, String message) {
log.log(LogLevel.WARNING, warningPrefix + message);
return false;
}
private List<String> dnsNamesOf(ApplicationId applicationId, List<ZoneId> zones) {
List<String> endpointDnsNames = new ArrayList<>();
endpointDnsNames.add(Endpoint.createHashedCn(applicationId, zoneRegistry.system()));
var globalDefaultEndpoint = Endpoint.of(applicationId).named(EndpointId.defaultId());
var rotationEndpoints = Endpoint.of(applicationId).wildcard();
var zoneLocalEndpoints = zones.stream().flatMap(zone -> Stream.of(
Endpoint.of(applicationId).target(ClusterSpec.Id.from("default"), zone),
Endpoint.of(applicationId).wildcard(zone)
));
Stream.concat(Stream.of(globalDefaultEndpoint, rotationEndpoints), zoneLocalEndpoints)
.map(Endpoint.EndpointBuilder::directRouting)
.map(endpoint -> endpoint.on(Endpoint.Port.tls()))
.map(endpointBuilder -> endpointBuilder.in(zoneRegistry.system()))
.map(Endpoint::dnsName).forEach(endpointDnsNames::add);
return Collections.unmodifiableList(endpointDnsNames);
}
} |
This is expected on first time deployment, but not on the subsequent deployments. Consider not invoking this code path on first time deployment or include a log stating that this can be expected in certain cases. On any subsequent calls this should be considered Error, not warning. | private boolean verifyEndpointCertificate(EndpointCertificateMetadata endpointCertificateMetadata, Instance instance, ZoneId zone, String warningPrefix) {
try {
var pemEncodedEndpointCertificate = secretStore.getSecret(endpointCertificateMetadata.certName(), endpointCertificateMetadata.version());
if (pemEncodedEndpointCertificate == null)
return logWarning(warningPrefix, "Secret store returned null for certificate");
List<X509Certificate> x509CertificateList = X509CertificateUtils.certificateListFromPem(pemEncodedEndpointCertificate);
if (x509CertificateList.isEmpty()) return logWarning(warningPrefix, "Empty certificate list");
if (x509CertificateList.size() < 2)
return logWarning(warningPrefix, "Only a single certificate found in chain - intermediate certificates likely missing");
Instant now = clock.instant();
Instant firstExpiry = Instant.MAX;
for (X509Certificate x509Certificate : x509CertificateList) {
Instant notBefore = x509Certificate.getNotBefore().toInstant();
Instant notAfter = x509Certificate.getNotAfter().toInstant();
if (now.isBefore(notBefore)) return logWarning(warningPrefix, "Certificate is not yet valid");
if (now.isAfter(notAfter)) return logWarning(warningPrefix, "Certificate has expired");
if (notAfter.isBefore(firstExpiry)) firstExpiry = notAfter;
}
X509Certificate endEntityCertificate = x509CertificateList.get(0);
Set<String> subjectAlternativeNames = X509CertificateUtils.getSubjectAlternativeNames(endEntityCertificate).stream()
.filter(san -> san.getType().equals(SubjectAlternativeName.Type.DNS_NAME))
.map(SubjectAlternativeName::getValue).collect(Collectors.toSet());
if(Sets.intersection(subjectAlternativeNames, Set.copyOf(dnsNamesOf(instance.id(), List.of(zone)))).isEmpty()) {
return logWarning(warningPrefix, "No overlap between SANs in certificate and expected SANs");
}
return true;
} catch (SecretNotFoundException s) {
return logWarning(warningPrefix, "Certificate not found in secret store");
} catch (Exception e) {
log.log(LogLevel.WARNING, "Exception thrown when verifying endpoint certificate", e);
return false;
}
} | return logWarning(warningPrefix, "Certificate not found in secret store"); | private boolean verifyEndpointCertificate(EndpointCertificateMetadata endpointCertificateMetadata, Instance instance, ZoneId zone, String warningPrefix) {
try {
var pemEncodedEndpointCertificate = secretStore.getSecret(endpointCertificateMetadata.certName(), endpointCertificateMetadata.version());
if (pemEncodedEndpointCertificate == null)
return logWarning(warningPrefix, "Secret store returned null for certificate");
List<X509Certificate> x509CertificateList = X509CertificateUtils.certificateListFromPem(pemEncodedEndpointCertificate);
if (x509CertificateList.isEmpty()) return logWarning(warningPrefix, "Empty certificate list");
if (x509CertificateList.size() < 2)
return logWarning(warningPrefix, "Only a single certificate found in chain - intermediate certificates likely missing");
Instant now = clock.instant();
Instant firstExpiry = Instant.MAX;
for (X509Certificate x509Certificate : x509CertificateList) {
Instant notBefore = x509Certificate.getNotBefore().toInstant();
Instant notAfter = x509Certificate.getNotAfter().toInstant();
if (now.isBefore(notBefore)) return logWarning(warningPrefix, "Certificate is not yet valid");
if (now.isAfter(notAfter)) return logWarning(warningPrefix, "Certificate has expired");
if (notAfter.isBefore(firstExpiry)) firstExpiry = notAfter;
}
X509Certificate endEntityCertificate = x509CertificateList.get(0);
Set<String> subjectAlternativeNames = X509CertificateUtils.getSubjectAlternativeNames(endEntityCertificate).stream()
.filter(san -> san.getType().equals(SubjectAlternativeName.Type.DNS_NAME))
.map(SubjectAlternativeName::getValue).collect(Collectors.toSet());
if(Sets.intersection(subjectAlternativeNames, Set.copyOf(dnsNamesOf(instance.id(), List.of(zone)))).isEmpty()) {
return logWarning(warningPrefix, "No overlap between SANs in certificate and expected SANs");
}
return true;
} catch (SecretNotFoundException s) {
return logWarning(warningPrefix, "Certificate not found in secret store");
} catch (Exception e) {
log.log(LogLevel.WARNING, "Exception thrown when verifying endpoint certificate", e);
return false;
}
} | class EndpointCertificateManager {
private static final Logger log = Logger.getLogger(EndpointCertificateManager.class.getName());
private final ZoneRegistry zoneRegistry;
private final CuratorDb curator;
private final SecretStore secretStore;
private final EndpointCertificateProvider endpointCertificateProvider;
private final Clock clock;
private final BooleanFlag useRefreshedEndpointCertificate;
public EndpointCertificateManager(ZoneRegistry zoneRegistry,
CuratorDb curator,
SecretStore secretStore,
EndpointCertificateProvider endpointCertificateProvider,
Clock clock, FlagSource flagSource) {
this.zoneRegistry = zoneRegistry;
this.curator = curator;
this.secretStore = secretStore;
this.endpointCertificateProvider = endpointCertificateProvider;
this.clock = clock;
this.useRefreshedEndpointCertificate = Flags.USE_REFRESHED_ENDPOINT_CERTIFICATE.bindTo(flagSource);
}
public Optional<EndpointCertificateMetadata> getEndpointCertificateMetadata(Instance instance, ZoneId zone) {
if (!zoneRegistry.zones().directlyRouted().ids().contains(zone)) return Optional.empty();
var endpointCertificateMetadata =
curator.readEndpointCertificateMetadata(instance.id())
.orElseGet(() -> provisionEndpointCertificate(instance));
if (useRefreshedEndpointCertificate.with(FetchVector.Dimension.APPLICATION_ID, instance.id().serializedForm()).value()) {
var latestAvailableVersion = latestVersionInSecretStore(endpointCertificateMetadata);
if (latestAvailableVersion.isPresent() && latestAvailableVersion.getAsInt() > endpointCertificateMetadata.version()) {
var refreshedCertificateMetadata = new EndpointCertificateMetadata(
endpointCertificateMetadata.keyName(),
endpointCertificateMetadata.certName(),
latestAvailableVersion.getAsInt()
);
if (verifyEndpointCertificate(refreshedCertificateMetadata, instance, zone, "Did not refresh, problems with refreshed certificate: "))
return Optional.of(refreshedCertificateMetadata);
}
}
verifyEndpointCertificate(endpointCertificateMetadata, instance, zone, "Problems while verifying certificate: ");
return Optional.of(endpointCertificateMetadata);
}
private OptionalInt latestVersionInSecretStore(EndpointCertificateMetadata originalCertificateMetadata) {
var certVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.certName()));
var keyVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.keyName()));
return Sets.intersection(certVersions, keyVersions).stream().mapToInt(Integer::intValue).max();
}
private EndpointCertificateMetadata provisionEndpointCertificate(Instance instance) {
List<ZoneId> directlyRoutedZones = zoneRegistry.zones().directlyRouted().zones().stream().map(ZoneApi::getId).collect(Collectors.toUnmodifiableList());
EndpointCertificateMetadata provisionedCertificateMetadata = endpointCertificateProvider
.requestCaSignedCertificate(instance.id(), dnsNamesOf(instance.id(), directlyRoutedZones));
curator.writeEndpointCertificateMetadata(instance.id(), provisionedCertificateMetadata);
return provisionedCertificateMetadata;
}
private static boolean logWarning(String warningPrefix, String message) {
log.log(LogLevel.WARNING, warningPrefix + message);
return false;
}
private List<String> dnsNamesOf(ApplicationId applicationId, List<ZoneId> zones) {
List<String> endpointDnsNames = new ArrayList<>();
endpointDnsNames.add(Endpoint.createHashedCn(applicationId, zoneRegistry.system()));
var globalDefaultEndpoint = Endpoint.of(applicationId).named(EndpointId.defaultId());
var rotationEndpoints = Endpoint.of(applicationId).wildcard();
var zoneLocalEndpoints = zones.stream().flatMap(zone -> Stream.of(
Endpoint.of(applicationId).target(ClusterSpec.Id.from("default"), zone),
Endpoint.of(applicationId).wildcard(zone)
));
Stream.concat(Stream.of(globalDefaultEndpoint, rotationEndpoints), zoneLocalEndpoints)
.map(Endpoint.EndpointBuilder::directRouting)
.map(endpoint -> endpoint.on(Endpoint.Port.tls()))
.map(endpointBuilder -> endpointBuilder.in(zoneRegistry.system()))
.map(Endpoint::dnsName).forEach(endpointDnsNames::add);
return Collections.unmodifiableList(endpointDnsNames);
}
} | class EndpointCertificateManager {
private static final Logger log = Logger.getLogger(EndpointCertificateManager.class.getName());
private final ZoneRegistry zoneRegistry;
private final CuratorDb curator;
private final SecretStore secretStore;
private final EndpointCertificateProvider endpointCertificateProvider;
private final Clock clock;
private final BooleanFlag useRefreshedEndpointCertificate;
public EndpointCertificateManager(ZoneRegistry zoneRegistry,
CuratorDb curator,
SecretStore secretStore,
EndpointCertificateProvider endpointCertificateProvider,
Clock clock, FlagSource flagSource) {
this.zoneRegistry = zoneRegistry;
this.curator = curator;
this.secretStore = secretStore;
this.endpointCertificateProvider = endpointCertificateProvider;
this.clock = clock;
this.useRefreshedEndpointCertificate = Flags.USE_REFRESHED_ENDPOINT_CERTIFICATE.bindTo(flagSource);
}
public Optional<EndpointCertificateMetadata> getEndpointCertificateMetadata(Instance instance, ZoneId zone) {
if (!zoneRegistry.zones().directlyRouted().ids().contains(zone)) return Optional.empty();
var endpointCertificateMetadata =
curator.readEndpointCertificateMetadata(instance.id())
.orElseGet(() -> provisionEndpointCertificate(instance));
if (useRefreshedEndpointCertificate.with(FetchVector.Dimension.APPLICATION_ID, instance.id().serializedForm()).value()) {
var latestAvailableVersion = latestVersionInSecretStore(endpointCertificateMetadata);
if (latestAvailableVersion.isPresent() && latestAvailableVersion.getAsInt() > endpointCertificateMetadata.version()) {
var refreshedCertificateMetadata = new EndpointCertificateMetadata(
endpointCertificateMetadata.keyName(),
endpointCertificateMetadata.certName(),
latestAvailableVersion.getAsInt()
);
if (verifyEndpointCertificate(refreshedCertificateMetadata, instance, zone, "Did not refresh, problems with refreshed certificate: "))
return Optional.of(refreshedCertificateMetadata);
}
}
verifyEndpointCertificate(endpointCertificateMetadata, instance, zone, "Problems while verifying certificate: ");
return Optional.of(endpointCertificateMetadata);
}
private OptionalInt latestVersionInSecretStore(EndpointCertificateMetadata originalCertificateMetadata) {
var certVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.certName()));
var keyVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.keyName()));
return Sets.intersection(certVersions, keyVersions).stream().mapToInt(Integer::intValue).max();
}
private EndpointCertificateMetadata provisionEndpointCertificate(Instance instance) {
List<ZoneId> directlyRoutedZones = zoneRegistry.zones().directlyRouted().zones().stream().map(ZoneApi::getId).collect(Collectors.toUnmodifiableList());
EndpointCertificateMetadata provisionedCertificateMetadata = endpointCertificateProvider
.requestCaSignedCertificate(instance.id(), dnsNamesOf(instance.id(), directlyRoutedZones));
curator.writeEndpointCertificateMetadata(instance.id(), provisionedCertificateMetadata);
return provisionedCertificateMetadata;
}
private static boolean logWarning(String warningPrefix, String message) {
log.log(LogLevel.WARNING, warningPrefix + message);
return false;
}
private List<String> dnsNamesOf(ApplicationId applicationId, List<ZoneId> zones) {
List<String> endpointDnsNames = new ArrayList<>();
endpointDnsNames.add(Endpoint.createHashedCn(applicationId, zoneRegistry.system()));
var globalDefaultEndpoint = Endpoint.of(applicationId).named(EndpointId.defaultId());
var rotationEndpoints = Endpoint.of(applicationId).wildcard();
var zoneLocalEndpoints = zones.stream().flatMap(zone -> Stream.of(
Endpoint.of(applicationId).target(ClusterSpec.Id.from("default"), zone),
Endpoint.of(applicationId).wildcard(zone)
));
Stream.concat(Stream.of(globalDefaultEndpoint, rotationEndpoints), zoneLocalEndpoints)
.map(Endpoint.EndpointBuilder::directRouting)
.map(endpoint -> endpoint.on(Endpoint.Port.tls()))
.map(endpointBuilder -> endpointBuilder.in(zoneRegistry.system()))
.map(Endpoint::dnsName).forEach(endpointDnsNames::add);
return Collections.unmodifiableList(endpointDnsNames);
}
} |
error? | private boolean verifyEndpointCertificate(EndpointCertificateMetadata endpointCertificateMetadata, Instance instance, ZoneId zone, String warningPrefix) {
try {
var pemEncodedEndpointCertificate = secretStore.getSecret(endpointCertificateMetadata.certName(), endpointCertificateMetadata.version());
if (pemEncodedEndpointCertificate == null)
return logWarning(warningPrefix, "Secret store returned null for certificate");
List<X509Certificate> x509CertificateList = X509CertificateUtils.certificateListFromPem(pemEncodedEndpointCertificate);
if (x509CertificateList.isEmpty()) return logWarning(warningPrefix, "Empty certificate list");
if (x509CertificateList.size() < 2)
return logWarning(warningPrefix, "Only a single certificate found in chain - intermediate certificates likely missing");
Instant now = clock.instant();
Instant firstExpiry = Instant.MAX;
for (X509Certificate x509Certificate : x509CertificateList) {
Instant notBefore = x509Certificate.getNotBefore().toInstant();
Instant notAfter = x509Certificate.getNotAfter().toInstant();
if (now.isBefore(notBefore)) return logWarning(warningPrefix, "Certificate is not yet valid");
if (now.isAfter(notAfter)) return logWarning(warningPrefix, "Certificate has expired");
if (notAfter.isBefore(firstExpiry)) firstExpiry = notAfter;
}
X509Certificate endEntityCertificate = x509CertificateList.get(0);
Set<String> subjectAlternativeNames = X509CertificateUtils.getSubjectAlternativeNames(endEntityCertificate).stream()
.filter(san -> san.getType().equals(SubjectAlternativeName.Type.DNS_NAME))
.map(SubjectAlternativeName::getValue).collect(Collectors.toSet());
if(Sets.intersection(subjectAlternativeNames, Set.copyOf(dnsNamesOf(instance.id(), List.of(zone)))).isEmpty()) {
return logWarning(warningPrefix, "No overlap between SANs in certificate and expected SANs");
}
return true;
} catch (SecretNotFoundException s) {
return logWarning(warningPrefix, "Certificate not found in secret store");
} catch (Exception e) {
log.log(LogLevel.WARNING, "Exception thrown when verifying endpoint certificate", e);
return false;
}
} | log.log(LogLevel.WARNING, "Exception thrown when verifying endpoint certificate", e); | private boolean verifyEndpointCertificate(EndpointCertificateMetadata endpointCertificateMetadata, Instance instance, ZoneId zone, String warningPrefix) {
try {
var pemEncodedEndpointCertificate = secretStore.getSecret(endpointCertificateMetadata.certName(), endpointCertificateMetadata.version());
if (pemEncodedEndpointCertificate == null)
return logWarning(warningPrefix, "Secret store returned null for certificate");
List<X509Certificate> x509CertificateList = X509CertificateUtils.certificateListFromPem(pemEncodedEndpointCertificate);
if (x509CertificateList.isEmpty()) return logWarning(warningPrefix, "Empty certificate list");
if (x509CertificateList.size() < 2)
return logWarning(warningPrefix, "Only a single certificate found in chain - intermediate certificates likely missing");
Instant now = clock.instant();
Instant firstExpiry = Instant.MAX;
for (X509Certificate x509Certificate : x509CertificateList) {
Instant notBefore = x509Certificate.getNotBefore().toInstant();
Instant notAfter = x509Certificate.getNotAfter().toInstant();
if (now.isBefore(notBefore)) return logWarning(warningPrefix, "Certificate is not yet valid");
if (now.isAfter(notAfter)) return logWarning(warningPrefix, "Certificate has expired");
if (notAfter.isBefore(firstExpiry)) firstExpiry = notAfter;
}
X509Certificate endEntityCertificate = x509CertificateList.get(0);
Set<String> subjectAlternativeNames = X509CertificateUtils.getSubjectAlternativeNames(endEntityCertificate).stream()
.filter(san -> san.getType().equals(SubjectAlternativeName.Type.DNS_NAME))
.map(SubjectAlternativeName::getValue).collect(Collectors.toSet());
if(Sets.intersection(subjectAlternativeNames, Set.copyOf(dnsNamesOf(instance.id(), List.of(zone)))).isEmpty()) {
return logWarning(warningPrefix, "No overlap between SANs in certificate and expected SANs");
}
return true;
} catch (SecretNotFoundException s) {
return logWarning(warningPrefix, "Certificate not found in secret store");
} catch (Exception e) {
log.log(LogLevel.WARNING, "Exception thrown when verifying endpoint certificate", e);
return false;
}
} | class EndpointCertificateManager {
private static final Logger log = Logger.getLogger(EndpointCertificateManager.class.getName());
private final ZoneRegistry zoneRegistry;
private final CuratorDb curator;
private final SecretStore secretStore;
private final EndpointCertificateProvider endpointCertificateProvider;
private final Clock clock;
private final BooleanFlag useRefreshedEndpointCertificate;
public EndpointCertificateManager(ZoneRegistry zoneRegistry,
CuratorDb curator,
SecretStore secretStore,
EndpointCertificateProvider endpointCertificateProvider,
Clock clock, FlagSource flagSource) {
this.zoneRegistry = zoneRegistry;
this.curator = curator;
this.secretStore = secretStore;
this.endpointCertificateProvider = endpointCertificateProvider;
this.clock = clock;
this.useRefreshedEndpointCertificate = Flags.USE_REFRESHED_ENDPOINT_CERTIFICATE.bindTo(flagSource);
}
public Optional<EndpointCertificateMetadata> getEndpointCertificateMetadata(Instance instance, ZoneId zone) {
if (!zoneRegistry.zones().directlyRouted().ids().contains(zone)) return Optional.empty();
var endpointCertificateMetadata =
curator.readEndpointCertificateMetadata(instance.id())
.orElseGet(() -> provisionEndpointCertificate(instance));
if (useRefreshedEndpointCertificate.with(FetchVector.Dimension.APPLICATION_ID, instance.id().serializedForm()).value()) {
var latestAvailableVersion = latestVersionInSecretStore(endpointCertificateMetadata);
if (latestAvailableVersion.isPresent() && latestAvailableVersion.getAsInt() > endpointCertificateMetadata.version()) {
var refreshedCertificateMetadata = new EndpointCertificateMetadata(
endpointCertificateMetadata.keyName(),
endpointCertificateMetadata.certName(),
latestAvailableVersion.getAsInt()
);
if (verifyEndpointCertificate(refreshedCertificateMetadata, instance, zone, "Did not refresh, problems with refreshed certificate: "))
return Optional.of(refreshedCertificateMetadata);
}
}
verifyEndpointCertificate(endpointCertificateMetadata, instance, zone, "Problems while verifying certificate: ");
return Optional.of(endpointCertificateMetadata);
}
private OptionalInt latestVersionInSecretStore(EndpointCertificateMetadata originalCertificateMetadata) {
var certVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.certName()));
var keyVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.keyName()));
return Sets.intersection(certVersions, keyVersions).stream().mapToInt(Integer::intValue).max();
}
private EndpointCertificateMetadata provisionEndpointCertificate(Instance instance) {
List<ZoneId> directlyRoutedZones = zoneRegistry.zones().directlyRouted().zones().stream().map(ZoneApi::getId).collect(Collectors.toUnmodifiableList());
EndpointCertificateMetadata provisionedCertificateMetadata = endpointCertificateProvider
.requestCaSignedCertificate(instance.id(), dnsNamesOf(instance.id(), directlyRoutedZones));
curator.writeEndpointCertificateMetadata(instance.id(), provisionedCertificateMetadata);
return provisionedCertificateMetadata;
}
private static boolean logWarning(String warningPrefix, String message) {
log.log(LogLevel.WARNING, warningPrefix + message);
return false;
}
private List<String> dnsNamesOf(ApplicationId applicationId, List<ZoneId> zones) {
List<String> endpointDnsNames = new ArrayList<>();
endpointDnsNames.add(Endpoint.createHashedCn(applicationId, zoneRegistry.system()));
var globalDefaultEndpoint = Endpoint.of(applicationId).named(EndpointId.defaultId());
var rotationEndpoints = Endpoint.of(applicationId).wildcard();
var zoneLocalEndpoints = zones.stream().flatMap(zone -> Stream.of(
Endpoint.of(applicationId).target(ClusterSpec.Id.from("default"), zone),
Endpoint.of(applicationId).wildcard(zone)
));
Stream.concat(Stream.of(globalDefaultEndpoint, rotationEndpoints), zoneLocalEndpoints)
.map(Endpoint.EndpointBuilder::directRouting)
.map(endpoint -> endpoint.on(Endpoint.Port.tls()))
.map(endpointBuilder -> endpointBuilder.in(zoneRegistry.system()))
.map(Endpoint::dnsName).forEach(endpointDnsNames::add);
return Collections.unmodifiableList(endpointDnsNames);
}
} | class EndpointCertificateManager {
private static final Logger log = Logger.getLogger(EndpointCertificateManager.class.getName());
private final ZoneRegistry zoneRegistry;
private final CuratorDb curator;
private final SecretStore secretStore;
private final EndpointCertificateProvider endpointCertificateProvider;
private final Clock clock;
private final BooleanFlag useRefreshedEndpointCertificate;
public EndpointCertificateManager(ZoneRegistry zoneRegistry,
CuratorDb curator,
SecretStore secretStore,
EndpointCertificateProvider endpointCertificateProvider,
Clock clock, FlagSource flagSource) {
this.zoneRegistry = zoneRegistry;
this.curator = curator;
this.secretStore = secretStore;
this.endpointCertificateProvider = endpointCertificateProvider;
this.clock = clock;
this.useRefreshedEndpointCertificate = Flags.USE_REFRESHED_ENDPOINT_CERTIFICATE.bindTo(flagSource);
}
public Optional<EndpointCertificateMetadata> getEndpointCertificateMetadata(Instance instance, ZoneId zone) {
if (!zoneRegistry.zones().directlyRouted().ids().contains(zone)) return Optional.empty();
var endpointCertificateMetadata =
curator.readEndpointCertificateMetadata(instance.id())
.orElseGet(() -> provisionEndpointCertificate(instance));
if (useRefreshedEndpointCertificate.with(FetchVector.Dimension.APPLICATION_ID, instance.id().serializedForm()).value()) {
var latestAvailableVersion = latestVersionInSecretStore(endpointCertificateMetadata);
if (latestAvailableVersion.isPresent() && latestAvailableVersion.getAsInt() > endpointCertificateMetadata.version()) {
var refreshedCertificateMetadata = new EndpointCertificateMetadata(
endpointCertificateMetadata.keyName(),
endpointCertificateMetadata.certName(),
latestAvailableVersion.getAsInt()
);
if (verifyEndpointCertificate(refreshedCertificateMetadata, instance, zone, "Did not refresh, problems with refreshed certificate: "))
return Optional.of(refreshedCertificateMetadata);
}
}
verifyEndpointCertificate(endpointCertificateMetadata, instance, zone, "Problems while verifying certificate: ");
return Optional.of(endpointCertificateMetadata);
}
private OptionalInt latestVersionInSecretStore(EndpointCertificateMetadata originalCertificateMetadata) {
var certVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.certName()));
var keyVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.keyName()));
return Sets.intersection(certVersions, keyVersions).stream().mapToInt(Integer::intValue).max();
}
private EndpointCertificateMetadata provisionEndpointCertificate(Instance instance) {
List<ZoneId> directlyRoutedZones = zoneRegistry.zones().directlyRouted().zones().stream().map(ZoneApi::getId).collect(Collectors.toUnmodifiableList());
EndpointCertificateMetadata provisionedCertificateMetadata = endpointCertificateProvider
.requestCaSignedCertificate(instance.id(), dnsNamesOf(instance.id(), directlyRoutedZones));
curator.writeEndpointCertificateMetadata(instance.id(), provisionedCertificateMetadata);
return provisionedCertificateMetadata;
}
private static boolean logWarning(String warningPrefix, String message) {
log.log(LogLevel.WARNING, warningPrefix + message);
return false;
}
private List<String> dnsNamesOf(ApplicationId applicationId, List<ZoneId> zones) {
List<String> endpointDnsNames = new ArrayList<>();
endpointDnsNames.add(Endpoint.createHashedCn(applicationId, zoneRegistry.system()));
var globalDefaultEndpoint = Endpoint.of(applicationId).named(EndpointId.defaultId());
var rotationEndpoints = Endpoint.of(applicationId).wildcard();
var zoneLocalEndpoints = zones.stream().flatMap(zone -> Stream.of(
Endpoint.of(applicationId).target(ClusterSpec.Id.from("default"), zone),
Endpoint.of(applicationId).wildcard(zone)
));
Stream.concat(Stream.of(globalDefaultEndpoint, rotationEndpoints), zoneLocalEndpoints)
.map(Endpoint.EndpointBuilder::directRouting)
.map(endpoint -> endpoint.on(Endpoint.Port.tls()))
.map(endpointBuilder -> endpointBuilder.in(zoneRegistry.system()))
.map(Endpoint::dnsName).forEach(endpointDnsNames::add);
return Collections.unmodifiableList(endpointDnsNames);
}
} |
error? | private boolean verifyEndpointCertificate(EndpointCertificateMetadata endpointCertificateMetadata, Instance instance, ZoneId zone, String warningPrefix) {
try {
var pemEncodedEndpointCertificate = secretStore.getSecret(endpointCertificateMetadata.certName(), endpointCertificateMetadata.version());
if (pemEncodedEndpointCertificate == null)
return logWarning(warningPrefix, "Secret store returned null for certificate");
List<X509Certificate> x509CertificateList = X509CertificateUtils.certificateListFromPem(pemEncodedEndpointCertificate);
if (x509CertificateList.isEmpty()) return logWarning(warningPrefix, "Empty certificate list");
if (x509CertificateList.size() < 2)
return logWarning(warningPrefix, "Only a single certificate found in chain - intermediate certificates likely missing");
Instant now = clock.instant();
Instant firstExpiry = Instant.MAX;
for (X509Certificate x509Certificate : x509CertificateList) {
Instant notBefore = x509Certificate.getNotBefore().toInstant();
Instant notAfter = x509Certificate.getNotAfter().toInstant();
if (now.isBefore(notBefore)) return logWarning(warningPrefix, "Certificate is not yet valid");
if (now.isAfter(notAfter)) return logWarning(warningPrefix, "Certificate has expired");
if (notAfter.isBefore(firstExpiry)) firstExpiry = notAfter;
}
X509Certificate endEntityCertificate = x509CertificateList.get(0);
Set<String> subjectAlternativeNames = X509CertificateUtils.getSubjectAlternativeNames(endEntityCertificate).stream()
.filter(san -> san.getType().equals(SubjectAlternativeName.Type.DNS_NAME))
.map(SubjectAlternativeName::getValue).collect(Collectors.toSet());
if(Sets.intersection(subjectAlternativeNames, Set.copyOf(dnsNamesOf(instance.id(), List.of(zone)))).isEmpty()) {
return logWarning(warningPrefix, "No overlap between SANs in certificate and expected SANs");
}
return true;
} catch (SecretNotFoundException s) {
return logWarning(warningPrefix, "Certificate not found in secret store");
} catch (Exception e) {
log.log(LogLevel.WARNING, "Exception thrown when verifying endpoint certificate", e);
return false;
}
} | return logWarning(warningPrefix, "Secret store returned null for certificate"); | private boolean verifyEndpointCertificate(EndpointCertificateMetadata endpointCertificateMetadata, Instance instance, ZoneId zone, String warningPrefix) {
try {
var pemEncodedEndpointCertificate = secretStore.getSecret(endpointCertificateMetadata.certName(), endpointCertificateMetadata.version());
if (pemEncodedEndpointCertificate == null)
return logWarning(warningPrefix, "Secret store returned null for certificate");
List<X509Certificate> x509CertificateList = X509CertificateUtils.certificateListFromPem(pemEncodedEndpointCertificate);
if (x509CertificateList.isEmpty()) return logWarning(warningPrefix, "Empty certificate list");
if (x509CertificateList.size() < 2)
return logWarning(warningPrefix, "Only a single certificate found in chain - intermediate certificates likely missing");
Instant now = clock.instant();
Instant firstExpiry = Instant.MAX;
for (X509Certificate x509Certificate : x509CertificateList) {
Instant notBefore = x509Certificate.getNotBefore().toInstant();
Instant notAfter = x509Certificate.getNotAfter().toInstant();
if (now.isBefore(notBefore)) return logWarning(warningPrefix, "Certificate is not yet valid");
if (now.isAfter(notAfter)) return logWarning(warningPrefix, "Certificate has expired");
if (notAfter.isBefore(firstExpiry)) firstExpiry = notAfter;
}
X509Certificate endEntityCertificate = x509CertificateList.get(0);
Set<String> subjectAlternativeNames = X509CertificateUtils.getSubjectAlternativeNames(endEntityCertificate).stream()
.filter(san -> san.getType().equals(SubjectAlternativeName.Type.DNS_NAME))
.map(SubjectAlternativeName::getValue).collect(Collectors.toSet());
if(Sets.intersection(subjectAlternativeNames, Set.copyOf(dnsNamesOf(instance.id(), List.of(zone)))).isEmpty()) {
return logWarning(warningPrefix, "No overlap between SANs in certificate and expected SANs");
}
return true;
} catch (SecretNotFoundException s) {
return logWarning(warningPrefix, "Certificate not found in secret store");
} catch (Exception e) {
log.log(LogLevel.WARNING, "Exception thrown when verifying endpoint certificate", e);
return false;
}
} | class EndpointCertificateManager {
private static final Logger log = Logger.getLogger(EndpointCertificateManager.class.getName());
private final ZoneRegistry zoneRegistry;
private final CuratorDb curator;
private final SecretStore secretStore;
private final EndpointCertificateProvider endpointCertificateProvider;
private final Clock clock;
private final BooleanFlag useRefreshedEndpointCertificate;
public EndpointCertificateManager(ZoneRegistry zoneRegistry,
CuratorDb curator,
SecretStore secretStore,
EndpointCertificateProvider endpointCertificateProvider,
Clock clock, FlagSource flagSource) {
this.zoneRegistry = zoneRegistry;
this.curator = curator;
this.secretStore = secretStore;
this.endpointCertificateProvider = endpointCertificateProvider;
this.clock = clock;
this.useRefreshedEndpointCertificate = Flags.USE_REFRESHED_ENDPOINT_CERTIFICATE.bindTo(flagSource);
}
public Optional<EndpointCertificateMetadata> getEndpointCertificateMetadata(Instance instance, ZoneId zone) {
if (!zoneRegistry.zones().directlyRouted().ids().contains(zone)) return Optional.empty();
var endpointCertificateMetadata =
curator.readEndpointCertificateMetadata(instance.id())
.orElseGet(() -> provisionEndpointCertificate(instance));
if (useRefreshedEndpointCertificate.with(FetchVector.Dimension.APPLICATION_ID, instance.id().serializedForm()).value()) {
var latestAvailableVersion = latestVersionInSecretStore(endpointCertificateMetadata);
if (latestAvailableVersion.isPresent() && latestAvailableVersion.getAsInt() > endpointCertificateMetadata.version()) {
var refreshedCertificateMetadata = new EndpointCertificateMetadata(
endpointCertificateMetadata.keyName(),
endpointCertificateMetadata.certName(),
latestAvailableVersion.getAsInt()
);
if (verifyEndpointCertificate(refreshedCertificateMetadata, instance, zone, "Did not refresh, problems with refreshed certificate: "))
return Optional.of(refreshedCertificateMetadata);
}
}
verifyEndpointCertificate(endpointCertificateMetadata, instance, zone, "Problems while verifying certificate: ");
return Optional.of(endpointCertificateMetadata);
}
private OptionalInt latestVersionInSecretStore(EndpointCertificateMetadata originalCertificateMetadata) {
var certVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.certName()));
var keyVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.keyName()));
return Sets.intersection(certVersions, keyVersions).stream().mapToInt(Integer::intValue).max();
}
private EndpointCertificateMetadata provisionEndpointCertificate(Instance instance) {
List<ZoneId> directlyRoutedZones = zoneRegistry.zones().directlyRouted().zones().stream().map(ZoneApi::getId).collect(Collectors.toUnmodifiableList());
EndpointCertificateMetadata provisionedCertificateMetadata = endpointCertificateProvider
.requestCaSignedCertificate(instance.id(), dnsNamesOf(instance.id(), directlyRoutedZones));
curator.writeEndpointCertificateMetadata(instance.id(), provisionedCertificateMetadata);
return provisionedCertificateMetadata;
}
private static boolean logWarning(String warningPrefix, String message) {
log.log(LogLevel.WARNING, warningPrefix + message);
return false;
}
private List<String> dnsNamesOf(ApplicationId applicationId, List<ZoneId> zones) {
List<String> endpointDnsNames = new ArrayList<>();
endpointDnsNames.add(Endpoint.createHashedCn(applicationId, zoneRegistry.system()));
var globalDefaultEndpoint = Endpoint.of(applicationId).named(EndpointId.defaultId());
var rotationEndpoints = Endpoint.of(applicationId).wildcard();
var zoneLocalEndpoints = zones.stream().flatMap(zone -> Stream.of(
Endpoint.of(applicationId).target(ClusterSpec.Id.from("default"), zone),
Endpoint.of(applicationId).wildcard(zone)
));
Stream.concat(Stream.of(globalDefaultEndpoint, rotationEndpoints), zoneLocalEndpoints)
.map(Endpoint.EndpointBuilder::directRouting)
.map(endpoint -> endpoint.on(Endpoint.Port.tls()))
.map(endpointBuilder -> endpointBuilder.in(zoneRegistry.system()))
.map(Endpoint::dnsName).forEach(endpointDnsNames::add);
return Collections.unmodifiableList(endpointDnsNames);
}
} | class EndpointCertificateManager {
private static final Logger log = Logger.getLogger(EndpointCertificateManager.class.getName());
private final ZoneRegistry zoneRegistry;
private final CuratorDb curator;
private final SecretStore secretStore;
private final EndpointCertificateProvider endpointCertificateProvider;
private final Clock clock;
private final BooleanFlag useRefreshedEndpointCertificate;
public EndpointCertificateManager(ZoneRegistry zoneRegistry,
CuratorDb curator,
SecretStore secretStore,
EndpointCertificateProvider endpointCertificateProvider,
Clock clock, FlagSource flagSource) {
this.zoneRegistry = zoneRegistry;
this.curator = curator;
this.secretStore = secretStore;
this.endpointCertificateProvider = endpointCertificateProvider;
this.clock = clock;
this.useRefreshedEndpointCertificate = Flags.USE_REFRESHED_ENDPOINT_CERTIFICATE.bindTo(flagSource);
}
public Optional<EndpointCertificateMetadata> getEndpointCertificateMetadata(Instance instance, ZoneId zone) {
if (!zoneRegistry.zones().directlyRouted().ids().contains(zone)) return Optional.empty();
var endpointCertificateMetadata =
curator.readEndpointCertificateMetadata(instance.id())
.orElseGet(() -> provisionEndpointCertificate(instance));
if (useRefreshedEndpointCertificate.with(FetchVector.Dimension.APPLICATION_ID, instance.id().serializedForm()).value()) {
var latestAvailableVersion = latestVersionInSecretStore(endpointCertificateMetadata);
if (latestAvailableVersion.isPresent() && latestAvailableVersion.getAsInt() > endpointCertificateMetadata.version()) {
var refreshedCertificateMetadata = new EndpointCertificateMetadata(
endpointCertificateMetadata.keyName(),
endpointCertificateMetadata.certName(),
latestAvailableVersion.getAsInt()
);
if (verifyEndpointCertificate(refreshedCertificateMetadata, instance, zone, "Did not refresh, problems with refreshed certificate: "))
return Optional.of(refreshedCertificateMetadata);
}
}
verifyEndpointCertificate(endpointCertificateMetadata, instance, zone, "Problems while verifying certificate: ");
return Optional.of(endpointCertificateMetadata);
}
private OptionalInt latestVersionInSecretStore(EndpointCertificateMetadata originalCertificateMetadata) {
var certVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.certName()));
var keyVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.keyName()));
return Sets.intersection(certVersions, keyVersions).stream().mapToInt(Integer::intValue).max();
}
private EndpointCertificateMetadata provisionEndpointCertificate(Instance instance) {
List<ZoneId> directlyRoutedZones = zoneRegistry.zones().directlyRouted().zones().stream().map(ZoneApi::getId).collect(Collectors.toUnmodifiableList());
EndpointCertificateMetadata provisionedCertificateMetadata = endpointCertificateProvider
.requestCaSignedCertificate(instance.id(), dnsNamesOf(instance.id(), directlyRoutedZones));
curator.writeEndpointCertificateMetadata(instance.id(), provisionedCertificateMetadata);
return provisionedCertificateMetadata;
}
private static boolean logWarning(String warningPrefix, String message) {
log.log(LogLevel.WARNING, warningPrefix + message);
return false;
}
private List<String> dnsNamesOf(ApplicationId applicationId, List<ZoneId> zones) {
List<String> endpointDnsNames = new ArrayList<>();
endpointDnsNames.add(Endpoint.createHashedCn(applicationId, zoneRegistry.system()));
var globalDefaultEndpoint = Endpoint.of(applicationId).named(EndpointId.defaultId());
var rotationEndpoints = Endpoint.of(applicationId).wildcard();
var zoneLocalEndpoints = zones.stream().flatMap(zone -> Stream.of(
Endpoint.of(applicationId).target(ClusterSpec.Id.from("default"), zone),
Endpoint.of(applicationId).wildcard(zone)
));
Stream.concat(Stream.of(globalDefaultEndpoint, rotationEndpoints), zoneLocalEndpoints)
.map(Endpoint.EndpointBuilder::directRouting)
.map(endpoint -> endpoint.on(Endpoint.Port.tls()))
.map(endpointBuilder -> endpointBuilder.in(zoneRegistry.system()))
.map(Endpoint::dnsName).forEach(endpointDnsNames::add);
return Collections.unmodifiableList(endpointDnsNames);
}
} |
This should be documented as well | private void updateGlobalDnsOf(Collection<RoutingPolicy> routingPolicies, Set<ZoneId> inactiveZones, @SuppressWarnings("unused") Lock lock) {
var routingTable = routingTableFrom(routingPolicies);
for (Map.Entry<RoutingId, List<RoutingPolicy>> routeEntry : routingTable.entrySet()) {
var targets = new LinkedHashSet<AliasTarget>();
var staleTargets = new LinkedHashSet<AliasTarget>();
for (var policy : routeEntry.getValue()) {
if (policy.dnsZone().isEmpty()) continue;
var target = new AliasTarget(policy.canonicalName(), policy.dnsZone().get(), policy.id().zone());
var zonePolicy = db.readZoneRoutingPolicy(policy.id().zone());
if (isConfiguredOut(policy, zonePolicy, inactiveZones)) {
staleTargets.add(target);
} else {
targets.add(target);
}
}
if (targets.isEmpty() && !staleTargets.isEmpty()) {
targets.addAll(staleTargets);
staleTargets.clear();
}
if (!targets.isEmpty()) {
var endpoint = RoutingPolicy.globalEndpointOf(routeEntry.getKey().application(),
routeEntry.getKey().endpointId(), controller.system());
controller.nameServiceForwarder().createAlias(RecordName.from(endpoint.dnsName()), targets, Priority.normal);
}
staleTargets.forEach(t -> controller.nameServiceForwarder().removeRecords(Record.Type.ALIAS,
RecordData.fqdn(t.name().value()),
Priority.normal));
}
} | private void updateGlobalDnsOf(Collection<RoutingPolicy> routingPolicies, Set<ZoneId> inactiveZones, @SuppressWarnings("unused") Lock lock) {
var routingTable = routingTableFrom(routingPolicies);
for (Map.Entry<RoutingId, List<RoutingPolicy>> routeEntry : routingTable.entrySet()) {
var targets = new LinkedHashSet<AliasTarget>();
var staleTargets = new LinkedHashSet<AliasTarget>();
for (var policy : routeEntry.getValue()) {
if (policy.dnsZone().isEmpty()) continue;
var target = new AliasTarget(policy.canonicalName(), policy.dnsZone().get(), policy.id().zone());
var zonePolicy = db.readZoneRoutingPolicy(policy.id().zone());
if (isConfiguredOut(policy, zonePolicy, inactiveZones)) {
staleTargets.add(target);
} else {
targets.add(target);
}
}
if (targets.isEmpty() && !staleTargets.isEmpty()) {
targets.addAll(staleTargets);
staleTargets.clear();
}
if (!targets.isEmpty()) {
var endpoint = RoutingPolicy.globalEndpointOf(routeEntry.getKey().application(),
routeEntry.getKey().endpointId(), controller.system());
controller.nameServiceForwarder().createAlias(RecordName.from(endpoint.dnsName()), targets, Priority.normal);
}
staleTargets.forEach(t -> controller.nameServiceForwarder().removeRecords(Record.Type.ALIAS,
RecordData.fqdn(t.name().value()),
Priority.normal));
}
} | class RoutingPolicies {
private final Controller controller;
private final CuratorDb db;
public RoutingPolicies(Controller controller) {
this.controller = Objects.requireNonNull(controller, "controller must be non-null");
this.db = controller.curator();
try (var lock = db.lockRoutingPolicies()) {
for (var policy : db.readRoutingPolicies().entrySet()) {
db.writeRoutingPolicies(policy.getKey(), policy.getValue());
}
}
}
/** Read all known routing policies for given instance */
public Map<RoutingPolicyId, RoutingPolicy> get(ApplicationId application) {
return db.readRoutingPolicies(application);
}
/** Read all known routing policies for given deployment */
public Map<RoutingPolicyId, RoutingPolicy> get(DeploymentId deployment) {
return get(deployment.applicationId(), deployment.zoneId());
}
/** Read all known routing policies for given deployment */
public Map<RoutingPolicyId, RoutingPolicy> get(ApplicationId application, ZoneId zone) {
return db.readRoutingPolicies(application).entrySet()
.stream()
.filter(kv -> kv.getKey().zone().equals(zone))
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
}
/** Read routing policy for given zone */
public ZoneRoutingPolicy get(ZoneId zone) {
return db.readZoneRoutingPolicy(zone);
}
/**
* Refresh routing policies for application in given zone. This is idempotent and changes will only be performed if
* load balancers for given application have changed.
*/
public void refresh(ApplicationId application, DeploymentSpec deploymentSpec, ZoneId zone) {
if (!controller.zoneRegistry().zones().directlyRouted().ids().contains(zone)) return;
var loadBalancers = new AllocatedLoadBalancers(application, zone, controller.serviceRegistry().configServer()
.getLoadBalancers(application, zone),
deploymentSpec);
var inactiveZones = inactiveZones(application, deploymentSpec);
try (var lock = db.lockRoutingPolicies()) {
if (!application.instance().isTester()) removeGlobalDnsUnreferencedBy(loadBalancers, lock);
storePoliciesOf(loadBalancers, lock);
removePoliciesUnreferencedBy(loadBalancers, lock);
if (!application.instance().isTester()) updateGlobalDnsOf(get(loadBalancers.application).values(), inactiveZones, lock);
}
}
/** Set the status of all global endpoints in given zone */
public void setGlobalRoutingStatus(ZoneId zone, GlobalRouting.Status status) {
try (var lock = db.lockRoutingPolicies()) {
db.writeZoneRoutingPolicy(new ZoneRoutingPolicy(zone, GlobalRouting.status(status, GlobalRouting.Agent.operator,
controller.clock().instant())));
var allPolicies = db.readRoutingPolicies();
for (var applicationPolicies : allPolicies.values()) {
updateGlobalDnsOf(applicationPolicies.values(), Set.of(), lock);
}
}
}
/** Set the status of all global endpoints for given deployment */
public void setGlobalRoutingStatus(DeploymentId deployment, GlobalRouting.Status status, GlobalRouting.Agent agent) {
try (var lock = db.lockRoutingPolicies()) {
var policies = get(deployment.applicationId());
var newPolicies = new LinkedHashMap<>(policies);
for (var policy : policies.values()) {
if (!policy.id().zone().equals(deployment.zoneId())) continue;
var newPolicy = policy.with(policy.status().with(GlobalRouting.status(status, agent,
controller.clock().instant())));
newPolicies.put(policy.id(), newPolicy);
}
db.writeRoutingPolicies(deployment.applicationId(), newPolicies);
updateGlobalDnsOf(newPolicies.values(), Set.of(), lock);
}
}
/** Update global DNS record for given policies */
/** Store routing policies for given load balancers */
private void storePoliciesOf(AllocatedLoadBalancers loadBalancers, @SuppressWarnings("unused") Lock lock) {
var policies = new LinkedHashMap<>(get(loadBalancers.application));
for (LoadBalancer loadBalancer : loadBalancers.list) {
var policyId = new RoutingPolicyId(loadBalancer.application(), loadBalancer.cluster(), loadBalancers.zone);
var existingPolicy = policies.get(policyId);
var newPolicy = new RoutingPolicy(policyId, loadBalancer.hostname(), loadBalancer.dnsZone(),
loadBalancers.endpointIdsOf(loadBalancer),
new Status(isActive(loadBalancer), GlobalRouting.DEFAULT_STATUS));
if (existingPolicy != null) {
newPolicy = newPolicy.with(newPolicy.status().with(existingPolicy.status().globalRouting()));
}
updateZoneDnsOf(newPolicy);
policies.put(newPolicy.id(), newPolicy);
}
db.writeRoutingPolicies(loadBalancers.application, policies);
}
/** Update zone DNS record for given policy */
private void updateZoneDnsOf(RoutingPolicy policy) {
var name = RecordName.from(policy.endpointIn(controller.system()).dnsName());
var data = RecordData.fqdn(policy.canonicalName().value());
controller.nameServiceForwarder().createCname(name, data, Priority.normal);
}
/** Remove policies and zone DNS records unreferenced by given load balancers */
private void removePoliciesUnreferencedBy(AllocatedLoadBalancers loadBalancers, @SuppressWarnings("unused") Lock lock) {
var policies = get(loadBalancers.application);
var newPolicies = new LinkedHashMap<>(policies);
var activeLoadBalancers = loadBalancers.list.stream().map(LoadBalancer::hostname).collect(Collectors.toSet());
for (var policy : policies.values()) {
if (activeLoadBalancers.contains(policy.canonicalName()) ||
!policy.id().zone().equals(loadBalancers.zone)) continue;
var dnsName = policy.endpointIn(controller.system()).dnsName();
controller.nameServiceForwarder().removeRecords(Record.Type.CNAME, RecordName.from(dnsName), Priority.normal);
newPolicies.remove(policy.id());
}
db.writeRoutingPolicies(loadBalancers.application, newPolicies);
}
/** Remove unreferenced global endpoints from DNS */
private void removeGlobalDnsUnreferencedBy(AllocatedLoadBalancers loadBalancers, @SuppressWarnings("unused") Lock lock) {
var zonePolicies = get(loadBalancers.application, loadBalancers.zone).values();
var removalCandidates = new HashSet<>(routingTableFrom(zonePolicies).keySet());
var activeRoutingIds = routingIdsFrom(loadBalancers);
removalCandidates.removeAll(activeRoutingIds);
for (var id : removalCandidates) {
var endpoint = RoutingPolicy.globalEndpointOf(id.application(), id.endpointId(), controller.system());
controller.nameServiceForwarder().removeRecords(Record.Type.ALIAS, RecordName.from(endpoint.dnsName()), Priority.normal);
}
}
/** Compute routing IDs from given load balancers */
private static Set<RoutingId> routingIdsFrom(AllocatedLoadBalancers loadBalancers) {
Set<RoutingId> routingIds = new LinkedHashSet<>();
for (var loadBalancer : loadBalancers.list) {
for (var endpointId : loadBalancers.endpointIdsOf(loadBalancer)) {
routingIds.add(new RoutingId(loadBalancer.application(), endpointId));
}
}
return Collections.unmodifiableSet(routingIds);
}
/** Compute a routing table from given policies */
private static Map<RoutingId, List<RoutingPolicy>> routingTableFrom(Collection<RoutingPolicy> routingPolicies) {
var routingTable = new LinkedHashMap<RoutingId, List<RoutingPolicy>>();
for (var policy : routingPolicies) {
for (var endpoint : policy.endpoints()) {
var id = new RoutingId(policy.id().owner(), endpoint);
routingTable.putIfAbsent(id, new ArrayList<>());
routingTable.get(id).add(policy);
}
}
return Collections.unmodifiableMap(routingTable);
}
/** Returns whether the global routing status of given policy is configured to be {@link GlobalRouting.Status
private static boolean isConfiguredOut(RoutingPolicy policy, ZoneRoutingPolicy zonePolicy, Set<ZoneId> inactiveZones) {
return zonePolicy.globalRouting().status() == GlobalRouting.Status.out ||
policy.status().globalRouting().status() == GlobalRouting.Status.out ||
inactiveZones.contains(policy.id().zone());
}
private static boolean isActive(LoadBalancer loadBalancer) {
switch (loadBalancer.state()) {
case reserved:
case active: return true;
}
return false;
}
/** Load balancers allocated to a deployment */
private static class AllocatedLoadBalancers {
private final ApplicationId application;
private final ZoneId zone;
private final List<LoadBalancer> list;
private final DeploymentSpec deploymentSpec;
private AllocatedLoadBalancers(ApplicationId application, ZoneId zone, List<LoadBalancer> loadBalancers,
DeploymentSpec deploymentSpec) {
this.application = application;
this.zone = zone;
this.list = List.copyOf(loadBalancers);
this.deploymentSpec = deploymentSpec;
}
/** Compute all endpoint IDs for given load balancer */
private Set<EndpointId> endpointIdsOf(LoadBalancer loadBalancer) {
if (!zone.environment().isProduction()) {
return Set.of();
}
var instanceSpec = deploymentSpec.instance(loadBalancer.application().instance());
if (instanceSpec.isEmpty()) {
return Set.of();
}
return instanceSpec.get().endpoints().stream()
.filter(endpoint -> endpoint.containerId().equals(loadBalancer.cluster().value()))
.filter(endpoint -> endpoint.regions().contains(zone.region()))
.map(com.yahoo.config.application.api.Endpoint::endpointId)
.map(EndpointId::of)
.collect(Collectors.toSet());
}
}
/** Returns zones where global routing is declared inactive for instance through deploymentSpec */
private static Set<ZoneId> inactiveZones(ApplicationId instance, DeploymentSpec deploymentSpec) {
var instanceSpec = deploymentSpec.instance(instance.instance());
if (instanceSpec.isEmpty()) return Set.of();
return instanceSpec.get().zones().stream()
.filter(zone -> zone.environment().isProduction())
.filter(zone -> !zone.active())
.map(zone -> ZoneId.from(zone.environment(), zone.region().get()))
.collect(Collectors.toUnmodifiableSet());
}
} | class RoutingPolicies {
private final Controller controller;
private final CuratorDb db;
public RoutingPolicies(Controller controller) {
this.controller = Objects.requireNonNull(controller, "controller must be non-null");
this.db = controller.curator();
try (var lock = db.lockRoutingPolicies()) {
for (var policy : db.readRoutingPolicies().entrySet()) {
db.writeRoutingPolicies(policy.getKey(), policy.getValue());
}
}
}
/** Read all known routing policies for given instance */
public Map<RoutingPolicyId, RoutingPolicy> get(ApplicationId application) {
return db.readRoutingPolicies(application);
}
/** Read all known routing policies for given deployment */
public Map<RoutingPolicyId, RoutingPolicy> get(DeploymentId deployment) {
return get(deployment.applicationId(), deployment.zoneId());
}
/** Read all known routing policies for given deployment */
public Map<RoutingPolicyId, RoutingPolicy> get(ApplicationId application, ZoneId zone) {
return db.readRoutingPolicies(application).entrySet()
.stream()
.filter(kv -> kv.getKey().zone().equals(zone))
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
}
/** Read routing policy for given zone */
public ZoneRoutingPolicy get(ZoneId zone) {
return db.readZoneRoutingPolicy(zone);
}
/**
* Refresh routing policies for application in given zone. This is idempotent and changes will only be performed if
* load balancers for given application have changed.
*/
public void refresh(ApplicationId application, DeploymentSpec deploymentSpec, ZoneId zone) {
if (!controller.zoneRegistry().zones().directlyRouted().ids().contains(zone)) return;
var loadBalancers = new AllocatedLoadBalancers(application, zone, controller.serviceRegistry().configServer()
.getLoadBalancers(application, zone),
deploymentSpec);
var inactiveZones = inactiveZones(application, deploymentSpec);
try (var lock = db.lockRoutingPolicies()) {
if (!application.instance().isTester()) removeGlobalDnsUnreferencedBy(loadBalancers, lock);
storePoliciesOf(loadBalancers, lock);
removePoliciesUnreferencedBy(loadBalancers, lock);
if (!application.instance().isTester()) updateGlobalDnsOf(get(loadBalancers.application).values(), inactiveZones, lock);
}
}
/** Set the status of all global endpoints in given zone */
public void setGlobalRoutingStatus(ZoneId zone, GlobalRouting.Status status) {
try (var lock = db.lockRoutingPolicies()) {
db.writeZoneRoutingPolicy(new ZoneRoutingPolicy(zone, GlobalRouting.status(status, GlobalRouting.Agent.operator,
controller.clock().instant())));
var allPolicies = db.readRoutingPolicies();
for (var applicationPolicies : allPolicies.values()) {
updateGlobalDnsOf(applicationPolicies.values(), Set.of(), lock);
}
}
}
/** Set the status of all global endpoints for given deployment */
public void setGlobalRoutingStatus(DeploymentId deployment, GlobalRouting.Status status, GlobalRouting.Agent agent) {
try (var lock = db.lockRoutingPolicies()) {
var policies = get(deployment.applicationId());
var newPolicies = new LinkedHashMap<>(policies);
for (var policy : policies.values()) {
if (!policy.id().zone().equals(deployment.zoneId())) continue;
var newPolicy = policy.with(policy.status().with(GlobalRouting.status(status, agent,
controller.clock().instant())));
newPolicies.put(policy.id(), newPolicy);
}
db.writeRoutingPolicies(deployment.applicationId(), newPolicies);
updateGlobalDnsOf(newPolicies.values(), Set.of(), lock);
}
}
/** Update global DNS record for given policies */
/** Store routing policies for given load balancers */
private void storePoliciesOf(AllocatedLoadBalancers loadBalancers, @SuppressWarnings("unused") Lock lock) {
var policies = new LinkedHashMap<>(get(loadBalancers.application));
for (LoadBalancer loadBalancer : loadBalancers.list) {
var policyId = new RoutingPolicyId(loadBalancer.application(), loadBalancer.cluster(), loadBalancers.zone);
var existingPolicy = policies.get(policyId);
var newPolicy = new RoutingPolicy(policyId, loadBalancer.hostname(), loadBalancer.dnsZone(),
loadBalancers.endpointIdsOf(loadBalancer),
new Status(isActive(loadBalancer), GlobalRouting.DEFAULT_STATUS));
if (existingPolicy != null) {
newPolicy = newPolicy.with(newPolicy.status().with(existingPolicy.status().globalRouting()));
}
updateZoneDnsOf(newPolicy);
policies.put(newPolicy.id(), newPolicy);
}
db.writeRoutingPolicies(loadBalancers.application, policies);
}
/** Update zone DNS record for given policy */
private void updateZoneDnsOf(RoutingPolicy policy) {
var name = RecordName.from(policy.endpointIn(controller.system()).dnsName());
var data = RecordData.fqdn(policy.canonicalName().value());
controller.nameServiceForwarder().createCname(name, data, Priority.normal);
}
/** Remove policies and zone DNS records unreferenced by given load balancers */
private void removePoliciesUnreferencedBy(AllocatedLoadBalancers loadBalancers, @SuppressWarnings("unused") Lock lock) {
var policies = get(loadBalancers.application);
var newPolicies = new LinkedHashMap<>(policies);
var activeLoadBalancers = loadBalancers.list.stream().map(LoadBalancer::hostname).collect(Collectors.toSet());
for (var policy : policies.values()) {
if (activeLoadBalancers.contains(policy.canonicalName()) ||
!policy.id().zone().equals(loadBalancers.zone)) continue;
var dnsName = policy.endpointIn(controller.system()).dnsName();
controller.nameServiceForwarder().removeRecords(Record.Type.CNAME, RecordName.from(dnsName), Priority.normal);
newPolicies.remove(policy.id());
}
db.writeRoutingPolicies(loadBalancers.application, newPolicies);
}
/** Remove unreferenced global endpoints from DNS */
private void removeGlobalDnsUnreferencedBy(AllocatedLoadBalancers loadBalancers, @SuppressWarnings("unused") Lock lock) {
var zonePolicies = get(loadBalancers.application, loadBalancers.zone).values();
var removalCandidates = new HashSet<>(routingTableFrom(zonePolicies).keySet());
var activeRoutingIds = routingIdsFrom(loadBalancers);
removalCandidates.removeAll(activeRoutingIds);
for (var id : removalCandidates) {
var endpoint = RoutingPolicy.globalEndpointOf(id.application(), id.endpointId(), controller.system());
controller.nameServiceForwarder().removeRecords(Record.Type.ALIAS, RecordName.from(endpoint.dnsName()), Priority.normal);
}
}
/** Compute routing IDs from given load balancers */
private static Set<RoutingId> routingIdsFrom(AllocatedLoadBalancers loadBalancers) {
Set<RoutingId> routingIds = new LinkedHashSet<>();
for (var loadBalancer : loadBalancers.list) {
for (var endpointId : loadBalancers.endpointIdsOf(loadBalancer)) {
routingIds.add(new RoutingId(loadBalancer.application(), endpointId));
}
}
return Collections.unmodifiableSet(routingIds);
}
/** Compute a routing table from given policies */
private static Map<RoutingId, List<RoutingPolicy>> routingTableFrom(Collection<RoutingPolicy> routingPolicies) {
var routingTable = new LinkedHashMap<RoutingId, List<RoutingPolicy>>();
for (var policy : routingPolicies) {
for (var endpoint : policy.endpoints()) {
var id = new RoutingId(policy.id().owner(), endpoint);
routingTable.putIfAbsent(id, new ArrayList<>());
routingTable.get(id).add(policy);
}
}
return Collections.unmodifiableMap(routingTable);
}
/** Returns whether the global routing status of given policy is configured to be {@link GlobalRouting.Status
private static boolean isConfiguredOut(RoutingPolicy policy, ZoneRoutingPolicy zonePolicy, Set<ZoneId> inactiveZones) {
return zonePolicy.globalRouting().status() == GlobalRouting.Status.out ||
policy.status().globalRouting().status() == GlobalRouting.Status.out ||
inactiveZones.contains(policy.id().zone());
}
private static boolean isActive(LoadBalancer loadBalancer) {
switch (loadBalancer.state()) {
case reserved:
case active: return true;
}
return false;
}
/** Load balancers allocated to a deployment */
private static class AllocatedLoadBalancers {
private final ApplicationId application;
private final ZoneId zone;
private final List<LoadBalancer> list;
private final DeploymentSpec deploymentSpec;
private AllocatedLoadBalancers(ApplicationId application, ZoneId zone, List<LoadBalancer> loadBalancers,
DeploymentSpec deploymentSpec) {
this.application = application;
this.zone = zone;
this.list = List.copyOf(loadBalancers);
this.deploymentSpec = deploymentSpec;
}
/** Compute all endpoint IDs for given load balancer */
private Set<EndpointId> endpointIdsOf(LoadBalancer loadBalancer) {
if (!zone.environment().isProduction()) {
return Set.of();
}
var instanceSpec = deploymentSpec.instance(loadBalancer.application().instance());
if (instanceSpec.isEmpty()) {
return Set.of();
}
return instanceSpec.get().endpoints().stream()
.filter(endpoint -> endpoint.containerId().equals(loadBalancer.cluster().value()))
.filter(endpoint -> endpoint.regions().contains(zone.region()))
.map(com.yahoo.config.application.api.Endpoint::endpointId)
.map(EndpointId::of)
.collect(Collectors.toSet());
}
}
/** Returns zones where global routing is declared inactive for instance through deploymentSpec */
private static Set<ZoneId> inactiveZones(ApplicationId instance, DeploymentSpec deploymentSpec) {
var instanceSpec = deploymentSpec.instance(instance.instance());
if (instanceSpec.isEmpty()) return Set.of();
return instanceSpec.get().zones().stream()
.filter(zone -> zone.environment().isProduction())
.filter(zone -> !zone.active())
.map(zone -> ZoneId.from(zone.environment(), zone.region().get()))
.collect(Collectors.toUnmodifiableSet());
}
} | |
Why are you removing the function indirection in these? | private Result search(String request, Query query, Chain<Searcher> searchChain) {
if (query.getTraceLevel() >= 2) {
query.trace("Invoking " + searchChain, false, 2);
}
if (searchConnections != null) {
connectionStatistics();
} else {
log.log(LogLevel.WARNING,
"searchConnections is a null reference, probably a known race condition during startup.",
new IllegalStateException("searchConnections reference is null."));
}
try {
return searchAndFill(query, searchChain);
} catch (ParseException e) {
ErrorMessage error = ErrorMessage.createIllegalQuery("Could not parse query [" + request + "]: "
+ Exceptions.toMessageString(e));
log.log(LogLevel.DEBUG, error.getDetailedMessage());
return new Result(query, error);
} catch (IllegalArgumentException e) {
if ("Comparison method violates its general contract!".equals(e.getMessage())) {
log(request, query, e);
return new Result(query, ErrorMessage.createUnspecifiedError("Failed searching: " +
Exceptions.toMessageString(e), e));
}
else {
ErrorMessage error = ErrorMessage.createBadRequest("Invalid search request [" + request + "]: "
+ Exceptions.toMessageString(e));
log.log(LogLevel.DEBUG, error.getDetailedMessage());
return new Result(query, error);
}
} catch (LinkageError | StackOverflowError e) {
ErrorMessage error = ErrorMessage.createErrorInPluginSearcher("Error executing " + searchChain + "]: " +
Exceptions.toMessageString(e), e);
log(request, query, e);
return new Result(query, error);
} catch (Exception e) {
log(request, query, e);
return new Result(query, ErrorMessage.createUnspecifiedError("Failed searching: " +
Exceptions.toMessageString(e), e));
}
} | } catch (ParseException e) { | private Result search(String request, Query query, Chain<Searcher> searchChain) {
if (query.getTraceLevel() >= 2) {
query.trace("Invoking " + searchChain, false, 2);
}
if (searchConnections != null) {
connectionStatistics();
} else {
log.log(LogLevel.WARNING,
"searchConnections is a null reference, probably a known race condition during startup.",
new IllegalStateException("searchConnections reference is null."));
}
try {
return searchAndFill(query, searchChain);
} catch (ParseException e) {
ErrorMessage error = ErrorMessage.createIllegalQuery("Could not parse query [" + request + "]: "
+ Exceptions.toMessageString(e));
log.log(LogLevel.DEBUG, error::getDetailedMessage);
return new Result(query, error);
} catch (IllegalArgumentException e) {
if ("Comparison method violates its general contract!".equals(e.getMessage())) {
log(request, query, e);
return new Result(query, ErrorMessage.createUnspecifiedError("Failed searching: " +
Exceptions.toMessageString(e), e));
}
else {
ErrorMessage error = ErrorMessage.createBadRequest("Invalid search request [" + request + "]: "
+ Exceptions.toMessageString(e));
log.log(LogLevel.DEBUG, error::getDetailedMessage);
return new Result(query, error);
}
} catch (LinkageError | StackOverflowError e) {
ErrorMessage error = ErrorMessage.createErrorInPluginSearcher("Error executing " + searchChain + "]: " +
Exceptions.toMessageString(e), e);
log(request, query, e);
return new Result(query, error);
} catch (Exception e) {
log(request, query, e);
return new Result(query, ErrorMessage.createUnspecifiedError("Failed searching: " +
Exceptions.toMessageString(e), e));
}
} | class MeanConnections implements Callback {
@Override
public void run(Handle h, boolean firstTime) {
if (firstTime) {
metric.set(SEARCH_CONNECTIONS, 0.0d, null);
return;
}
Value v = (Value) h;
metric.set(SEARCH_CONNECTIONS, v.getMean(), null);
}
} | class MeanConnections implements Callback {
@Override
public void run(Handle h, boolean firstTime) {
if (firstTime) {
metric.set(SEARCH_CONNECTIONS, 0.0d, null);
return;
}
Value v = (Value) h;
metric.set(SEARCH_CONNECTIONS, v.getMean(), null);
}
} |
That was supposed to be method reference, not method call. Fixed | private Result search(String request, Query query, Chain<Searcher> searchChain) {
if (query.getTraceLevel() >= 2) {
query.trace("Invoking " + searchChain, false, 2);
}
if (searchConnections != null) {
connectionStatistics();
} else {
log.log(LogLevel.WARNING,
"searchConnections is a null reference, probably a known race condition during startup.",
new IllegalStateException("searchConnections reference is null."));
}
try {
return searchAndFill(query, searchChain);
} catch (ParseException e) {
ErrorMessage error = ErrorMessage.createIllegalQuery("Could not parse query [" + request + "]: "
+ Exceptions.toMessageString(e));
log.log(LogLevel.DEBUG, error.getDetailedMessage());
return new Result(query, error);
} catch (IllegalArgumentException e) {
if ("Comparison method violates its general contract!".equals(e.getMessage())) {
log(request, query, e);
return new Result(query, ErrorMessage.createUnspecifiedError("Failed searching: " +
Exceptions.toMessageString(e), e));
}
else {
ErrorMessage error = ErrorMessage.createBadRequest("Invalid search request [" + request + "]: "
+ Exceptions.toMessageString(e));
log.log(LogLevel.DEBUG, error.getDetailedMessage());
return new Result(query, error);
}
} catch (LinkageError | StackOverflowError e) {
ErrorMessage error = ErrorMessage.createErrorInPluginSearcher("Error executing " + searchChain + "]: " +
Exceptions.toMessageString(e), e);
log(request, query, e);
return new Result(query, error);
} catch (Exception e) {
log(request, query, e);
return new Result(query, ErrorMessage.createUnspecifiedError("Failed searching: " +
Exceptions.toMessageString(e), e));
}
} | } catch (ParseException e) { | private Result search(String request, Query query, Chain<Searcher> searchChain) {
if (query.getTraceLevel() >= 2) {
query.trace("Invoking " + searchChain, false, 2);
}
if (searchConnections != null) {
connectionStatistics();
} else {
log.log(LogLevel.WARNING,
"searchConnections is a null reference, probably a known race condition during startup.",
new IllegalStateException("searchConnections reference is null."));
}
try {
return searchAndFill(query, searchChain);
} catch (ParseException e) {
ErrorMessage error = ErrorMessage.createIllegalQuery("Could not parse query [" + request + "]: "
+ Exceptions.toMessageString(e));
log.log(LogLevel.DEBUG, error::getDetailedMessage);
return new Result(query, error);
} catch (IllegalArgumentException e) {
if ("Comparison method violates its general contract!".equals(e.getMessage())) {
log(request, query, e);
return new Result(query, ErrorMessage.createUnspecifiedError("Failed searching: " +
Exceptions.toMessageString(e), e));
}
else {
ErrorMessage error = ErrorMessage.createBadRequest("Invalid search request [" + request + "]: "
+ Exceptions.toMessageString(e));
log.log(LogLevel.DEBUG, error::getDetailedMessage);
return new Result(query, error);
}
} catch (LinkageError | StackOverflowError e) {
ErrorMessage error = ErrorMessage.createErrorInPluginSearcher("Error executing " + searchChain + "]: " +
Exceptions.toMessageString(e), e);
log(request, query, e);
return new Result(query, error);
} catch (Exception e) {
log(request, query, e);
return new Result(query, ErrorMessage.createUnspecifiedError("Failed searching: " +
Exceptions.toMessageString(e), e));
}
} | class MeanConnections implements Callback {
@Override
public void run(Handle h, boolean firstTime) {
if (firstTime) {
metric.set(SEARCH_CONNECTIONS, 0.0d, null);
return;
}
Value v = (Value) h;
metric.set(SEARCH_CONNECTIONS, v.getMean(), null);
}
} | class MeanConnections implements Callback {
@Override
public void run(Handle h, boolean firstTime) {
if (firstTime) {
metric.set(SEARCH_CONNECTIONS, 0.0d, null);
return;
}
Value v = (Value) h;
metric.set(SEARCH_CONNECTIONS, v.getMean(), null);
}
} |
Yes, thanks, forgot a copy here! | public RevisionHistory with(ApplicationVersion revision, JobId job) {
NavigableMap<JobId, NavigableMap<RevisionId, ApplicationVersion>> development = new TreeMap<>(this.development);
NavigableMap<RevisionId, ApplicationVersion> revisions = development.computeIfAbsent(job, __ -> new TreeMap<>());
if ( ! revisions.isEmpty()) revisions.compute(revisions.lastKey(), (__, last) -> last.withoutPackage());
revisions.put(revision.id(), revision);
return new RevisionHistory(production, development);
} | NavigableMap<RevisionId, ApplicationVersion> revisions = development.computeIfAbsent(job, __ -> new TreeMap<>()); | public RevisionHistory with(ApplicationVersion revision, JobId job) {
NavigableMap<JobId, NavigableMap<RevisionId, ApplicationVersion>> development = new TreeMap<>(this.development);
NavigableMap<RevisionId, ApplicationVersion> revisions = development.computeIfAbsent(job, __ -> new TreeMap<>());
if ( ! revisions.isEmpty()) revisions.compute(revisions.lastKey(), (__, last) -> last.withoutPackage());
revisions.put(revision.id(), revision);
return new RevisionHistory(production, development);
} | class RevisionHistory {
private static final Comparator<JobId> comparator = Comparator.comparing(JobId::application).thenComparing(JobId::type);
private final NavigableMap<RevisionId, ApplicationVersion> production;
private final NavigableMap<JobId, NavigableMap<RevisionId, ApplicationVersion>> development;
private RevisionHistory(NavigableMap<RevisionId, ApplicationVersion> production,
NavigableMap<JobId, NavigableMap<RevisionId, ApplicationVersion>> development) {
this.production = production;
this.development = development;
}
public static RevisionHistory empty() {
return ofRevisions(List.of(), Map.of());
}
public static RevisionHistory ofRevisions(Collection<ApplicationVersion> productionRevisions,
Map<JobId, ? extends Collection<ApplicationVersion>> developmentRevisions) {
NavigableMap<RevisionId, ApplicationVersion> production = new TreeMap<>();
for (ApplicationVersion revision : productionRevisions)
production.put(revision.id(), revision);
NavigableMap<JobId, NavigableMap<RevisionId, ApplicationVersion>> development = new TreeMap<>(comparator);
developmentRevisions.forEach((job, jobRevisions) -> {
NavigableMap<RevisionId, ApplicationVersion> revisions = development.computeIfAbsent(job, __ -> new TreeMap<>());
for (ApplicationVersion revision : jobRevisions)
revisions.put(revision.id(), revision);
});
return new RevisionHistory(production, development);
}
/** Returns a copy of this with given production revision forgotten. */
public RevisionHistory without(RevisionId id) {
if ( ! production.containsKey(id)) return this;
TreeMap<RevisionId, ApplicationVersion> production = new TreeMap<>(this.production);
production.remove(id);
return new RevisionHistory(production, development);
}
/** Returns a copy of this with the given development revision forgotten. */
public RevisionHistory without(RevisionId id, JobId job) {
if ( ! development.containsKey(job) || ! development.get(job).containsKey(id)) return this;
NavigableMap<JobId, NavigableMap<RevisionId, ApplicationVersion>> development = new TreeMap<>(this.development);
development.get(job).remove(id);
return new RevisionHistory(production, development);
}
/** Returns a copy of this with the production revision added or updated */
public RevisionHistory with(ApplicationVersion revision) {
NavigableMap<RevisionId, ApplicationVersion> production = new TreeMap<>(this.production);
production.put(revision.id(), revision);
return new RevisionHistory(production, development);
}
/** Returns a copy of this with the new development revision added, and the previous version without a package. */
private static ApplicationVersion revisionOf(RevisionId id, boolean production) {
return new ApplicationVersion(Optional.empty(), OptionalLong.of(id.number()), Optional.empty(),
Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(),
! production, Optional.empty(), false, false);
}
/** Returns the production {@link ApplicationVersion} with this revision ID. */
public ApplicationVersion get(RevisionId id) {
return production.getOrDefault(id, revisionOf(id, true));
}
/** Returns the development {@link ApplicationVersion} for the give job, with this revision ID. */
public ApplicationVersion get(RevisionId id, JobId job) {
return development.getOrDefault(job, Collections.emptyNavigableMap())
.getOrDefault(id, revisionOf(id, false));
}
/** Returns the last submitted production build. */
public Optional<ApplicationVersion> last() {
return Optional.ofNullable(production.lastEntry()).map(Map.Entry::getValue);
}
/** Returns all known production revisions we still have the package for, from oldest to newest. */
public List<ApplicationVersion> withPackage() {
return production.values().stream()
.filter(ApplicationVersion::hasPackage)
.collect(toList());
}
/** Returns the currently deployable revisions of the application. */
public Deque<ApplicationVersion> deployable(boolean ascending) {
Deque<ApplicationVersion> versions = new ArrayDeque<>();
String previousHash = "";
for (ApplicationVersion version : withPackage()) {
if (version.isDeployable() && (version.bundleHash().isEmpty() || ! previousHash.equals(version.bundleHash().get()))) {
if (ascending) versions.addLast(version);
else versions.addFirst(version);
}
previousHash = version.bundleHash().orElse("");
}
return versions;
}
/** All known production revisions, in ascending order. */
public List<ApplicationVersion> production() {
return List.copyOf(production.values());
}
/* All known development revisions, in ascending order, per job. */
public NavigableMap<JobId, List<ApplicationVersion>> development() {
NavigableMap<JobId, List<ApplicationVersion>> copy = new TreeMap<>(comparator);
development.forEach((job, revisions) -> copy.put(job, List.copyOf(revisions.values())));
return Collections.unmodifiableNavigableMap(copy);
}
} | class RevisionHistory {
private static final Comparator<JobId> comparator = Comparator.comparing(JobId::application).thenComparing(JobId::type);
private final NavigableMap<RevisionId, ApplicationVersion> production;
private final NavigableMap<JobId, NavigableMap<RevisionId, ApplicationVersion>> development;
private RevisionHistory(NavigableMap<RevisionId, ApplicationVersion> production,
NavigableMap<JobId, NavigableMap<RevisionId, ApplicationVersion>> development) {
this.production = production;
this.development = development;
}
public static RevisionHistory empty() {
return ofRevisions(List.of(), Map.of());
}
public static RevisionHistory ofRevisions(Collection<ApplicationVersion> productionRevisions,
Map<JobId, ? extends Collection<ApplicationVersion>> developmentRevisions) {
NavigableMap<RevisionId, ApplicationVersion> production = new TreeMap<>();
for (ApplicationVersion revision : productionRevisions)
production.put(revision.id(), revision);
NavigableMap<JobId, NavigableMap<RevisionId, ApplicationVersion>> development = new TreeMap<>(comparator);
developmentRevisions.forEach((job, jobRevisions) -> {
NavigableMap<RevisionId, ApplicationVersion> revisions = development.computeIfAbsent(job, __ -> new TreeMap<>());
for (ApplicationVersion revision : jobRevisions)
revisions.put(revision.id(), revision);
});
return new RevisionHistory(production, development);
}
/** Returns a copy of this without any production revisions older than the given. */
public RevisionHistory withoutOlderThan(RevisionId id) {
if (production.headMap(id).isEmpty()) return this;
return new RevisionHistory(production.tailMap(id, true), development);
}
/** Returns a copy of this without any development revisions older than the given. */
public RevisionHistory withoutOlderThan(RevisionId id, JobId job) {
if ( ! development.containsKey(job) || development.get(job).headMap(id).isEmpty()) return this;
NavigableMap<JobId, NavigableMap<RevisionId, ApplicationVersion>> development = new TreeMap<>(this.development);
development.compute(job, (__, revisions) -> revisions.tailMap(id, true));
return new RevisionHistory(production, development);
}
/** Returns a copy of this with the production revision added or updated */
public RevisionHistory with(ApplicationVersion revision) {
NavigableMap<RevisionId, ApplicationVersion> production = new TreeMap<>(this.production);
production.put(revision.id(), revision);
return new RevisionHistory(production, development);
}
/** Returns a copy of this with the new development revision added, and the previous version without a package. */
private static ApplicationVersion revisionOf(RevisionId id, boolean production) {
return new ApplicationVersion(Optional.empty(), OptionalLong.of(id.number()), Optional.empty(),
Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(),
! production, Optional.empty(), false, false);
}
/** Returns the production {@link ApplicationVersion} with this revision ID. */
public ApplicationVersion get(RevisionId id) {
return production.getOrDefault(id, revisionOf(id, true));
}
/** Returns the development {@link ApplicationVersion} for the give job, with this revision ID. */
public ApplicationVersion get(RevisionId id, JobId job) {
return development.getOrDefault(job, Collections.emptyNavigableMap())
.getOrDefault(id, revisionOf(id, false));
}
/** Returns the last submitted production build. */
public Optional<ApplicationVersion> last() {
return Optional.ofNullable(production.lastEntry()).map(Map.Entry::getValue);
}
/** Returns all known production revisions we still have the package for, from oldest to newest. */
public List<ApplicationVersion> withPackage() {
return production.values().stream()
.filter(ApplicationVersion::hasPackage)
.collect(toList());
}
/** Returns the currently deployable revisions of the application. */
public Deque<ApplicationVersion> deployable(boolean ascending) {
Deque<ApplicationVersion> versions = new ArrayDeque<>();
String previousHash = "";
for (ApplicationVersion version : withPackage()) {
if (version.isDeployable() && (version.bundleHash().isEmpty() || ! previousHash.equals(version.bundleHash().get()))) {
if (ascending) versions.addLast(version);
else versions.addFirst(version);
}
previousHash = version.bundleHash().orElse("");
}
return versions;
}
/** All known production revisions, in ascending order. */
public List<ApplicationVersion> production() {
return List.copyOf(production.values());
}
/* All known development revisions, in ascending order, per job. */
public NavigableMap<JobId, List<ApplicationVersion>> development() {
NavigableMap<JobId, List<ApplicationVersion>> copy = new TreeMap<>(comparator);
development.forEach((job, revisions) -> copy.put(job, List.copyOf(revisions.values())));
return Collections.unmodifiableNavigableMap(copy);
}
} |
/source/config-model/src/main/java/com/yahoo/vespa/model/container/search/ContainerSearch.java:[62,44] cannot find symbol symbol: class RpcResourcePoolComponent location: class com.yahoo.vespa.model.container.search.ContainerSearch [INFO] 1 error I guess you mean RpcResourcePool | private void initializeDispatchers(Collection<AbstractSearchCluster> searchClusters) {
for (AbstractSearchCluster searchCluster : searchClusters) {
if ( ! ( searchCluster instanceof IndexedSearchCluster)) continue;
owningCluster.addComponent(new DispatcherComponent((IndexedSearchCluster)searchCluster));
owningCluster.addComponent(new RpcResourcePoolComponent((IndexedSearchCluster)searchCluster));
}
} | owningCluster.addComponent(new RpcResourcePoolComponent((IndexedSearchCluster)searchCluster)); | private void initializeDispatchers(Collection<AbstractSearchCluster> searchClusters) {
for (AbstractSearchCluster searchCluster : searchClusters) {
if ( ! ( searchCluster instanceof IndexedSearchCluster)) continue;
owningCluster.addComponent(new DispatcherComponent((IndexedSearchCluster)searchCluster));
owningCluster.addComponent(new RpcResourcePoolComponent((IndexedSearchCluster)searchCluster));
}
} | class ContainerSearch extends ContainerSubsystem<SearchChains>
implements
IndexInfoConfig.Producer,
IlscriptsConfig.Producer,
QrSearchersConfig.Producer,
QueryProfilesConfig.Producer,
SemanticRulesConfig.Producer,
PageTemplatesConfig.Producer {
private ApplicationContainerCluster owningCluster;
private final List<AbstractSearchCluster> searchClusters = new LinkedList<>();
private final Options options;
private QueryProfiles queryProfiles;
private SemanticRules semanticRules;
private PageTemplates pageTemplates;
public ContainerSearch(ApplicationContainerCluster cluster, SearchChains chains, Options options) {
super(chains);
this.owningCluster = cluster;
this.options = options;
}
public void connectSearchClusters(Map<String, AbstractSearchCluster> searchClusters) {
this.searchClusters.addAll(searchClusters.values());
initializeDispatchers(searchClusters.values());
initializeSearchChains(searchClusters);
}
/** Adds a Dispatcher component to the owning container cluster for each search cluster */
public void initializeSearchChains(Map<String, ? extends AbstractSearchCluster> searchClusters) {
getChains().initialize(searchClusters);
QrsCache defaultCacheOptions = getOptions().cacheSettings.get("");
if (defaultCacheOptions != null) {
for (LocalProvider localProvider : getChains().localProviders()) {
localProvider.setCacheSize(defaultCacheOptions.size);
}
}
for (LocalProvider localProvider : getChains().localProviders()) {
QrsCache cacheOptions = getOptions().cacheSettings.get(localProvider.getClusterName());
if (cacheOptions != null) {
localProvider.setCacheSize(cacheOptions.size);
}
}
}
public void setQueryProfiles(QueryProfiles queryProfiles) {
this.queryProfiles = queryProfiles;
}
public void setSemanticRules(SemanticRules semanticRules) {
this.semanticRules = semanticRules;
}
public void setPageTemplates(PageTemplates pageTemplates) {
this.pageTemplates = pageTemplates;
}
@Override
public void getConfig(QueryProfilesConfig.Builder builder) {
if (queryProfiles != null) {
queryProfiles.getConfig(builder);
}
}
@Override
public void getConfig(SemanticRulesConfig.Builder builder) {
if (semanticRules != null) semanticRules.getConfig(builder);
}
@Override
public void getConfig(PageTemplatesConfig.Builder builder) {
if (pageTemplates != null) pageTemplates.getConfig(builder);
}
@Override
public void getConfig(IndexInfoConfig.Builder builder) {
for (AbstractSearchCluster sc : searchClusters) {
sc.getConfig(builder);
}
}
@Override
public void getConfig(IlscriptsConfig.Builder builder) {
for (AbstractSearchCluster sc : searchClusters) {
sc.getConfig(builder);
}
}
@Override
public void getConfig(QrSearchersConfig.Builder builder) {
for (int i = 0; i < searchClusters.size(); i++) {
AbstractSearchCluster sys = findClusterWithId(searchClusters, i);
QrSearchersConfig.Searchcluster.Builder scB = new QrSearchersConfig.Searchcluster.Builder().
name(sys.getClusterName());
for (AbstractSearchCluster.SearchDefinitionSpec spec : sys.getLocalSDS()) {
scB.searchdef(spec.getSearchDefinition().getName());
}
scB.rankprofiles(new QrSearchersConfig.Searchcluster.Rankprofiles.Builder().configid(sys.getConfigId()));
scB.indexingmode(QrSearchersConfig.Searchcluster.Indexingmode.Enum.valueOf(sys.getIndexingModeName()));
if ( ! (sys instanceof IndexedSearchCluster)) {
scB.storagecluster(new QrSearchersConfig.Searchcluster.Storagecluster.Builder().
routespec(((StreamingSearchCluster)sys).getStorageRouteSpec()));
}
builder.searchcluster(scB);
}
}
private static AbstractSearchCluster findClusterWithId(List<AbstractSearchCluster> clusters, int index) {
for (AbstractSearchCluster sys : clusters) {
if (sys.getClusterIndex() == index)
return sys;
}
throw new IllegalArgumentException("No search cluster with index " + index + " exists");
}
public Options getOptions() {
return options;
}
/** Encapsulates qrserver options. */
public static class Options {
Map<String, QrsCache> cacheSettings = new LinkedHashMap<>();
}
} | class ContainerSearch extends ContainerSubsystem<SearchChains>
implements
IndexInfoConfig.Producer,
IlscriptsConfig.Producer,
QrSearchersConfig.Producer,
QueryProfilesConfig.Producer,
SemanticRulesConfig.Producer,
PageTemplatesConfig.Producer {
private ApplicationContainerCluster owningCluster;
private final List<AbstractSearchCluster> searchClusters = new LinkedList<>();
private final Options options;
private QueryProfiles queryProfiles;
private SemanticRules semanticRules;
private PageTemplates pageTemplates;
public ContainerSearch(ApplicationContainerCluster cluster, SearchChains chains, Options options) {
super(chains);
this.owningCluster = cluster;
this.options = options;
}
public void connectSearchClusters(Map<String, AbstractSearchCluster> searchClusters) {
this.searchClusters.addAll(searchClusters.values());
initializeDispatchers(searchClusters.values());
initializeSearchChains(searchClusters);
}
/** Adds a Dispatcher component to the owning container cluster for each search cluster */
public void initializeSearchChains(Map<String, ? extends AbstractSearchCluster> searchClusters) {
getChains().initialize(searchClusters);
QrsCache defaultCacheOptions = getOptions().cacheSettings.get("");
if (defaultCacheOptions != null) {
for (LocalProvider localProvider : getChains().localProviders()) {
localProvider.setCacheSize(defaultCacheOptions.size);
}
}
for (LocalProvider localProvider : getChains().localProviders()) {
QrsCache cacheOptions = getOptions().cacheSettings.get(localProvider.getClusterName());
if (cacheOptions != null) {
localProvider.setCacheSize(cacheOptions.size);
}
}
}
public void setQueryProfiles(QueryProfiles queryProfiles) {
this.queryProfiles = queryProfiles;
}
public void setSemanticRules(SemanticRules semanticRules) {
this.semanticRules = semanticRules;
}
public void setPageTemplates(PageTemplates pageTemplates) {
this.pageTemplates = pageTemplates;
}
@Override
public void getConfig(QueryProfilesConfig.Builder builder) {
if (queryProfiles != null) {
queryProfiles.getConfig(builder);
}
}
@Override
public void getConfig(SemanticRulesConfig.Builder builder) {
if (semanticRules != null) semanticRules.getConfig(builder);
}
@Override
public void getConfig(PageTemplatesConfig.Builder builder) {
if (pageTemplates != null) pageTemplates.getConfig(builder);
}
@Override
public void getConfig(IndexInfoConfig.Builder builder) {
for (AbstractSearchCluster sc : searchClusters) {
sc.getConfig(builder);
}
}
@Override
public void getConfig(IlscriptsConfig.Builder builder) {
for (AbstractSearchCluster sc : searchClusters) {
sc.getConfig(builder);
}
}
@Override
public void getConfig(QrSearchersConfig.Builder builder) {
for (int i = 0; i < searchClusters.size(); i++) {
AbstractSearchCluster sys = findClusterWithId(searchClusters, i);
QrSearchersConfig.Searchcluster.Builder scB = new QrSearchersConfig.Searchcluster.Builder().
name(sys.getClusterName());
for (AbstractSearchCluster.SearchDefinitionSpec spec : sys.getLocalSDS()) {
scB.searchdef(spec.getSearchDefinition().getName());
}
scB.rankprofiles(new QrSearchersConfig.Searchcluster.Rankprofiles.Builder().configid(sys.getConfigId()));
scB.indexingmode(QrSearchersConfig.Searchcluster.Indexingmode.Enum.valueOf(sys.getIndexingModeName()));
if ( ! (sys instanceof IndexedSearchCluster)) {
scB.storagecluster(new QrSearchersConfig.Searchcluster.Storagecluster.Builder().
routespec(((StreamingSearchCluster)sys).getStorageRouteSpec()));
}
builder.searchcluster(scB);
}
}
private static AbstractSearchCluster findClusterWithId(List<AbstractSearchCluster> clusters, int index) {
for (AbstractSearchCluster sys : clusters) {
if (sys.getClusterIndex() == index)
return sys;
}
throw new IllegalArgumentException("No search cluster with index " + index + " exists");
}
public Options getOptions() {
return options;
}
/** Encapsulates qrserver options. */
public static class Options {
Map<String, QrsCache> cacheSettings = new LinkedHashMap<>();
}
} |
No the class name is correct, but I forgot to do 'git add'. | private void initializeDispatchers(Collection<AbstractSearchCluster> searchClusters) {
for (AbstractSearchCluster searchCluster : searchClusters) {
if ( ! ( searchCluster instanceof IndexedSearchCluster)) continue;
owningCluster.addComponent(new DispatcherComponent((IndexedSearchCluster)searchCluster));
owningCluster.addComponent(new RpcResourcePoolComponent((IndexedSearchCluster)searchCluster));
}
} | owningCluster.addComponent(new RpcResourcePoolComponent((IndexedSearchCluster)searchCluster)); | private void initializeDispatchers(Collection<AbstractSearchCluster> searchClusters) {
for (AbstractSearchCluster searchCluster : searchClusters) {
if ( ! ( searchCluster instanceof IndexedSearchCluster)) continue;
owningCluster.addComponent(new DispatcherComponent((IndexedSearchCluster)searchCluster));
owningCluster.addComponent(new RpcResourcePoolComponent((IndexedSearchCluster)searchCluster));
}
} | class ContainerSearch extends ContainerSubsystem<SearchChains>
implements
IndexInfoConfig.Producer,
IlscriptsConfig.Producer,
QrSearchersConfig.Producer,
QueryProfilesConfig.Producer,
SemanticRulesConfig.Producer,
PageTemplatesConfig.Producer {
private ApplicationContainerCluster owningCluster;
private final List<AbstractSearchCluster> searchClusters = new LinkedList<>();
private final Options options;
private QueryProfiles queryProfiles;
private SemanticRules semanticRules;
private PageTemplates pageTemplates;
public ContainerSearch(ApplicationContainerCluster cluster, SearchChains chains, Options options) {
super(chains);
this.owningCluster = cluster;
this.options = options;
}
public void connectSearchClusters(Map<String, AbstractSearchCluster> searchClusters) {
this.searchClusters.addAll(searchClusters.values());
initializeDispatchers(searchClusters.values());
initializeSearchChains(searchClusters);
}
/** Adds a Dispatcher component to the owning container cluster for each search cluster */
public void initializeSearchChains(Map<String, ? extends AbstractSearchCluster> searchClusters) {
getChains().initialize(searchClusters);
QrsCache defaultCacheOptions = getOptions().cacheSettings.get("");
if (defaultCacheOptions != null) {
for (LocalProvider localProvider : getChains().localProviders()) {
localProvider.setCacheSize(defaultCacheOptions.size);
}
}
for (LocalProvider localProvider : getChains().localProviders()) {
QrsCache cacheOptions = getOptions().cacheSettings.get(localProvider.getClusterName());
if (cacheOptions != null) {
localProvider.setCacheSize(cacheOptions.size);
}
}
}
public void setQueryProfiles(QueryProfiles queryProfiles) {
this.queryProfiles = queryProfiles;
}
public void setSemanticRules(SemanticRules semanticRules) {
this.semanticRules = semanticRules;
}
public void setPageTemplates(PageTemplates pageTemplates) {
this.pageTemplates = pageTemplates;
}
@Override
public void getConfig(QueryProfilesConfig.Builder builder) {
if (queryProfiles != null) {
queryProfiles.getConfig(builder);
}
}
@Override
public void getConfig(SemanticRulesConfig.Builder builder) {
if (semanticRules != null) semanticRules.getConfig(builder);
}
@Override
public void getConfig(PageTemplatesConfig.Builder builder) {
if (pageTemplates != null) pageTemplates.getConfig(builder);
}
@Override
public void getConfig(IndexInfoConfig.Builder builder) {
for (AbstractSearchCluster sc : searchClusters) {
sc.getConfig(builder);
}
}
@Override
public void getConfig(IlscriptsConfig.Builder builder) {
for (AbstractSearchCluster sc : searchClusters) {
sc.getConfig(builder);
}
}
@Override
public void getConfig(QrSearchersConfig.Builder builder) {
for (int i = 0; i < searchClusters.size(); i++) {
AbstractSearchCluster sys = findClusterWithId(searchClusters, i);
QrSearchersConfig.Searchcluster.Builder scB = new QrSearchersConfig.Searchcluster.Builder().
name(sys.getClusterName());
for (AbstractSearchCluster.SearchDefinitionSpec spec : sys.getLocalSDS()) {
scB.searchdef(spec.getSearchDefinition().getName());
}
scB.rankprofiles(new QrSearchersConfig.Searchcluster.Rankprofiles.Builder().configid(sys.getConfigId()));
scB.indexingmode(QrSearchersConfig.Searchcluster.Indexingmode.Enum.valueOf(sys.getIndexingModeName()));
if ( ! (sys instanceof IndexedSearchCluster)) {
scB.storagecluster(new QrSearchersConfig.Searchcluster.Storagecluster.Builder().
routespec(((StreamingSearchCluster)sys).getStorageRouteSpec()));
}
builder.searchcluster(scB);
}
}
private static AbstractSearchCluster findClusterWithId(List<AbstractSearchCluster> clusters, int index) {
for (AbstractSearchCluster sys : clusters) {
if (sys.getClusterIndex() == index)
return sys;
}
throw new IllegalArgumentException("No search cluster with index " + index + " exists");
}
public Options getOptions() {
return options;
}
/** Encapsulates qrserver options. */
public static class Options {
Map<String, QrsCache> cacheSettings = new LinkedHashMap<>();
}
} | class ContainerSearch extends ContainerSubsystem<SearchChains>
implements
IndexInfoConfig.Producer,
IlscriptsConfig.Producer,
QrSearchersConfig.Producer,
QueryProfilesConfig.Producer,
SemanticRulesConfig.Producer,
PageTemplatesConfig.Producer {
private ApplicationContainerCluster owningCluster;
private final List<AbstractSearchCluster> searchClusters = new LinkedList<>();
private final Options options;
private QueryProfiles queryProfiles;
private SemanticRules semanticRules;
private PageTemplates pageTemplates;
public ContainerSearch(ApplicationContainerCluster cluster, SearchChains chains, Options options) {
super(chains);
this.owningCluster = cluster;
this.options = options;
}
public void connectSearchClusters(Map<String, AbstractSearchCluster> searchClusters) {
this.searchClusters.addAll(searchClusters.values());
initializeDispatchers(searchClusters.values());
initializeSearchChains(searchClusters);
}
/** Adds a Dispatcher component to the owning container cluster for each search cluster */
public void initializeSearchChains(Map<String, ? extends AbstractSearchCluster> searchClusters) {
getChains().initialize(searchClusters);
QrsCache defaultCacheOptions = getOptions().cacheSettings.get("");
if (defaultCacheOptions != null) {
for (LocalProvider localProvider : getChains().localProviders()) {
localProvider.setCacheSize(defaultCacheOptions.size);
}
}
for (LocalProvider localProvider : getChains().localProviders()) {
QrsCache cacheOptions = getOptions().cacheSettings.get(localProvider.getClusterName());
if (cacheOptions != null) {
localProvider.setCacheSize(cacheOptions.size);
}
}
}
public void setQueryProfiles(QueryProfiles queryProfiles) {
this.queryProfiles = queryProfiles;
}
public void setSemanticRules(SemanticRules semanticRules) {
this.semanticRules = semanticRules;
}
public void setPageTemplates(PageTemplates pageTemplates) {
this.pageTemplates = pageTemplates;
}
@Override
public void getConfig(QueryProfilesConfig.Builder builder) {
if (queryProfiles != null) {
queryProfiles.getConfig(builder);
}
}
@Override
public void getConfig(SemanticRulesConfig.Builder builder) {
if (semanticRules != null) semanticRules.getConfig(builder);
}
@Override
public void getConfig(PageTemplatesConfig.Builder builder) {
if (pageTemplates != null) pageTemplates.getConfig(builder);
}
@Override
public void getConfig(IndexInfoConfig.Builder builder) {
for (AbstractSearchCluster sc : searchClusters) {
sc.getConfig(builder);
}
}
@Override
public void getConfig(IlscriptsConfig.Builder builder) {
for (AbstractSearchCluster sc : searchClusters) {
sc.getConfig(builder);
}
}
@Override
public void getConfig(QrSearchersConfig.Builder builder) {
for (int i = 0; i < searchClusters.size(); i++) {
AbstractSearchCluster sys = findClusterWithId(searchClusters, i);
QrSearchersConfig.Searchcluster.Builder scB = new QrSearchersConfig.Searchcluster.Builder().
name(sys.getClusterName());
for (AbstractSearchCluster.SearchDefinitionSpec spec : sys.getLocalSDS()) {
scB.searchdef(spec.getSearchDefinition().getName());
}
scB.rankprofiles(new QrSearchersConfig.Searchcluster.Rankprofiles.Builder().configid(sys.getConfigId()));
scB.indexingmode(QrSearchersConfig.Searchcluster.Indexingmode.Enum.valueOf(sys.getIndexingModeName()));
if ( ! (sys instanceof IndexedSearchCluster)) {
scB.storagecluster(new QrSearchersConfig.Searchcluster.Storagecluster.Builder().
routespec(((StreamingSearchCluster)sys).getStorageRouteSpec()));
}
builder.searchcluster(scB);
}
}
private static AbstractSearchCluster findClusterWithId(List<AbstractSearchCluster> clusters, int index) {
for (AbstractSearchCluster sys : clusters) {
if (sys.getClusterIndex() == index)
return sys;
}
throw new IllegalArgumentException("No search cluster with index " + index + " exists");
}
public Options getOptions() {
return options;
}
/** Encapsulates qrserver options. */
public static class Options {
Map<String, QrsCache> cacheSettings = new LinkedHashMap<>();
}
} |
Suggest to use `var` for these, as their type is clear from the right hand side. | private void initializeDispatchers(Collection<AbstractSearchCluster> searchClusters) {
for (AbstractSearchCluster searchCluster : searchClusters) {
if ( ! ( searchCluster instanceof IndexedSearchCluster)) continue;
Component dispatcher = new DispatcherComponent((IndexedSearchCluster)searchCluster);
Component rpcResoucePool = new RpcResourcePoolComponent();
dispatcher.injectForName(rpcResoucePool.getComponentId().getName(), rpcResoucePool);
dispatcher.addComponent(rpcResoucePool);
owningCluster.addComponent(dispatcher);
}
} | Component rpcResoucePool = new RpcResourcePoolComponent(); | private void initializeDispatchers(Collection<AbstractSearchCluster> searchClusters) {
for (AbstractSearchCluster searchCluster : searchClusters) {
if ( ! ( searchCluster instanceof IndexedSearchCluster)) continue;
Component dispatcher = new DispatcherComponent((IndexedSearchCluster)searchCluster);
var rpcResoucePool = new RpcResourcePoolComponent();
dispatcher.inject(rpcResoucePool);
dispatcher.addComponent(rpcResoucePool);
owningCluster.addComponent(dispatcher);
}
} | class ContainerSearch extends ContainerSubsystem<SearchChains>
implements
IndexInfoConfig.Producer,
IlscriptsConfig.Producer,
QrSearchersConfig.Producer,
QueryProfilesConfig.Producer,
SemanticRulesConfig.Producer,
PageTemplatesConfig.Producer {
private ApplicationContainerCluster owningCluster;
private final List<AbstractSearchCluster> searchClusters = new LinkedList<>();
private final Options options;
private QueryProfiles queryProfiles;
private SemanticRules semanticRules;
private PageTemplates pageTemplates;
public ContainerSearch(ApplicationContainerCluster cluster, SearchChains chains, Options options) {
super(chains);
this.owningCluster = cluster;
this.options = options;
}
public void connectSearchClusters(Map<String, AbstractSearchCluster> searchClusters) {
this.searchClusters.addAll(searchClusters.values());
initializeDispatchers(searchClusters.values());
initializeSearchChains(searchClusters);
}
/** Adds a Dispatcher component to the owning container cluster for each search cluster */
public void initializeSearchChains(Map<String, ? extends AbstractSearchCluster> searchClusters) {
getChains().initialize(searchClusters);
QrsCache defaultCacheOptions = getOptions().cacheSettings.get("");
if (defaultCacheOptions != null) {
for (LocalProvider localProvider : getChains().localProviders()) {
localProvider.setCacheSize(defaultCacheOptions.size);
}
}
for (LocalProvider localProvider : getChains().localProviders()) {
QrsCache cacheOptions = getOptions().cacheSettings.get(localProvider.getClusterName());
if (cacheOptions != null) {
localProvider.setCacheSize(cacheOptions.size);
}
}
}
public void setQueryProfiles(QueryProfiles queryProfiles) {
this.queryProfiles = queryProfiles;
}
public void setSemanticRules(SemanticRules semanticRules) {
this.semanticRules = semanticRules;
}
public void setPageTemplates(PageTemplates pageTemplates) {
this.pageTemplates = pageTemplates;
}
@Override
public void getConfig(QueryProfilesConfig.Builder builder) {
if (queryProfiles != null) {
queryProfiles.getConfig(builder);
}
}
@Override
public void getConfig(SemanticRulesConfig.Builder builder) {
if (semanticRules != null) semanticRules.getConfig(builder);
}
@Override
public void getConfig(PageTemplatesConfig.Builder builder) {
if (pageTemplates != null) pageTemplates.getConfig(builder);
}
@Override
public void getConfig(IndexInfoConfig.Builder builder) {
for (AbstractSearchCluster sc : searchClusters) {
sc.getConfig(builder);
}
}
@Override
public void getConfig(IlscriptsConfig.Builder builder) {
for (AbstractSearchCluster sc : searchClusters) {
sc.getConfig(builder);
}
}
@Override
public void getConfig(QrSearchersConfig.Builder builder) {
for (int i = 0; i < searchClusters.size(); i++) {
AbstractSearchCluster sys = findClusterWithId(searchClusters, i);
QrSearchersConfig.Searchcluster.Builder scB = new QrSearchersConfig.Searchcluster.Builder().
name(sys.getClusterName());
for (AbstractSearchCluster.SearchDefinitionSpec spec : sys.getLocalSDS()) {
scB.searchdef(spec.getSearchDefinition().getName());
}
scB.rankprofiles(new QrSearchersConfig.Searchcluster.Rankprofiles.Builder().configid(sys.getConfigId()));
scB.indexingmode(QrSearchersConfig.Searchcluster.Indexingmode.Enum.valueOf(sys.getIndexingModeName()));
if ( ! (sys instanceof IndexedSearchCluster)) {
scB.storagecluster(new QrSearchersConfig.Searchcluster.Storagecluster.Builder().
routespec(((StreamingSearchCluster)sys).getStorageRouteSpec()));
}
builder.searchcluster(scB);
}
}
private static AbstractSearchCluster findClusterWithId(List<AbstractSearchCluster> clusters, int index) {
for (AbstractSearchCluster sys : clusters) {
if (sys.getClusterIndex() == index)
return sys;
}
throw new IllegalArgumentException("No search cluster with index " + index + " exists");
}
public Options getOptions() {
return options;
}
/** Encapsulates qrserver options. */
public static class Options {
Map<String, QrsCache> cacheSettings = new LinkedHashMap<>();
}
} | class ContainerSearch extends ContainerSubsystem<SearchChains>
implements
IndexInfoConfig.Producer,
IlscriptsConfig.Producer,
QrSearchersConfig.Producer,
QueryProfilesConfig.Producer,
SemanticRulesConfig.Producer,
PageTemplatesConfig.Producer {
private ApplicationContainerCluster owningCluster;
private final List<AbstractSearchCluster> searchClusters = new LinkedList<>();
private final Options options;
private QueryProfiles queryProfiles;
private SemanticRules semanticRules;
private PageTemplates pageTemplates;
public ContainerSearch(ApplicationContainerCluster cluster, SearchChains chains, Options options) {
super(chains);
this.owningCluster = cluster;
this.options = options;
}
public void connectSearchClusters(Map<String, AbstractSearchCluster> searchClusters) {
this.searchClusters.addAll(searchClusters.values());
initializeDispatchers(searchClusters.values());
initializeSearchChains(searchClusters);
}
/** Adds a Dispatcher component to the owning container cluster for each search cluster */
public void initializeSearchChains(Map<String, ? extends AbstractSearchCluster> searchClusters) {
getChains().initialize(searchClusters);
QrsCache defaultCacheOptions = getOptions().cacheSettings.get("");
if (defaultCacheOptions != null) {
for (LocalProvider localProvider : getChains().localProviders()) {
localProvider.setCacheSize(defaultCacheOptions.size);
}
}
for (LocalProvider localProvider : getChains().localProviders()) {
QrsCache cacheOptions = getOptions().cacheSettings.get(localProvider.getClusterName());
if (cacheOptions != null) {
localProvider.setCacheSize(cacheOptions.size);
}
}
}
public void setQueryProfiles(QueryProfiles queryProfiles) {
this.queryProfiles = queryProfiles;
}
public void setSemanticRules(SemanticRules semanticRules) {
this.semanticRules = semanticRules;
}
public void setPageTemplates(PageTemplates pageTemplates) {
this.pageTemplates = pageTemplates;
}
@Override
public void getConfig(QueryProfilesConfig.Builder builder) {
if (queryProfiles != null) {
queryProfiles.getConfig(builder);
}
}
@Override
public void getConfig(SemanticRulesConfig.Builder builder) {
if (semanticRules != null) semanticRules.getConfig(builder);
}
@Override
public void getConfig(PageTemplatesConfig.Builder builder) {
if (pageTemplates != null) pageTemplates.getConfig(builder);
}
@Override
public void getConfig(IndexInfoConfig.Builder builder) {
for (AbstractSearchCluster sc : searchClusters) {
sc.getConfig(builder);
}
}
@Override
public void getConfig(IlscriptsConfig.Builder builder) {
for (AbstractSearchCluster sc : searchClusters) {
sc.getConfig(builder);
}
}
@Override
public void getConfig(QrSearchersConfig.Builder builder) {
for (int i = 0; i < searchClusters.size(); i++) {
AbstractSearchCluster sys = findClusterWithId(searchClusters, i);
QrSearchersConfig.Searchcluster.Builder scB = new QrSearchersConfig.Searchcluster.Builder().
name(sys.getClusterName());
for (AbstractSearchCluster.SearchDefinitionSpec spec : sys.getLocalSDS()) {
scB.searchdef(spec.getSearchDefinition().getName());
}
scB.rankprofiles(new QrSearchersConfig.Searchcluster.Rankprofiles.Builder().configid(sys.getConfigId()));
scB.indexingmode(QrSearchersConfig.Searchcluster.Indexingmode.Enum.valueOf(sys.getIndexingModeName()));
if ( ! (sys instanceof IndexedSearchCluster)) {
scB.storagecluster(new QrSearchersConfig.Searchcluster.Storagecluster.Builder().
routespec(((StreamingSearchCluster)sys).getStorageRouteSpec()));
}
builder.searchcluster(scB);
}
}
private static AbstractSearchCluster findClusterWithId(List<AbstractSearchCluster> clusters, int index) {
for (AbstractSearchCluster sys : clusters) {
if (sys.getClusterIndex() == index)
return sys;
}
throw new IllegalArgumentException("No search cluster with index " + index + " exists");
}
public Options getOptions() {
return options;
}
/** Encapsulates qrserver options. */
public static class Options {
Map<String, QrsCache> cacheSettings = new LinkedHashMap<>();
}
} |
Just `inject()` will do. The name is only used with the `@Named` annotation for named constructor params, which is useful when the same ctor needs multiple instances of the same class. | private void initializeDispatchers(Collection<AbstractSearchCluster> searchClusters) {
for (AbstractSearchCluster searchCluster : searchClusters) {
if ( ! ( searchCluster instanceof IndexedSearchCluster)) continue;
Component dispatcher = new DispatcherComponent((IndexedSearchCluster)searchCluster);
Component rpcResoucePool = new RpcResourcePoolComponent();
dispatcher.injectForName(rpcResoucePool.getComponentId().getName(), rpcResoucePool);
dispatcher.addComponent(rpcResoucePool);
owningCluster.addComponent(dispatcher);
}
} | dispatcher.injectForName(rpcResoucePool.getComponentId().getName(), rpcResoucePool); | private void initializeDispatchers(Collection<AbstractSearchCluster> searchClusters) {
for (AbstractSearchCluster searchCluster : searchClusters) {
if ( ! ( searchCluster instanceof IndexedSearchCluster)) continue;
Component dispatcher = new DispatcherComponent((IndexedSearchCluster)searchCluster);
var rpcResoucePool = new RpcResourcePoolComponent();
dispatcher.inject(rpcResoucePool);
dispatcher.addComponent(rpcResoucePool);
owningCluster.addComponent(dispatcher);
}
} | class ContainerSearch extends ContainerSubsystem<SearchChains>
implements
IndexInfoConfig.Producer,
IlscriptsConfig.Producer,
QrSearchersConfig.Producer,
QueryProfilesConfig.Producer,
SemanticRulesConfig.Producer,
PageTemplatesConfig.Producer {
private ApplicationContainerCluster owningCluster;
private final List<AbstractSearchCluster> searchClusters = new LinkedList<>();
private final Options options;
private QueryProfiles queryProfiles;
private SemanticRules semanticRules;
private PageTemplates pageTemplates;
public ContainerSearch(ApplicationContainerCluster cluster, SearchChains chains, Options options) {
super(chains);
this.owningCluster = cluster;
this.options = options;
}
public void connectSearchClusters(Map<String, AbstractSearchCluster> searchClusters) {
this.searchClusters.addAll(searchClusters.values());
initializeDispatchers(searchClusters.values());
initializeSearchChains(searchClusters);
}
/** Adds a Dispatcher component to the owning container cluster for each search cluster */
public void initializeSearchChains(Map<String, ? extends AbstractSearchCluster> searchClusters) {
getChains().initialize(searchClusters);
QrsCache defaultCacheOptions = getOptions().cacheSettings.get("");
if (defaultCacheOptions != null) {
for (LocalProvider localProvider : getChains().localProviders()) {
localProvider.setCacheSize(defaultCacheOptions.size);
}
}
for (LocalProvider localProvider : getChains().localProviders()) {
QrsCache cacheOptions = getOptions().cacheSettings.get(localProvider.getClusterName());
if (cacheOptions != null) {
localProvider.setCacheSize(cacheOptions.size);
}
}
}
public void setQueryProfiles(QueryProfiles queryProfiles) {
this.queryProfiles = queryProfiles;
}
public void setSemanticRules(SemanticRules semanticRules) {
this.semanticRules = semanticRules;
}
public void setPageTemplates(PageTemplates pageTemplates) {
this.pageTemplates = pageTemplates;
}
@Override
public void getConfig(QueryProfilesConfig.Builder builder) {
if (queryProfiles != null) {
queryProfiles.getConfig(builder);
}
}
@Override
public void getConfig(SemanticRulesConfig.Builder builder) {
if (semanticRules != null) semanticRules.getConfig(builder);
}
@Override
public void getConfig(PageTemplatesConfig.Builder builder) {
if (pageTemplates != null) pageTemplates.getConfig(builder);
}
@Override
public void getConfig(IndexInfoConfig.Builder builder) {
for (AbstractSearchCluster sc : searchClusters) {
sc.getConfig(builder);
}
}
@Override
public void getConfig(IlscriptsConfig.Builder builder) {
for (AbstractSearchCluster sc : searchClusters) {
sc.getConfig(builder);
}
}
@Override
public void getConfig(QrSearchersConfig.Builder builder) {
for (int i = 0; i < searchClusters.size(); i++) {
AbstractSearchCluster sys = findClusterWithId(searchClusters, i);
QrSearchersConfig.Searchcluster.Builder scB = new QrSearchersConfig.Searchcluster.Builder().
name(sys.getClusterName());
for (AbstractSearchCluster.SearchDefinitionSpec spec : sys.getLocalSDS()) {
scB.searchdef(spec.getSearchDefinition().getName());
}
scB.rankprofiles(new QrSearchersConfig.Searchcluster.Rankprofiles.Builder().configid(sys.getConfigId()));
scB.indexingmode(QrSearchersConfig.Searchcluster.Indexingmode.Enum.valueOf(sys.getIndexingModeName()));
if ( ! (sys instanceof IndexedSearchCluster)) {
scB.storagecluster(new QrSearchersConfig.Searchcluster.Storagecluster.Builder().
routespec(((StreamingSearchCluster)sys).getStorageRouteSpec()));
}
builder.searchcluster(scB);
}
}
private static AbstractSearchCluster findClusterWithId(List<AbstractSearchCluster> clusters, int index) {
for (AbstractSearchCluster sys : clusters) {
if (sys.getClusterIndex() == index)
return sys;
}
throw new IllegalArgumentException("No search cluster with index " + index + " exists");
}
public Options getOptions() {
return options;
}
/** Encapsulates qrserver options. */
public static class Options {
Map<String, QrsCache> cacheSettings = new LinkedHashMap<>();
}
} | class ContainerSearch extends ContainerSubsystem<SearchChains>
implements
IndexInfoConfig.Producer,
IlscriptsConfig.Producer,
QrSearchersConfig.Producer,
QueryProfilesConfig.Producer,
SemanticRulesConfig.Producer,
PageTemplatesConfig.Producer {
private ApplicationContainerCluster owningCluster;
private final List<AbstractSearchCluster> searchClusters = new LinkedList<>();
private final Options options;
private QueryProfiles queryProfiles;
private SemanticRules semanticRules;
private PageTemplates pageTemplates;
public ContainerSearch(ApplicationContainerCluster cluster, SearchChains chains, Options options) {
super(chains);
this.owningCluster = cluster;
this.options = options;
}
public void connectSearchClusters(Map<String, AbstractSearchCluster> searchClusters) {
this.searchClusters.addAll(searchClusters.values());
initializeDispatchers(searchClusters.values());
initializeSearchChains(searchClusters);
}
/** Adds a Dispatcher component to the owning container cluster for each search cluster */
public void initializeSearchChains(Map<String, ? extends AbstractSearchCluster> searchClusters) {
getChains().initialize(searchClusters);
QrsCache defaultCacheOptions = getOptions().cacheSettings.get("");
if (defaultCacheOptions != null) {
for (LocalProvider localProvider : getChains().localProviders()) {
localProvider.setCacheSize(defaultCacheOptions.size);
}
}
for (LocalProvider localProvider : getChains().localProviders()) {
QrsCache cacheOptions = getOptions().cacheSettings.get(localProvider.getClusterName());
if (cacheOptions != null) {
localProvider.setCacheSize(cacheOptions.size);
}
}
}
public void setQueryProfiles(QueryProfiles queryProfiles) {
this.queryProfiles = queryProfiles;
}
public void setSemanticRules(SemanticRules semanticRules) {
this.semanticRules = semanticRules;
}
public void setPageTemplates(PageTemplates pageTemplates) {
this.pageTemplates = pageTemplates;
}
@Override
public void getConfig(QueryProfilesConfig.Builder builder) {
if (queryProfiles != null) {
queryProfiles.getConfig(builder);
}
}
@Override
public void getConfig(SemanticRulesConfig.Builder builder) {
if (semanticRules != null) semanticRules.getConfig(builder);
}
@Override
public void getConfig(PageTemplatesConfig.Builder builder) {
if (pageTemplates != null) pageTemplates.getConfig(builder);
}
@Override
public void getConfig(IndexInfoConfig.Builder builder) {
for (AbstractSearchCluster sc : searchClusters) {
sc.getConfig(builder);
}
}
@Override
public void getConfig(IlscriptsConfig.Builder builder) {
for (AbstractSearchCluster sc : searchClusters) {
sc.getConfig(builder);
}
}
@Override
public void getConfig(QrSearchersConfig.Builder builder) {
for (int i = 0; i < searchClusters.size(); i++) {
AbstractSearchCluster sys = findClusterWithId(searchClusters, i);
QrSearchersConfig.Searchcluster.Builder scB = new QrSearchersConfig.Searchcluster.Builder().
name(sys.getClusterName());
for (AbstractSearchCluster.SearchDefinitionSpec spec : sys.getLocalSDS()) {
scB.searchdef(spec.getSearchDefinition().getName());
}
scB.rankprofiles(new QrSearchersConfig.Searchcluster.Rankprofiles.Builder().configid(sys.getConfigId()));
scB.indexingmode(QrSearchersConfig.Searchcluster.Indexingmode.Enum.valueOf(sys.getIndexingModeName()));
if ( ! (sys instanceof IndexedSearchCluster)) {
scB.storagecluster(new QrSearchersConfig.Searchcluster.Storagecluster.Builder().
routespec(((StreamingSearchCluster)sys).getStorageRouteSpec()));
}
builder.searchcluster(scB);
}
}
private static AbstractSearchCluster findClusterWithId(List<AbstractSearchCluster> clusters, int index) {
for (AbstractSearchCluster sys : clusters) {
if (sys.getClusterIndex() == index)
return sys;
}
throw new IllegalArgumentException("No search cluster with index " + index + " exists");
}
public Options getOptions() {
return options;
}
/** Encapsulates qrserver options. */
public static class Options {
Map<String, QrsCache> cacheSettings = new LinkedHashMap<>();
}
} |
container-search is available from the model, so the class object can be used instead of a string. It will then fail compile-time instead of runtime if the class is moved or renamed. | private static ComponentModel toComponentModel() {
String className = "com.yahoo.search.dispatch.rpc.RpcResourcePool";
return new ComponentModel(className, className, BundleMapper.searchAndDocprocBundle, null);
} | String className = "com.yahoo.search.dispatch.rpc.RpcResourcePool"; | private static ComponentModel toComponentModel() {
String className = com.yahoo.search.dispatch.rpc.RpcResourcePool.class.getName();
return new ComponentModel(className, className, BundleMapper.searchAndDocprocBundle, null);
} | class RpcResourcePoolComponent extends Component<RpcResourcePoolComponent, ComponentModel> {
public RpcResourcePoolComponent() {
super(toComponentModel());
}
} | class RpcResourcePoolComponent extends Component<RpcResourcePoolComponent, ComponentModel> {
public RpcResourcePoolComponent() {
super(toComponentModel());
}
} |
Ok, fixed | private void initializeDispatchers(Collection<AbstractSearchCluster> searchClusters) {
for (AbstractSearchCluster searchCluster : searchClusters) {
if ( ! ( searchCluster instanceof IndexedSearchCluster)) continue;
Component dispatcher = new DispatcherComponent((IndexedSearchCluster)searchCluster);
Component rpcResoucePool = new RpcResourcePoolComponent();
dispatcher.injectForName(rpcResoucePool.getComponentId().getName(), rpcResoucePool);
dispatcher.addComponent(rpcResoucePool);
owningCluster.addComponent(dispatcher);
}
} | dispatcher.injectForName(rpcResoucePool.getComponentId().getName(), rpcResoucePool); | private void initializeDispatchers(Collection<AbstractSearchCluster> searchClusters) {
for (AbstractSearchCluster searchCluster : searchClusters) {
if ( ! ( searchCluster instanceof IndexedSearchCluster)) continue;
Component dispatcher = new DispatcherComponent((IndexedSearchCluster)searchCluster);
var rpcResoucePool = new RpcResourcePoolComponent();
dispatcher.inject(rpcResoucePool);
dispatcher.addComponent(rpcResoucePool);
owningCluster.addComponent(dispatcher);
}
} | class ContainerSearch extends ContainerSubsystem<SearchChains>
implements
IndexInfoConfig.Producer,
IlscriptsConfig.Producer,
QrSearchersConfig.Producer,
QueryProfilesConfig.Producer,
SemanticRulesConfig.Producer,
PageTemplatesConfig.Producer {
private ApplicationContainerCluster owningCluster;
private final List<AbstractSearchCluster> searchClusters = new LinkedList<>();
private final Options options;
private QueryProfiles queryProfiles;
private SemanticRules semanticRules;
private PageTemplates pageTemplates;
public ContainerSearch(ApplicationContainerCluster cluster, SearchChains chains, Options options) {
super(chains);
this.owningCluster = cluster;
this.options = options;
}
public void connectSearchClusters(Map<String, AbstractSearchCluster> searchClusters) {
this.searchClusters.addAll(searchClusters.values());
initializeDispatchers(searchClusters.values());
initializeSearchChains(searchClusters);
}
/** Adds a Dispatcher component to the owning container cluster for each search cluster */
public void initializeSearchChains(Map<String, ? extends AbstractSearchCluster> searchClusters) {
getChains().initialize(searchClusters);
QrsCache defaultCacheOptions = getOptions().cacheSettings.get("");
if (defaultCacheOptions != null) {
for (LocalProvider localProvider : getChains().localProviders()) {
localProvider.setCacheSize(defaultCacheOptions.size);
}
}
for (LocalProvider localProvider : getChains().localProviders()) {
QrsCache cacheOptions = getOptions().cacheSettings.get(localProvider.getClusterName());
if (cacheOptions != null) {
localProvider.setCacheSize(cacheOptions.size);
}
}
}
public void setQueryProfiles(QueryProfiles queryProfiles) {
this.queryProfiles = queryProfiles;
}
public void setSemanticRules(SemanticRules semanticRules) {
this.semanticRules = semanticRules;
}
public void setPageTemplates(PageTemplates pageTemplates) {
this.pageTemplates = pageTemplates;
}
@Override
public void getConfig(QueryProfilesConfig.Builder builder) {
if (queryProfiles != null) {
queryProfiles.getConfig(builder);
}
}
@Override
public void getConfig(SemanticRulesConfig.Builder builder) {
if (semanticRules != null) semanticRules.getConfig(builder);
}
@Override
public void getConfig(PageTemplatesConfig.Builder builder) {
if (pageTemplates != null) pageTemplates.getConfig(builder);
}
@Override
public void getConfig(IndexInfoConfig.Builder builder) {
for (AbstractSearchCluster sc : searchClusters) {
sc.getConfig(builder);
}
}
@Override
public void getConfig(IlscriptsConfig.Builder builder) {
for (AbstractSearchCluster sc : searchClusters) {
sc.getConfig(builder);
}
}
@Override
public void getConfig(QrSearchersConfig.Builder builder) {
for (int i = 0; i < searchClusters.size(); i++) {
AbstractSearchCluster sys = findClusterWithId(searchClusters, i);
QrSearchersConfig.Searchcluster.Builder scB = new QrSearchersConfig.Searchcluster.Builder().
name(sys.getClusterName());
for (AbstractSearchCluster.SearchDefinitionSpec spec : sys.getLocalSDS()) {
scB.searchdef(spec.getSearchDefinition().getName());
}
scB.rankprofiles(new QrSearchersConfig.Searchcluster.Rankprofiles.Builder().configid(sys.getConfigId()));
scB.indexingmode(QrSearchersConfig.Searchcluster.Indexingmode.Enum.valueOf(sys.getIndexingModeName()));
if ( ! (sys instanceof IndexedSearchCluster)) {
scB.storagecluster(new QrSearchersConfig.Searchcluster.Storagecluster.Builder().
routespec(((StreamingSearchCluster)sys).getStorageRouteSpec()));
}
builder.searchcluster(scB);
}
}
private static AbstractSearchCluster findClusterWithId(List<AbstractSearchCluster> clusters, int index) {
for (AbstractSearchCluster sys : clusters) {
if (sys.getClusterIndex() == index)
return sys;
}
throw new IllegalArgumentException("No search cluster with index " + index + " exists");
}
public Options getOptions() {
return options;
}
/** Encapsulates qrserver options. */
public static class Options {
Map<String, QrsCache> cacheSettings = new LinkedHashMap<>();
}
} | class ContainerSearch extends ContainerSubsystem<SearchChains>
implements
IndexInfoConfig.Producer,
IlscriptsConfig.Producer,
QrSearchersConfig.Producer,
QueryProfilesConfig.Producer,
SemanticRulesConfig.Producer,
PageTemplatesConfig.Producer {
private ApplicationContainerCluster owningCluster;
private final List<AbstractSearchCluster> searchClusters = new LinkedList<>();
private final Options options;
private QueryProfiles queryProfiles;
private SemanticRules semanticRules;
private PageTemplates pageTemplates;
public ContainerSearch(ApplicationContainerCluster cluster, SearchChains chains, Options options) {
super(chains);
this.owningCluster = cluster;
this.options = options;
}
public void connectSearchClusters(Map<String, AbstractSearchCluster> searchClusters) {
this.searchClusters.addAll(searchClusters.values());
initializeDispatchers(searchClusters.values());
initializeSearchChains(searchClusters);
}
/** Adds a Dispatcher component to the owning container cluster for each search cluster */
public void initializeSearchChains(Map<String, ? extends AbstractSearchCluster> searchClusters) {
getChains().initialize(searchClusters);
QrsCache defaultCacheOptions = getOptions().cacheSettings.get("");
if (defaultCacheOptions != null) {
for (LocalProvider localProvider : getChains().localProviders()) {
localProvider.setCacheSize(defaultCacheOptions.size);
}
}
for (LocalProvider localProvider : getChains().localProviders()) {
QrsCache cacheOptions = getOptions().cacheSettings.get(localProvider.getClusterName());
if (cacheOptions != null) {
localProvider.setCacheSize(cacheOptions.size);
}
}
}
public void setQueryProfiles(QueryProfiles queryProfiles) {
this.queryProfiles = queryProfiles;
}
public void setSemanticRules(SemanticRules semanticRules) {
this.semanticRules = semanticRules;
}
public void setPageTemplates(PageTemplates pageTemplates) {
this.pageTemplates = pageTemplates;
}
@Override
public void getConfig(QueryProfilesConfig.Builder builder) {
if (queryProfiles != null) {
queryProfiles.getConfig(builder);
}
}
@Override
public void getConfig(SemanticRulesConfig.Builder builder) {
if (semanticRules != null) semanticRules.getConfig(builder);
}
@Override
public void getConfig(PageTemplatesConfig.Builder builder) {
if (pageTemplates != null) pageTemplates.getConfig(builder);
}
@Override
public void getConfig(IndexInfoConfig.Builder builder) {
for (AbstractSearchCluster sc : searchClusters) {
sc.getConfig(builder);
}
}
@Override
public void getConfig(IlscriptsConfig.Builder builder) {
for (AbstractSearchCluster sc : searchClusters) {
sc.getConfig(builder);
}
}
@Override
public void getConfig(QrSearchersConfig.Builder builder) {
for (int i = 0; i < searchClusters.size(); i++) {
AbstractSearchCluster sys = findClusterWithId(searchClusters, i);
QrSearchersConfig.Searchcluster.Builder scB = new QrSearchersConfig.Searchcluster.Builder().
name(sys.getClusterName());
for (AbstractSearchCluster.SearchDefinitionSpec spec : sys.getLocalSDS()) {
scB.searchdef(spec.getSearchDefinition().getName());
}
scB.rankprofiles(new QrSearchersConfig.Searchcluster.Rankprofiles.Builder().configid(sys.getConfigId()));
scB.indexingmode(QrSearchersConfig.Searchcluster.Indexingmode.Enum.valueOf(sys.getIndexingModeName()));
if ( ! (sys instanceof IndexedSearchCluster)) {
scB.storagecluster(new QrSearchersConfig.Searchcluster.Storagecluster.Builder().
routespec(((StreamingSearchCluster)sys).getStorageRouteSpec()));
}
builder.searchcluster(scB);
}
}
private static AbstractSearchCluster findClusterWithId(List<AbstractSearchCluster> clusters, int index) {
for (AbstractSearchCluster sys : clusters) {
if (sys.getClusterIndex() == index)
return sys;
}
throw new IllegalArgumentException("No search cluster with index " + index + " exists");
}
public Options getOptions() {
return options;
}
/** Encapsulates qrserver options. */
public static class Options {
Map<String, QrsCache> cacheSettings = new LinkedHashMap<>();
}
} |
Can use 'var' for rpcResourcePool, but due to some generics mumbo jumbo with addComponent it was not possible for the dispatcher. | private void initializeDispatchers(Collection<AbstractSearchCluster> searchClusters) {
for (AbstractSearchCluster searchCluster : searchClusters) {
if ( ! ( searchCluster instanceof IndexedSearchCluster)) continue;
Component dispatcher = new DispatcherComponent((IndexedSearchCluster)searchCluster);
Component rpcResoucePool = new RpcResourcePoolComponent();
dispatcher.injectForName(rpcResoucePool.getComponentId().getName(), rpcResoucePool);
dispatcher.addComponent(rpcResoucePool);
owningCluster.addComponent(dispatcher);
}
} | Component rpcResoucePool = new RpcResourcePoolComponent(); | private void initializeDispatchers(Collection<AbstractSearchCluster> searchClusters) {
for (AbstractSearchCluster searchCluster : searchClusters) {
if ( ! ( searchCluster instanceof IndexedSearchCluster)) continue;
Component dispatcher = new DispatcherComponent((IndexedSearchCluster)searchCluster);
var rpcResoucePool = new RpcResourcePoolComponent();
dispatcher.inject(rpcResoucePool);
dispatcher.addComponent(rpcResoucePool);
owningCluster.addComponent(dispatcher);
}
} | class ContainerSearch extends ContainerSubsystem<SearchChains>
implements
IndexInfoConfig.Producer,
IlscriptsConfig.Producer,
QrSearchersConfig.Producer,
QueryProfilesConfig.Producer,
SemanticRulesConfig.Producer,
PageTemplatesConfig.Producer {
private ApplicationContainerCluster owningCluster;
private final List<AbstractSearchCluster> searchClusters = new LinkedList<>();
private final Options options;
private QueryProfiles queryProfiles;
private SemanticRules semanticRules;
private PageTemplates pageTemplates;
public ContainerSearch(ApplicationContainerCluster cluster, SearchChains chains, Options options) {
super(chains);
this.owningCluster = cluster;
this.options = options;
}
public void connectSearchClusters(Map<String, AbstractSearchCluster> searchClusters) {
this.searchClusters.addAll(searchClusters.values());
initializeDispatchers(searchClusters.values());
initializeSearchChains(searchClusters);
}
/** Adds a Dispatcher component to the owning container cluster for each search cluster */
public void initializeSearchChains(Map<String, ? extends AbstractSearchCluster> searchClusters) {
getChains().initialize(searchClusters);
QrsCache defaultCacheOptions = getOptions().cacheSettings.get("");
if (defaultCacheOptions != null) {
for (LocalProvider localProvider : getChains().localProviders()) {
localProvider.setCacheSize(defaultCacheOptions.size);
}
}
for (LocalProvider localProvider : getChains().localProviders()) {
QrsCache cacheOptions = getOptions().cacheSettings.get(localProvider.getClusterName());
if (cacheOptions != null) {
localProvider.setCacheSize(cacheOptions.size);
}
}
}
public void setQueryProfiles(QueryProfiles queryProfiles) {
this.queryProfiles = queryProfiles;
}
public void setSemanticRules(SemanticRules semanticRules) {
this.semanticRules = semanticRules;
}
public void setPageTemplates(PageTemplates pageTemplates) {
this.pageTemplates = pageTemplates;
}
@Override
public void getConfig(QueryProfilesConfig.Builder builder) {
if (queryProfiles != null) {
queryProfiles.getConfig(builder);
}
}
@Override
public void getConfig(SemanticRulesConfig.Builder builder) {
if (semanticRules != null) semanticRules.getConfig(builder);
}
@Override
public void getConfig(PageTemplatesConfig.Builder builder) {
if (pageTemplates != null) pageTemplates.getConfig(builder);
}
@Override
public void getConfig(IndexInfoConfig.Builder builder) {
for (AbstractSearchCluster sc : searchClusters) {
sc.getConfig(builder);
}
}
@Override
public void getConfig(IlscriptsConfig.Builder builder) {
for (AbstractSearchCluster sc : searchClusters) {
sc.getConfig(builder);
}
}
@Override
public void getConfig(QrSearchersConfig.Builder builder) {
for (int i = 0; i < searchClusters.size(); i++) {
AbstractSearchCluster sys = findClusterWithId(searchClusters, i);
QrSearchersConfig.Searchcluster.Builder scB = new QrSearchersConfig.Searchcluster.Builder().
name(sys.getClusterName());
for (AbstractSearchCluster.SearchDefinitionSpec spec : sys.getLocalSDS()) {
scB.searchdef(spec.getSearchDefinition().getName());
}
scB.rankprofiles(new QrSearchersConfig.Searchcluster.Rankprofiles.Builder().configid(sys.getConfigId()));
scB.indexingmode(QrSearchersConfig.Searchcluster.Indexingmode.Enum.valueOf(sys.getIndexingModeName()));
if ( ! (sys instanceof IndexedSearchCluster)) {
scB.storagecluster(new QrSearchersConfig.Searchcluster.Storagecluster.Builder().
routespec(((StreamingSearchCluster)sys).getStorageRouteSpec()));
}
builder.searchcluster(scB);
}
}
private static AbstractSearchCluster findClusterWithId(List<AbstractSearchCluster> clusters, int index) {
for (AbstractSearchCluster sys : clusters) {
if (sys.getClusterIndex() == index)
return sys;
}
throw new IllegalArgumentException("No search cluster with index " + index + " exists");
}
public Options getOptions() {
return options;
}
/** Encapsulates qrserver options. */
public static class Options {
Map<String, QrsCache> cacheSettings = new LinkedHashMap<>();
}
} | class ContainerSearch extends ContainerSubsystem<SearchChains>
implements
IndexInfoConfig.Producer,
IlscriptsConfig.Producer,
QrSearchersConfig.Producer,
QueryProfilesConfig.Producer,
SemanticRulesConfig.Producer,
PageTemplatesConfig.Producer {
private ApplicationContainerCluster owningCluster;
private final List<AbstractSearchCluster> searchClusters = new LinkedList<>();
private final Options options;
private QueryProfiles queryProfiles;
private SemanticRules semanticRules;
private PageTemplates pageTemplates;
public ContainerSearch(ApplicationContainerCluster cluster, SearchChains chains, Options options) {
super(chains);
this.owningCluster = cluster;
this.options = options;
}
public void connectSearchClusters(Map<String, AbstractSearchCluster> searchClusters) {
this.searchClusters.addAll(searchClusters.values());
initializeDispatchers(searchClusters.values());
initializeSearchChains(searchClusters);
}
/** Adds a Dispatcher component to the owning container cluster for each search cluster */
public void initializeSearchChains(Map<String, ? extends AbstractSearchCluster> searchClusters) {
getChains().initialize(searchClusters);
QrsCache defaultCacheOptions = getOptions().cacheSettings.get("");
if (defaultCacheOptions != null) {
for (LocalProvider localProvider : getChains().localProviders()) {
localProvider.setCacheSize(defaultCacheOptions.size);
}
}
for (LocalProvider localProvider : getChains().localProviders()) {
QrsCache cacheOptions = getOptions().cacheSettings.get(localProvider.getClusterName());
if (cacheOptions != null) {
localProvider.setCacheSize(cacheOptions.size);
}
}
}
public void setQueryProfiles(QueryProfiles queryProfiles) {
this.queryProfiles = queryProfiles;
}
public void setSemanticRules(SemanticRules semanticRules) {
this.semanticRules = semanticRules;
}
public void setPageTemplates(PageTemplates pageTemplates) {
this.pageTemplates = pageTemplates;
}
@Override
public void getConfig(QueryProfilesConfig.Builder builder) {
if (queryProfiles != null) {
queryProfiles.getConfig(builder);
}
}
@Override
public void getConfig(SemanticRulesConfig.Builder builder) {
if (semanticRules != null) semanticRules.getConfig(builder);
}
@Override
public void getConfig(PageTemplatesConfig.Builder builder) {
if (pageTemplates != null) pageTemplates.getConfig(builder);
}
@Override
public void getConfig(IndexInfoConfig.Builder builder) {
for (AbstractSearchCluster sc : searchClusters) {
sc.getConfig(builder);
}
}
@Override
public void getConfig(IlscriptsConfig.Builder builder) {
for (AbstractSearchCluster sc : searchClusters) {
sc.getConfig(builder);
}
}
@Override
public void getConfig(QrSearchersConfig.Builder builder) {
for (int i = 0; i < searchClusters.size(); i++) {
AbstractSearchCluster sys = findClusterWithId(searchClusters, i);
QrSearchersConfig.Searchcluster.Builder scB = new QrSearchersConfig.Searchcluster.Builder().
name(sys.getClusterName());
for (AbstractSearchCluster.SearchDefinitionSpec spec : sys.getLocalSDS()) {
scB.searchdef(spec.getSearchDefinition().getName());
}
scB.rankprofiles(new QrSearchersConfig.Searchcluster.Rankprofiles.Builder().configid(sys.getConfigId()));
scB.indexingmode(QrSearchersConfig.Searchcluster.Indexingmode.Enum.valueOf(sys.getIndexingModeName()));
if ( ! (sys instanceof IndexedSearchCluster)) {
scB.storagecluster(new QrSearchersConfig.Searchcluster.Storagecluster.Builder().
routespec(((StreamingSearchCluster)sys).getStorageRouteSpec()));
}
builder.searchcluster(scB);
}
}
private static AbstractSearchCluster findClusterWithId(List<AbstractSearchCluster> clusters, int index) {
for (AbstractSearchCluster sys : clusters) {
if (sys.getClusterIndex() == index)
return sys;
}
throw new IllegalArgumentException("No search cluster with index " + index + " exists");
}
public Options getOptions() {
return options;
}
/** Encapsulates qrserver options. */
public static class Options {
Map<String, QrsCache> cacheSettings = new LinkedHashMap<>();
}
} |
Thanks, done. | private static ComponentModel toComponentModel() {
String className = "com.yahoo.search.dispatch.rpc.RpcResourcePool";
return new ComponentModel(className, className, BundleMapper.searchAndDocprocBundle, null);
} | String className = "com.yahoo.search.dispatch.rpc.RpcResourcePool"; | private static ComponentModel toComponentModel() {
String className = com.yahoo.search.dispatch.rpc.RpcResourcePool.class.getName();
return new ComponentModel(className, className, BundleMapper.searchAndDocprocBundle, null);
} | class RpcResourcePoolComponent extends Component<RpcResourcePoolComponent, ComponentModel> {
public RpcResourcePoolComponent() {
super(toComponentModel());
}
} | class RpcResourcePoolComponent extends Component<RpcResourcePoolComponent, ComponentModel> {
public RpcResourcePoolComponent() {
super(toComponentModel());
}
} |
To var or not to var, that is the question. | public void onlySuccessfulRunExpiresThenAnotherFails() {
DeploymentTester tester = new DeploymentTester();
JobController jobs = tester.controller().jobController();
var app = tester.newDeploymentContext().submit();
JobId jobId = new JobId(app.instanceId(), systemTest);
assertFalse(jobs.lastSuccess(jobId).isPresent());
app.runJob(systemTest);
assertTrue(jobs.lastSuccess(jobId).isPresent());
assertEquals(1, jobs.runs(jobId).size());
tester.clock().advance(JobController.maxHistoryAge.plusSeconds(1));
app.submit();
app.failDeployment(systemTest);
assertFalse(jobs.lastSuccess(jobId).isPresent());
assertEquals(1, jobs.runs(jobId).size());
} | JobController jobs = tester.controller().jobController(); | public void onlySuccessfulRunExpiresThenAnotherFails() {
DeploymentTester tester = new DeploymentTester();
JobController jobs = tester.controller().jobController();
var app = tester.newDeploymentContext().submit();
JobId jobId = new JobId(app.instanceId(), systemTest);
assertFalse(jobs.lastSuccess(jobId).isPresent());
app.runJob(systemTest);
assertTrue(jobs.lastSuccess(jobId).isPresent());
assertEquals(1, jobs.runs(jobId).size());
tester.clock().advance(JobController.maxHistoryAge.plusSeconds(1));
app.submit();
app.failDeployment(systemTest);
assertFalse(jobs.lastSuccess(jobId).isPresent());
assertEquals(1, jobs.runs(jobId).size());
} | class JobRunnerTest {
private static final ApplicationPackage applicationPackage = new ApplicationPackage(new byte[0]);
private static final Versions versions = new Versions(Version.fromString("1.2.3"),
ApplicationVersion.from(new SourceRevision("repo",
"branch",
"bada55"),
321),
Optional.empty(),
Optional.empty());
@Test
public void multiThreadedExecutionFinishes() {
DeploymentTester tester = new DeploymentTester();
JobController jobs = tester.controller().jobController();
StepRunner stepRunner = (step, id) -> id.type() == stagingTest && step.get() == startTests? Optional.of(error) : Optional.of(running);
Phaser phaser = new Phaser(1);
JobRunner runner = new JobRunner(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator()),
phasedExecutor(phaser), stepRunner);
TenantAndApplicationId appId = tester.createApplication("tenant", "real", "default").id();
ApplicationId id = appId.defaultInstance();
jobs.submit(appId, versions.targetApplication().source(), Optional.empty(), Optional.empty(), Optional.empty(), 2, applicationPackage, new byte[0]);
jobs.start(id, systemTest, versions);
try {
jobs.start(id, systemTest, versions);
fail("Job is already running, so this should not be allowed!");
}
catch (IllegalStateException e) { }
jobs.start(id, stagingTest, versions);
assertTrue(jobs.last(id, systemTest).get().stepStatuses().values().stream().allMatch(unfinished::equals));
assertFalse(jobs.last(id, systemTest).get().hasEnded());
assertTrue(jobs.last(id, stagingTest).get().stepStatuses().values().stream().allMatch(unfinished::equals));
assertFalse(jobs.last(id, stagingTest).get().hasEnded());
runner.maintain();
phaser.arriveAndAwaitAdvance();
assertTrue(jobs.last(id, systemTest).get().stepStatuses().values().stream().allMatch(succeeded::equals));
assertTrue(jobs.last(id, stagingTest).get().hasEnded());
assertTrue(jobs.last(id, stagingTest).get().hasFailed());
}
@Test
public void stepLogic() {
DeploymentTester tester = new DeploymentTester();
JobController jobs = tester.controller().jobController();
Map<Step, RunStatus> outcomes = new EnumMap<>(Step.class);
JobRunner runner = new JobRunner(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator()),
inThreadExecutor(), mappedRunner(outcomes));
TenantAndApplicationId appId = tester.createApplication("tenant", "real", "default").id();
ApplicationId id = appId.defaultInstance();
jobs.submit(appId, versions.targetApplication().source(), Optional.empty(), Optional.empty(), Optional.empty(), 2, applicationPackage, new byte[0]);
Supplier<Run> run = () -> jobs.last(id, systemTest).get();
jobs.start(id, systemTest, versions);
RunId first = run.get().id();
Map<Step, Status> steps = run.get().stepStatuses();
runner.maintain();
assertEquals(steps, run.get().stepStatuses());
assertEquals(List.of(deployTester), run.get().readySteps());
assertStepsWithStartTime(run.get(), deployTester);
outcomes.put(deployTester, running);
runner.maintain();
assertEquals(List.of(installTester, deployReal), run.get().readySteps());
assertStepsWithStartTime(run.get(), installTester, deployTester, deployReal);
outcomes.put(deployReal, running);
runner.maintain();
assertEquals(List.of(installTester, installReal), run.get().readySteps());
assertStepsWithStartTime(run.get(), deployTester, deployReal, installTester, installReal);
outcomes.put(installReal, running);
runner.maintain();
assertEquals(List.of(installTester), run.get().readySteps());
assertStepsWithStartTime(run.get(), deployTester, deployReal, installTester, installReal);
outcomes.put(installTester, running);
runner.maintain();
assertEquals(List.of(startTests), run.get().readySteps());
assertStepsWithStartTime(run.get(), deployTester, deployReal, installTester, installReal, startTests);
outcomes.put(startTests, running);
runner.maintain();
assertEquals(List.of(endTests), run.get().readySteps());
assertStepsWithStartTime(run.get(), deployTester, deployReal, installTester, installReal, startTests, endTests);
outcomes.put(endTests, testFailure);
runner.maintain();
assertTrue(run.get().hasFailed());
assertEquals(List.of(copyVespaLogs, deactivateTester), run.get().readySteps());
assertStepsWithStartTime(run.get(), deployTester, deployReal, installTester, installReal, startTests, endTests, copyVespaLogs, deactivateTester);
outcomes.put(copyVespaLogs, running);
runner.maintain();
assertEquals(List.of(deactivateReal, deactivateTester), run.get().readySteps());
assertStepsWithStartTime(run.get(), deployTester, deployReal, installTester, installReal, startTests, endTests, copyVespaLogs, deactivateTester, deactivateReal);
jobs.abort(run.get().id());
runner.maintain();
assertEquals(List.of(deactivateReal, deactivateTester), run.get().readySteps());
assertStepsWithStartTime(run.get(), deployTester, deployReal, installTester, installReal, startTests, endTests, copyVespaLogs, deactivateTester, deactivateReal);
outcomes.put(deactivateReal, running);
outcomes.put(deactivateTester, running);
outcomes.put(report, running);
runner.maintain();
assertTrue(run.get().hasFailed());
assertTrue(run.get().hasEnded());
assertTrue(run.get().status() == aborted);
jobs.start(id, systemTest, versions);
assertEquals(first.number() + 1, run.get().id().number());
outcomes.put(deployTester, error);
runner.maintain();
assertTrue(run.get().hasEnded());
assertTrue(run.get().hasFailed());
assertFalse(run.get().status() == aborted);
assertEquals(failed, run.get().stepStatuses().get(deployTester));
assertEquals(unfinished, run.get().stepStatuses().get(installTester));
assertEquals(succeeded, run.get().stepStatuses().get(report));
assertStepsWithStartTime(run.get(), deployTester, copyVespaLogs, deactivateTester, deactivateReal, report);
assertEquals(2, jobs.runs(id, systemTest).size());
jobs.start(id, systemTest, versions);
tester.applications().deleteInstance(id);
runner.maintain();
assertFalse(jobs.last(id, systemTest).isPresent());
assertTrue(jobs.runs(id, systemTest).isEmpty());
}
private void assertStepsWithStartTime(Run lastRun, Step... stepsWithStartTime) {
Set<Step> actualStepsWithStartTime = lastRun.steps().entrySet().stream()
.filter(entry -> entry.getValue().startTime().isPresent())
.map(Map.Entry::getKey)
.collect(Collectors.toSet());
assertEquals(Set.of(stepsWithStartTime), actualStepsWithStartTime);
}
@Test
public void locksAndGarbage() throws InterruptedException, BrokenBarrierException {
DeploymentTester tester = new DeploymentTester();
JobController jobs = tester.controller().jobController();
CyclicBarrier barrier = new CyclicBarrier(2);
JobRunner runner = new JobRunner(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator()),
Executors.newFixedThreadPool(32), waitingRunner(barrier));
TenantAndApplicationId appId = tester.createApplication("tenant", "real", "default").id();
ApplicationId id = appId.defaultInstance();
jobs.submit(appId, versions.targetApplication().source(), Optional.empty(), Optional.empty(), Optional.empty(), 2, applicationPackage, new byte[0]);
RunId runId = new RunId(id, systemTest, 1);
jobs.start(id, systemTest, versions);
runner.maintain();
barrier.await();
try {
jobs.locked(id, systemTest, deactivateTester, step -> { });
fail("deployTester step should still be locked!");
}
catch (TimeoutException e) { }
assertEquals(Collections.singletonList(runId), jobs.active().stream().map(run -> run.id()).collect(Collectors.toList()));
tester.controllerTester().controller().applications().deleteApplication(TenantAndApplicationId.from(id), tester.controllerTester().credentialsFor(id.tenant()));
assertEquals(Collections.emptyList(), jobs.active());
assertEquals(runId, jobs.last(id, systemTest).get().id());
runner.maintain();
assertEquals(runId, jobs.last(id, systemTest).get().id());
barrier.await();
runner.maintain();
assertEquals(Optional.empty(), jobs.last(id, systemTest));
}
@Test
public void historyPruning() {
DeploymentTester tester = new DeploymentTester();
JobController jobs = tester.controller().jobController();
JobRunner runner = new JobRunner(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator()),
inThreadExecutor(), (id, step) -> Optional.of(running));
TenantAndApplicationId appId = tester.createApplication("tenant", "real", "default").id();
ApplicationId instanceId = appId.defaultInstance();
JobId jobId = new JobId(instanceId, systemTest);
jobs.submit(appId, versions.targetApplication().source(), Optional.empty(), Optional.empty(), Optional.empty(), 2, applicationPackage, new byte[0]);
assertFalse(jobs.lastSuccess(jobId).isPresent());
for (int i = 0; i < jobs.historyLength(); i++) {
jobs.start(instanceId, systemTest, versions);
runner.run();
}
assertEquals(64, jobs.runs(jobId).size());
assertTrue(jobs.details(new RunId(instanceId, systemTest, 1)).isPresent());
jobs.start(instanceId, systemTest, versions);
runner.run();
assertEquals(64, jobs.runs(jobId).size());
assertEquals(2, jobs.runs(jobId).keySet().iterator().next().number());
assertFalse(jobs.details(new RunId(instanceId, systemTest, 1)).isPresent());
assertTrue(jobs.details(new RunId(instanceId, systemTest, 65)).isPresent());
JobRunner failureRunner = new JobRunner(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator()),
inThreadExecutor(), (id, step) -> Optional.of(error));
for (int i = 0; i < jobs.historyLength() - 1; i++) {
jobs.start(instanceId, systemTest, versions);
failureRunner.run();
}
assertEquals(64, jobs.runs(jobId).size());
assertEquals(65, jobs.runs(jobId).keySet().iterator().next().number());
assertEquals(65, jobs.lastSuccess(jobId).get().id().number());
assertEquals(66, jobs.firstFailing(jobId).get().id().number());
jobs.start(instanceId, systemTest, versions);
failureRunner.run();
assertEquals(65, jobs.runs(jobId).size());
assertEquals(65, jobs.runs(jobId).keySet().iterator().next().number());
assertEquals(65, jobs.lastSuccess(jobId).get().id().number());
assertEquals(66, jobs.firstFailing(jobId).get().id().number());
jobs.start(instanceId, systemTest, versions);
failureRunner.run();
assertEquals(66, jobs.runs(jobId).size());
assertEquals(65, jobs.runs(jobId).keySet().iterator().next().number());
assertEquals(66, jobs.runs(jobId).keySet().stream().skip(1).iterator().next().number());
assertEquals(65, jobs.lastSuccess(jobId).get().id().number());
assertEquals(66, jobs.firstFailing(jobId).get().id().number());
jobs.start(instanceId, systemTest, versions);
failureRunner.run();
assertEquals(66, jobs.runs(jobId).size());
assertEquals(65, jobs.runs(jobId).keySet().iterator().next().number());
assertEquals(66, jobs.runs(jobId).keySet().stream().skip(1).iterator().next().number());
assertEquals(68, jobs.runs(jobId).keySet().stream().skip(2).iterator().next().number());
assertEquals(65, jobs.lastSuccess(jobId).get().id().number());
assertEquals(66, jobs.firstFailing(jobId).get().id().number());
jobs.start(instanceId, systemTest, versions);
runner.run();
assertEquals(64, jobs.runs(jobId).size());
assertEquals(69, jobs.runs(jobId).keySet().iterator().next().number());
assertEquals(132, jobs.lastSuccess(jobId).get().id().number());
assertFalse(jobs.firstFailing(jobId).isPresent());
}
@Test
@Test
public void timeout() {
DeploymentTester tester = new DeploymentTester();
JobController jobs = tester.controller().jobController();
Map<Step, RunStatus> outcomes = new EnumMap<>(Step.class);
JobRunner runner = new JobRunner(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator()),
inThreadExecutor(), mappedRunner(outcomes));
TenantAndApplicationId appId = tester.createApplication("tenant", "real", "default").id();
ApplicationId id = appId.defaultInstance();
jobs.submit(appId, versions.targetApplication().source(), Optional.empty(), Optional.empty(), Optional.empty(), 2, applicationPackage, new byte[0]);
jobs.start(id, systemTest, versions);
tester.clock().advance(JobRunner.jobTimeout.plus(Duration.ofSeconds(1)));
runner.run();
assertSame(aborted, jobs.last(id, systemTest).get().status());
}
@Test
public void jobMetrics() {
DeploymentTester tester = new DeploymentTester();
JobController jobs = tester.controller().jobController();
Map<Step, RunStatus> outcomes = new EnumMap<>(Step.class);
JobRunner runner = new JobRunner(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator()),
inThreadExecutor(), mappedRunner(outcomes));
TenantAndApplicationId appId = tester.createApplication("tenant", "real", "default").id();
ApplicationId id = appId.defaultInstance();
jobs.submit(appId, versions.targetApplication().source(), Optional.empty(), Optional.empty(), Optional.empty(), 2, applicationPackage, new byte[0]);
for (RunStatus status : RunStatus.values()) {
if (status == success) continue;
outcomes.put(deployTester, status);
jobs.start(id, systemTest, versions);
runner.run();
jobs.finish(jobs.last(id, systemTest).get().id());
}
Map<String, String> context = Map.of("applicationId", "tenant.real.default",
"tenantName", "tenant",
"app", "real.default",
"test", "true",
"zone", "test.us-east-1");
MetricsMock metric = ((MetricsMock) tester.controller().metric());
assertEquals(RunStatus.values().length - 1, metric.getMetric(context::equals, JobMetrics.start).get().intValue());
assertEquals(1, metric.getMetric(context::equals, JobMetrics.abort).get().intValue());
assertEquals(1, metric.getMetric(context::equals, JobMetrics.error).get().intValue());
assertEquals(1, metric.getMetric(context::equals, JobMetrics.success).get().intValue());
assertEquals(1, metric.getMetric(context::equals, JobMetrics.convergenceFailure).get().intValue());
assertEquals(1, metric.getMetric(context::equals, JobMetrics.deploymentFailure).get().intValue());
assertEquals(1, metric.getMetric(context::equals, JobMetrics.outOfCapacity).get().intValue());
assertEquals(1, metric.getMetric(context::equals, JobMetrics.testFailure).get().intValue());
}
public static ExecutorService inThreadExecutor() {
return new AbstractExecutorService() {
AtomicBoolean shutDown = new AtomicBoolean(false);
@Override public void shutdown() { shutDown.set(true); }
@Override public List<Runnable> shutdownNow() { shutDown.set(true); return Collections.emptyList(); }
@Override public boolean isShutdown() { return shutDown.get(); }
@Override public boolean isTerminated() { return shutDown.get(); }
@Override public boolean awaitTermination(long timeout, TimeUnit unit) { return true; }
@Override public void execute(Runnable command) { command.run(); }
};
}
private static ExecutorService phasedExecutor(Phaser phaser) {
return new AbstractExecutorService() {
ExecutorService delegate = Executors.newFixedThreadPool(32);
@Override public void shutdown() { delegate.shutdown(); }
@Override public List<Runnable> shutdownNow() { return delegate.shutdownNow(); }
@Override public boolean isShutdown() { return delegate.isShutdown(); }
@Override public boolean isTerminated() { return delegate.isTerminated(); }
@Override public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException { return delegate.awaitTermination(timeout, unit); }
@Override public void execute(Runnable command) {
phaser.register();
delegate.execute(() -> {
command.run();
phaser.arriveAndDeregister();
});
}
};
}
private static StepRunner mappedRunner(Map<Step, RunStatus> outcomes) {
return (step, id) -> Optional.ofNullable(outcomes.get(step.get()));
}
private static StepRunner waitingRunner(CyclicBarrier barrier) {
return (step, id) -> {
try {
if (step.get() == deployTester) {
barrier.await();
barrier.reset();
barrier.await();
}
}
catch (InterruptedException | BrokenBarrierException e) {
throw new AssertionError(e);
}
return Optional.of(running);
};
}
} | class JobRunnerTest {
private static final ApplicationPackage applicationPackage = new ApplicationPackage(new byte[0]);
private static final Versions versions = new Versions(Version.fromString("1.2.3"),
ApplicationVersion.from(new SourceRevision("repo",
"branch",
"bada55"),
321),
Optional.empty(),
Optional.empty());
@Test
public void multiThreadedExecutionFinishes() {
DeploymentTester tester = new DeploymentTester();
JobController jobs = tester.controller().jobController();
StepRunner stepRunner = (step, id) -> id.type() == stagingTest && step.get() == startTests? Optional.of(error) : Optional.of(running);
Phaser phaser = new Phaser(1);
JobRunner runner = new JobRunner(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator()),
phasedExecutor(phaser), stepRunner);
TenantAndApplicationId appId = tester.createApplication("tenant", "real", "default").id();
ApplicationId id = appId.defaultInstance();
jobs.submit(appId, versions.targetApplication().source(), Optional.empty(), Optional.empty(), Optional.empty(), 2, applicationPackage, new byte[0]);
jobs.start(id, systemTest, versions);
try {
jobs.start(id, systemTest, versions);
fail("Job is already running, so this should not be allowed!");
}
catch (IllegalStateException e) { }
jobs.start(id, stagingTest, versions);
assertTrue(jobs.last(id, systemTest).get().stepStatuses().values().stream().allMatch(unfinished::equals));
assertFalse(jobs.last(id, systemTest).get().hasEnded());
assertTrue(jobs.last(id, stagingTest).get().stepStatuses().values().stream().allMatch(unfinished::equals));
assertFalse(jobs.last(id, stagingTest).get().hasEnded());
runner.maintain();
phaser.arriveAndAwaitAdvance();
assertTrue(jobs.last(id, systemTest).get().stepStatuses().values().stream().allMatch(succeeded::equals));
assertTrue(jobs.last(id, stagingTest).get().hasEnded());
assertTrue(jobs.last(id, stagingTest).get().hasFailed());
}
@Test
public void stepLogic() {
DeploymentTester tester = new DeploymentTester();
JobController jobs = tester.controller().jobController();
Map<Step, RunStatus> outcomes = new EnumMap<>(Step.class);
JobRunner runner = new JobRunner(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator()),
inThreadExecutor(), mappedRunner(outcomes));
TenantAndApplicationId appId = tester.createApplication("tenant", "real", "default").id();
ApplicationId id = appId.defaultInstance();
jobs.submit(appId, versions.targetApplication().source(), Optional.empty(), Optional.empty(), Optional.empty(), 2, applicationPackage, new byte[0]);
Supplier<Run> run = () -> jobs.last(id, systemTest).get();
jobs.start(id, systemTest, versions);
RunId first = run.get().id();
Map<Step, Status> steps = run.get().stepStatuses();
runner.maintain();
assertEquals(steps, run.get().stepStatuses());
assertEquals(List.of(deployTester), run.get().readySteps());
assertStepsWithStartTime(run.get(), deployTester);
outcomes.put(deployTester, running);
runner.maintain();
assertEquals(List.of(installTester, deployReal), run.get().readySteps());
assertStepsWithStartTime(run.get(), installTester, deployTester, deployReal);
outcomes.put(deployReal, running);
runner.maintain();
assertEquals(List.of(installTester, installReal), run.get().readySteps());
assertStepsWithStartTime(run.get(), deployTester, deployReal, installTester, installReal);
outcomes.put(installReal, running);
runner.maintain();
assertEquals(List.of(installTester), run.get().readySteps());
assertStepsWithStartTime(run.get(), deployTester, deployReal, installTester, installReal);
outcomes.put(installTester, running);
runner.maintain();
assertEquals(List.of(startTests), run.get().readySteps());
assertStepsWithStartTime(run.get(), deployTester, deployReal, installTester, installReal, startTests);
outcomes.put(startTests, running);
runner.maintain();
assertEquals(List.of(endTests), run.get().readySteps());
assertStepsWithStartTime(run.get(), deployTester, deployReal, installTester, installReal, startTests, endTests);
outcomes.put(endTests, testFailure);
runner.maintain();
assertTrue(run.get().hasFailed());
assertEquals(List.of(copyVespaLogs, deactivateTester), run.get().readySteps());
assertStepsWithStartTime(run.get(), deployTester, deployReal, installTester, installReal, startTests, endTests, copyVespaLogs, deactivateTester);
outcomes.put(copyVespaLogs, running);
runner.maintain();
assertEquals(List.of(deactivateReal, deactivateTester), run.get().readySteps());
assertStepsWithStartTime(run.get(), deployTester, deployReal, installTester, installReal, startTests, endTests, copyVespaLogs, deactivateTester, deactivateReal);
jobs.abort(run.get().id());
runner.maintain();
assertEquals(List.of(deactivateReal, deactivateTester), run.get().readySteps());
assertStepsWithStartTime(run.get(), deployTester, deployReal, installTester, installReal, startTests, endTests, copyVespaLogs, deactivateTester, deactivateReal);
outcomes.put(deactivateReal, running);
outcomes.put(deactivateTester, running);
outcomes.put(report, running);
runner.maintain();
assertTrue(run.get().hasFailed());
assertTrue(run.get().hasEnded());
assertTrue(run.get().status() == aborted);
jobs.start(id, systemTest, versions);
assertEquals(first.number() + 1, run.get().id().number());
outcomes.put(deployTester, error);
runner.maintain();
assertTrue(run.get().hasEnded());
assertTrue(run.get().hasFailed());
assertFalse(run.get().status() == aborted);
assertEquals(failed, run.get().stepStatuses().get(deployTester));
assertEquals(unfinished, run.get().stepStatuses().get(installTester));
assertEquals(succeeded, run.get().stepStatuses().get(report));
assertStepsWithStartTime(run.get(), deployTester, copyVespaLogs, deactivateTester, deactivateReal, report);
assertEquals(2, jobs.runs(id, systemTest).size());
jobs.start(id, systemTest, versions);
tester.applications().deleteInstance(id);
runner.maintain();
assertFalse(jobs.last(id, systemTest).isPresent());
assertTrue(jobs.runs(id, systemTest).isEmpty());
}
private void assertStepsWithStartTime(Run lastRun, Step... stepsWithStartTime) {
Set<Step> actualStepsWithStartTime = lastRun.steps().entrySet().stream()
.filter(entry -> entry.getValue().startTime().isPresent())
.map(Map.Entry::getKey)
.collect(Collectors.toSet());
assertEquals(Set.of(stepsWithStartTime), actualStepsWithStartTime);
}
@Test
public void locksAndGarbage() throws InterruptedException, BrokenBarrierException {
DeploymentTester tester = new DeploymentTester();
JobController jobs = tester.controller().jobController();
CyclicBarrier barrier = new CyclicBarrier(2);
JobRunner runner = new JobRunner(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator()),
Executors.newFixedThreadPool(32), waitingRunner(barrier));
TenantAndApplicationId appId = tester.createApplication("tenant", "real", "default").id();
ApplicationId id = appId.defaultInstance();
jobs.submit(appId, versions.targetApplication().source(), Optional.empty(), Optional.empty(), Optional.empty(), 2, applicationPackage, new byte[0]);
RunId runId = new RunId(id, systemTest, 1);
jobs.start(id, systemTest, versions);
runner.maintain();
barrier.await();
try {
jobs.locked(id, systemTest, deactivateTester, step -> { });
fail("deployTester step should still be locked!");
}
catch (TimeoutException e) { }
assertEquals(Collections.singletonList(runId), jobs.active().stream().map(run -> run.id()).collect(Collectors.toList()));
tester.controllerTester().controller().applications().deleteApplication(TenantAndApplicationId.from(id), tester.controllerTester().credentialsFor(id.tenant()));
assertEquals(Collections.emptyList(), jobs.active());
assertEquals(runId, jobs.last(id, systemTest).get().id());
runner.maintain();
assertEquals(runId, jobs.last(id, systemTest).get().id());
barrier.await();
runner.maintain();
assertEquals(Optional.empty(), jobs.last(id, systemTest));
}
@Test
public void historyPruning() {
DeploymentTester tester = new DeploymentTester();
JobController jobs = tester.controller().jobController();
JobRunner runner = new JobRunner(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator()),
inThreadExecutor(), (id, step) -> Optional.of(running));
TenantAndApplicationId appId = tester.createApplication("tenant", "real", "default").id();
ApplicationId instanceId = appId.defaultInstance();
JobId jobId = new JobId(instanceId, systemTest);
jobs.submit(appId, versions.targetApplication().source(), Optional.empty(), Optional.empty(), Optional.empty(), 2, applicationPackage, new byte[0]);
assertFalse(jobs.lastSuccess(jobId).isPresent());
for (int i = 0; i < jobs.historyLength(); i++) {
jobs.start(instanceId, systemTest, versions);
runner.run();
}
assertEquals(64, jobs.runs(jobId).size());
assertTrue(jobs.details(new RunId(instanceId, systemTest, 1)).isPresent());
jobs.start(instanceId, systemTest, versions);
runner.run();
assertEquals(64, jobs.runs(jobId).size());
assertEquals(2, jobs.runs(jobId).keySet().iterator().next().number());
assertFalse(jobs.details(new RunId(instanceId, systemTest, 1)).isPresent());
assertTrue(jobs.details(new RunId(instanceId, systemTest, 65)).isPresent());
JobRunner failureRunner = new JobRunner(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator()),
inThreadExecutor(), (id, step) -> Optional.of(error));
for (int i = 0; i < jobs.historyLength() - 1; i++) {
jobs.start(instanceId, systemTest, versions);
failureRunner.run();
}
assertEquals(64, jobs.runs(jobId).size());
assertEquals(65, jobs.runs(jobId).keySet().iterator().next().number());
assertEquals(65, jobs.lastSuccess(jobId).get().id().number());
assertEquals(66, jobs.firstFailing(jobId).get().id().number());
jobs.start(instanceId, systemTest, versions);
failureRunner.run();
assertEquals(65, jobs.runs(jobId).size());
assertEquals(65, jobs.runs(jobId).keySet().iterator().next().number());
assertEquals(65, jobs.lastSuccess(jobId).get().id().number());
assertEquals(66, jobs.firstFailing(jobId).get().id().number());
jobs.start(instanceId, systemTest, versions);
failureRunner.run();
assertEquals(66, jobs.runs(jobId).size());
assertEquals(65, jobs.runs(jobId).keySet().iterator().next().number());
assertEquals(66, jobs.runs(jobId).keySet().stream().skip(1).iterator().next().number());
assertEquals(65, jobs.lastSuccess(jobId).get().id().number());
assertEquals(66, jobs.firstFailing(jobId).get().id().number());
jobs.start(instanceId, systemTest, versions);
failureRunner.run();
assertEquals(66, jobs.runs(jobId).size());
assertEquals(65, jobs.runs(jobId).keySet().iterator().next().number());
assertEquals(66, jobs.runs(jobId).keySet().stream().skip(1).iterator().next().number());
assertEquals(68, jobs.runs(jobId).keySet().stream().skip(2).iterator().next().number());
assertEquals(65, jobs.lastSuccess(jobId).get().id().number());
assertEquals(66, jobs.firstFailing(jobId).get().id().number());
jobs.start(instanceId, systemTest, versions);
runner.run();
assertEquals(64, jobs.runs(jobId).size());
assertEquals(69, jobs.runs(jobId).keySet().iterator().next().number());
assertEquals(132, jobs.lastSuccess(jobId).get().id().number());
assertFalse(jobs.firstFailing(jobId).isPresent());
}
@Test
@Test
public void timeout() {
DeploymentTester tester = new DeploymentTester();
JobController jobs = tester.controller().jobController();
Map<Step, RunStatus> outcomes = new EnumMap<>(Step.class);
JobRunner runner = new JobRunner(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator()),
inThreadExecutor(), mappedRunner(outcomes));
TenantAndApplicationId appId = tester.createApplication("tenant", "real", "default").id();
ApplicationId id = appId.defaultInstance();
jobs.submit(appId, versions.targetApplication().source(), Optional.empty(), Optional.empty(), Optional.empty(), 2, applicationPackage, new byte[0]);
jobs.start(id, systemTest, versions);
tester.clock().advance(JobRunner.jobTimeout.plus(Duration.ofSeconds(1)));
runner.run();
assertSame(aborted, jobs.last(id, systemTest).get().status());
}
@Test
public void jobMetrics() {
DeploymentTester tester = new DeploymentTester();
JobController jobs = tester.controller().jobController();
Map<Step, RunStatus> outcomes = new EnumMap<>(Step.class);
JobRunner runner = new JobRunner(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator()),
inThreadExecutor(), mappedRunner(outcomes));
TenantAndApplicationId appId = tester.createApplication("tenant", "real", "default").id();
ApplicationId id = appId.defaultInstance();
jobs.submit(appId, versions.targetApplication().source(), Optional.empty(), Optional.empty(), Optional.empty(), 2, applicationPackage, new byte[0]);
for (RunStatus status : RunStatus.values()) {
if (status == success) continue;
outcomes.put(deployTester, status);
jobs.start(id, systemTest, versions);
runner.run();
jobs.finish(jobs.last(id, systemTest).get().id());
}
Map<String, String> context = Map.of("applicationId", "tenant.real.default",
"tenantName", "tenant",
"app", "real.default",
"test", "true",
"zone", "test.us-east-1");
MetricsMock metric = ((MetricsMock) tester.controller().metric());
assertEquals(RunStatus.values().length - 1, metric.getMetric(context::equals, JobMetrics.start).get().intValue());
assertEquals(1, metric.getMetric(context::equals, JobMetrics.abort).get().intValue());
assertEquals(1, metric.getMetric(context::equals, JobMetrics.error).get().intValue());
assertEquals(1, metric.getMetric(context::equals, JobMetrics.success).get().intValue());
assertEquals(1, metric.getMetric(context::equals, JobMetrics.convergenceFailure).get().intValue());
assertEquals(1, metric.getMetric(context::equals, JobMetrics.deploymentFailure).get().intValue());
assertEquals(1, metric.getMetric(context::equals, JobMetrics.outOfCapacity).get().intValue());
assertEquals(1, metric.getMetric(context::equals, JobMetrics.testFailure).get().intValue());
}
public static ExecutorService inThreadExecutor() {
return new AbstractExecutorService() {
AtomicBoolean shutDown = new AtomicBoolean(false);
@Override public void shutdown() { shutDown.set(true); }
@Override public List<Runnable> shutdownNow() { shutDown.set(true); return Collections.emptyList(); }
@Override public boolean isShutdown() { return shutDown.get(); }
@Override public boolean isTerminated() { return shutDown.get(); }
@Override public boolean awaitTermination(long timeout, TimeUnit unit) { return true; }
@Override public void execute(Runnable command) { command.run(); }
};
}
private static ExecutorService phasedExecutor(Phaser phaser) {
return new AbstractExecutorService() {
ExecutorService delegate = Executors.newFixedThreadPool(32);
@Override public void shutdown() { delegate.shutdown(); }
@Override public List<Runnable> shutdownNow() { return delegate.shutdownNow(); }
@Override public boolean isShutdown() { return delegate.isShutdown(); }
@Override public boolean isTerminated() { return delegate.isTerminated(); }
@Override public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException { return delegate.awaitTermination(timeout, unit); }
@Override public void execute(Runnable command) {
phaser.register();
delegate.execute(() -> {
command.run();
phaser.arriveAndDeregister();
});
}
};
}
private static StepRunner mappedRunner(Map<Step, RunStatus> outcomes) {
return (step, id) -> Optional.ofNullable(outcomes.get(step.get()));
}
private static StepRunner waitingRunner(CyclicBarrier barrier) {
return (step, id) -> {
try {
if (step.get() == deployTester) {
barrier.await();
barrier.reset();
barrier.await();
}
}
catch (InterruptedException | BrokenBarrierException e) {
throw new AssertionError(e);
}
return Optional.of(running);
};
}
} |
Yeah, I'm not settled on that one yet :) | public void onlySuccessfulRunExpiresThenAnotherFails() {
DeploymentTester tester = new DeploymentTester();
JobController jobs = tester.controller().jobController();
var app = tester.newDeploymentContext().submit();
JobId jobId = new JobId(app.instanceId(), systemTest);
assertFalse(jobs.lastSuccess(jobId).isPresent());
app.runJob(systemTest);
assertTrue(jobs.lastSuccess(jobId).isPresent());
assertEquals(1, jobs.runs(jobId).size());
tester.clock().advance(JobController.maxHistoryAge.plusSeconds(1));
app.submit();
app.failDeployment(systemTest);
assertFalse(jobs.lastSuccess(jobId).isPresent());
assertEquals(1, jobs.runs(jobId).size());
} | JobController jobs = tester.controller().jobController(); | public void onlySuccessfulRunExpiresThenAnotherFails() {
DeploymentTester tester = new DeploymentTester();
JobController jobs = tester.controller().jobController();
var app = tester.newDeploymentContext().submit();
JobId jobId = new JobId(app.instanceId(), systemTest);
assertFalse(jobs.lastSuccess(jobId).isPresent());
app.runJob(systemTest);
assertTrue(jobs.lastSuccess(jobId).isPresent());
assertEquals(1, jobs.runs(jobId).size());
tester.clock().advance(JobController.maxHistoryAge.plusSeconds(1));
app.submit();
app.failDeployment(systemTest);
assertFalse(jobs.lastSuccess(jobId).isPresent());
assertEquals(1, jobs.runs(jobId).size());
} | class JobRunnerTest {
private static final ApplicationPackage applicationPackage = new ApplicationPackage(new byte[0]);
private static final Versions versions = new Versions(Version.fromString("1.2.3"),
ApplicationVersion.from(new SourceRevision("repo",
"branch",
"bada55"),
321),
Optional.empty(),
Optional.empty());
@Test
public void multiThreadedExecutionFinishes() {
DeploymentTester tester = new DeploymentTester();
JobController jobs = tester.controller().jobController();
StepRunner stepRunner = (step, id) -> id.type() == stagingTest && step.get() == startTests? Optional.of(error) : Optional.of(running);
Phaser phaser = new Phaser(1);
JobRunner runner = new JobRunner(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator()),
phasedExecutor(phaser), stepRunner);
TenantAndApplicationId appId = tester.createApplication("tenant", "real", "default").id();
ApplicationId id = appId.defaultInstance();
jobs.submit(appId, versions.targetApplication().source(), Optional.empty(), Optional.empty(), Optional.empty(), 2, applicationPackage, new byte[0]);
jobs.start(id, systemTest, versions);
try {
jobs.start(id, systemTest, versions);
fail("Job is already running, so this should not be allowed!");
}
catch (IllegalStateException e) { }
jobs.start(id, stagingTest, versions);
assertTrue(jobs.last(id, systemTest).get().stepStatuses().values().stream().allMatch(unfinished::equals));
assertFalse(jobs.last(id, systemTest).get().hasEnded());
assertTrue(jobs.last(id, stagingTest).get().stepStatuses().values().stream().allMatch(unfinished::equals));
assertFalse(jobs.last(id, stagingTest).get().hasEnded());
runner.maintain();
phaser.arriveAndAwaitAdvance();
assertTrue(jobs.last(id, systemTest).get().stepStatuses().values().stream().allMatch(succeeded::equals));
assertTrue(jobs.last(id, stagingTest).get().hasEnded());
assertTrue(jobs.last(id, stagingTest).get().hasFailed());
}
@Test
public void stepLogic() {
DeploymentTester tester = new DeploymentTester();
JobController jobs = tester.controller().jobController();
Map<Step, RunStatus> outcomes = new EnumMap<>(Step.class);
JobRunner runner = new JobRunner(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator()),
inThreadExecutor(), mappedRunner(outcomes));
TenantAndApplicationId appId = tester.createApplication("tenant", "real", "default").id();
ApplicationId id = appId.defaultInstance();
jobs.submit(appId, versions.targetApplication().source(), Optional.empty(), Optional.empty(), Optional.empty(), 2, applicationPackage, new byte[0]);
Supplier<Run> run = () -> jobs.last(id, systemTest).get();
jobs.start(id, systemTest, versions);
RunId first = run.get().id();
Map<Step, Status> steps = run.get().stepStatuses();
runner.maintain();
assertEquals(steps, run.get().stepStatuses());
assertEquals(List.of(deployTester), run.get().readySteps());
assertStepsWithStartTime(run.get(), deployTester);
outcomes.put(deployTester, running);
runner.maintain();
assertEquals(List.of(installTester, deployReal), run.get().readySteps());
assertStepsWithStartTime(run.get(), installTester, deployTester, deployReal);
outcomes.put(deployReal, running);
runner.maintain();
assertEquals(List.of(installTester, installReal), run.get().readySteps());
assertStepsWithStartTime(run.get(), deployTester, deployReal, installTester, installReal);
outcomes.put(installReal, running);
runner.maintain();
assertEquals(List.of(installTester), run.get().readySteps());
assertStepsWithStartTime(run.get(), deployTester, deployReal, installTester, installReal);
outcomes.put(installTester, running);
runner.maintain();
assertEquals(List.of(startTests), run.get().readySteps());
assertStepsWithStartTime(run.get(), deployTester, deployReal, installTester, installReal, startTests);
outcomes.put(startTests, running);
runner.maintain();
assertEquals(List.of(endTests), run.get().readySteps());
assertStepsWithStartTime(run.get(), deployTester, deployReal, installTester, installReal, startTests, endTests);
outcomes.put(endTests, testFailure);
runner.maintain();
assertTrue(run.get().hasFailed());
assertEquals(List.of(copyVespaLogs, deactivateTester), run.get().readySteps());
assertStepsWithStartTime(run.get(), deployTester, deployReal, installTester, installReal, startTests, endTests, copyVespaLogs, deactivateTester);
outcomes.put(copyVespaLogs, running);
runner.maintain();
assertEquals(List.of(deactivateReal, deactivateTester), run.get().readySteps());
assertStepsWithStartTime(run.get(), deployTester, deployReal, installTester, installReal, startTests, endTests, copyVespaLogs, deactivateTester, deactivateReal);
jobs.abort(run.get().id());
runner.maintain();
assertEquals(List.of(deactivateReal, deactivateTester), run.get().readySteps());
assertStepsWithStartTime(run.get(), deployTester, deployReal, installTester, installReal, startTests, endTests, copyVespaLogs, deactivateTester, deactivateReal);
outcomes.put(deactivateReal, running);
outcomes.put(deactivateTester, running);
outcomes.put(report, running);
runner.maintain();
assertTrue(run.get().hasFailed());
assertTrue(run.get().hasEnded());
assertTrue(run.get().status() == aborted);
jobs.start(id, systemTest, versions);
assertEquals(first.number() + 1, run.get().id().number());
outcomes.put(deployTester, error);
runner.maintain();
assertTrue(run.get().hasEnded());
assertTrue(run.get().hasFailed());
assertFalse(run.get().status() == aborted);
assertEquals(failed, run.get().stepStatuses().get(deployTester));
assertEquals(unfinished, run.get().stepStatuses().get(installTester));
assertEquals(succeeded, run.get().stepStatuses().get(report));
assertStepsWithStartTime(run.get(), deployTester, copyVespaLogs, deactivateTester, deactivateReal, report);
assertEquals(2, jobs.runs(id, systemTest).size());
jobs.start(id, systemTest, versions);
tester.applications().deleteInstance(id);
runner.maintain();
assertFalse(jobs.last(id, systemTest).isPresent());
assertTrue(jobs.runs(id, systemTest).isEmpty());
}
private void assertStepsWithStartTime(Run lastRun, Step... stepsWithStartTime) {
Set<Step> actualStepsWithStartTime = lastRun.steps().entrySet().stream()
.filter(entry -> entry.getValue().startTime().isPresent())
.map(Map.Entry::getKey)
.collect(Collectors.toSet());
assertEquals(Set.of(stepsWithStartTime), actualStepsWithStartTime);
}
@Test
public void locksAndGarbage() throws InterruptedException, BrokenBarrierException {
DeploymentTester tester = new DeploymentTester();
JobController jobs = tester.controller().jobController();
CyclicBarrier barrier = new CyclicBarrier(2);
JobRunner runner = new JobRunner(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator()),
Executors.newFixedThreadPool(32), waitingRunner(barrier));
TenantAndApplicationId appId = tester.createApplication("tenant", "real", "default").id();
ApplicationId id = appId.defaultInstance();
jobs.submit(appId, versions.targetApplication().source(), Optional.empty(), Optional.empty(), Optional.empty(), 2, applicationPackage, new byte[0]);
RunId runId = new RunId(id, systemTest, 1);
jobs.start(id, systemTest, versions);
runner.maintain();
barrier.await();
try {
jobs.locked(id, systemTest, deactivateTester, step -> { });
fail("deployTester step should still be locked!");
}
catch (TimeoutException e) { }
assertEquals(Collections.singletonList(runId), jobs.active().stream().map(run -> run.id()).collect(Collectors.toList()));
tester.controllerTester().controller().applications().deleteApplication(TenantAndApplicationId.from(id), tester.controllerTester().credentialsFor(id.tenant()));
assertEquals(Collections.emptyList(), jobs.active());
assertEquals(runId, jobs.last(id, systemTest).get().id());
runner.maintain();
assertEquals(runId, jobs.last(id, systemTest).get().id());
barrier.await();
runner.maintain();
assertEquals(Optional.empty(), jobs.last(id, systemTest));
}
@Test
public void historyPruning() {
DeploymentTester tester = new DeploymentTester();
JobController jobs = tester.controller().jobController();
JobRunner runner = new JobRunner(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator()),
inThreadExecutor(), (id, step) -> Optional.of(running));
TenantAndApplicationId appId = tester.createApplication("tenant", "real", "default").id();
ApplicationId instanceId = appId.defaultInstance();
JobId jobId = new JobId(instanceId, systemTest);
jobs.submit(appId, versions.targetApplication().source(), Optional.empty(), Optional.empty(), Optional.empty(), 2, applicationPackage, new byte[0]);
assertFalse(jobs.lastSuccess(jobId).isPresent());
for (int i = 0; i < jobs.historyLength(); i++) {
jobs.start(instanceId, systemTest, versions);
runner.run();
}
assertEquals(64, jobs.runs(jobId).size());
assertTrue(jobs.details(new RunId(instanceId, systemTest, 1)).isPresent());
jobs.start(instanceId, systemTest, versions);
runner.run();
assertEquals(64, jobs.runs(jobId).size());
assertEquals(2, jobs.runs(jobId).keySet().iterator().next().number());
assertFalse(jobs.details(new RunId(instanceId, systemTest, 1)).isPresent());
assertTrue(jobs.details(new RunId(instanceId, systemTest, 65)).isPresent());
JobRunner failureRunner = new JobRunner(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator()),
inThreadExecutor(), (id, step) -> Optional.of(error));
for (int i = 0; i < jobs.historyLength() - 1; i++) {
jobs.start(instanceId, systemTest, versions);
failureRunner.run();
}
assertEquals(64, jobs.runs(jobId).size());
assertEquals(65, jobs.runs(jobId).keySet().iterator().next().number());
assertEquals(65, jobs.lastSuccess(jobId).get().id().number());
assertEquals(66, jobs.firstFailing(jobId).get().id().number());
jobs.start(instanceId, systemTest, versions);
failureRunner.run();
assertEquals(65, jobs.runs(jobId).size());
assertEquals(65, jobs.runs(jobId).keySet().iterator().next().number());
assertEquals(65, jobs.lastSuccess(jobId).get().id().number());
assertEquals(66, jobs.firstFailing(jobId).get().id().number());
jobs.start(instanceId, systemTest, versions);
failureRunner.run();
assertEquals(66, jobs.runs(jobId).size());
assertEquals(65, jobs.runs(jobId).keySet().iterator().next().number());
assertEquals(66, jobs.runs(jobId).keySet().stream().skip(1).iterator().next().number());
assertEquals(65, jobs.lastSuccess(jobId).get().id().number());
assertEquals(66, jobs.firstFailing(jobId).get().id().number());
jobs.start(instanceId, systemTest, versions);
failureRunner.run();
assertEquals(66, jobs.runs(jobId).size());
assertEquals(65, jobs.runs(jobId).keySet().iterator().next().number());
assertEquals(66, jobs.runs(jobId).keySet().stream().skip(1).iterator().next().number());
assertEquals(68, jobs.runs(jobId).keySet().stream().skip(2).iterator().next().number());
assertEquals(65, jobs.lastSuccess(jobId).get().id().number());
assertEquals(66, jobs.firstFailing(jobId).get().id().number());
jobs.start(instanceId, systemTest, versions);
runner.run();
assertEquals(64, jobs.runs(jobId).size());
assertEquals(69, jobs.runs(jobId).keySet().iterator().next().number());
assertEquals(132, jobs.lastSuccess(jobId).get().id().number());
assertFalse(jobs.firstFailing(jobId).isPresent());
}
@Test
@Test
public void timeout() {
DeploymentTester tester = new DeploymentTester();
JobController jobs = tester.controller().jobController();
Map<Step, RunStatus> outcomes = new EnumMap<>(Step.class);
JobRunner runner = new JobRunner(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator()),
inThreadExecutor(), mappedRunner(outcomes));
TenantAndApplicationId appId = tester.createApplication("tenant", "real", "default").id();
ApplicationId id = appId.defaultInstance();
jobs.submit(appId, versions.targetApplication().source(), Optional.empty(), Optional.empty(), Optional.empty(), 2, applicationPackage, new byte[0]);
jobs.start(id, systemTest, versions);
tester.clock().advance(JobRunner.jobTimeout.plus(Duration.ofSeconds(1)));
runner.run();
assertSame(aborted, jobs.last(id, systemTest).get().status());
}
@Test
public void jobMetrics() {
DeploymentTester tester = new DeploymentTester();
JobController jobs = tester.controller().jobController();
Map<Step, RunStatus> outcomes = new EnumMap<>(Step.class);
JobRunner runner = new JobRunner(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator()),
inThreadExecutor(), mappedRunner(outcomes));
TenantAndApplicationId appId = tester.createApplication("tenant", "real", "default").id();
ApplicationId id = appId.defaultInstance();
jobs.submit(appId, versions.targetApplication().source(), Optional.empty(), Optional.empty(), Optional.empty(), 2, applicationPackage, new byte[0]);
for (RunStatus status : RunStatus.values()) {
if (status == success) continue;
outcomes.put(deployTester, status);
jobs.start(id, systemTest, versions);
runner.run();
jobs.finish(jobs.last(id, systemTest).get().id());
}
Map<String, String> context = Map.of("applicationId", "tenant.real.default",
"tenantName", "tenant",
"app", "real.default",
"test", "true",
"zone", "test.us-east-1");
MetricsMock metric = ((MetricsMock) tester.controller().metric());
assertEquals(RunStatus.values().length - 1, metric.getMetric(context::equals, JobMetrics.start).get().intValue());
assertEquals(1, metric.getMetric(context::equals, JobMetrics.abort).get().intValue());
assertEquals(1, metric.getMetric(context::equals, JobMetrics.error).get().intValue());
assertEquals(1, metric.getMetric(context::equals, JobMetrics.success).get().intValue());
assertEquals(1, metric.getMetric(context::equals, JobMetrics.convergenceFailure).get().intValue());
assertEquals(1, metric.getMetric(context::equals, JobMetrics.deploymentFailure).get().intValue());
assertEquals(1, metric.getMetric(context::equals, JobMetrics.outOfCapacity).get().intValue());
assertEquals(1, metric.getMetric(context::equals, JobMetrics.testFailure).get().intValue());
}
public static ExecutorService inThreadExecutor() {
return new AbstractExecutorService() {
AtomicBoolean shutDown = new AtomicBoolean(false);
@Override public void shutdown() { shutDown.set(true); }
@Override public List<Runnable> shutdownNow() { shutDown.set(true); return Collections.emptyList(); }
@Override public boolean isShutdown() { return shutDown.get(); }
@Override public boolean isTerminated() { return shutDown.get(); }
@Override public boolean awaitTermination(long timeout, TimeUnit unit) { return true; }
@Override public void execute(Runnable command) { command.run(); }
};
}
private static ExecutorService phasedExecutor(Phaser phaser) {
return new AbstractExecutorService() {
ExecutorService delegate = Executors.newFixedThreadPool(32);
@Override public void shutdown() { delegate.shutdown(); }
@Override public List<Runnable> shutdownNow() { return delegate.shutdownNow(); }
@Override public boolean isShutdown() { return delegate.isShutdown(); }
@Override public boolean isTerminated() { return delegate.isTerminated(); }
@Override public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException { return delegate.awaitTermination(timeout, unit); }
@Override public void execute(Runnable command) {
phaser.register();
delegate.execute(() -> {
command.run();
phaser.arriveAndDeregister();
});
}
};
}
private static StepRunner mappedRunner(Map<Step, RunStatus> outcomes) {
return (step, id) -> Optional.ofNullable(outcomes.get(step.get()));
}
private static StepRunner waitingRunner(CyclicBarrier barrier) {
return (step, id) -> {
try {
if (step.get() == deployTester) {
barrier.await();
barrier.reset();
barrier.await();
}
}
catch (InterruptedException | BrokenBarrierException e) {
throw new AssertionError(e);
}
return Optional.of(running);
};
}
} | class JobRunnerTest {
private static final ApplicationPackage applicationPackage = new ApplicationPackage(new byte[0]);
private static final Versions versions = new Versions(Version.fromString("1.2.3"),
ApplicationVersion.from(new SourceRevision("repo",
"branch",
"bada55"),
321),
Optional.empty(),
Optional.empty());
@Test
public void multiThreadedExecutionFinishes() {
DeploymentTester tester = new DeploymentTester();
JobController jobs = tester.controller().jobController();
StepRunner stepRunner = (step, id) -> id.type() == stagingTest && step.get() == startTests? Optional.of(error) : Optional.of(running);
Phaser phaser = new Phaser(1);
JobRunner runner = new JobRunner(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator()),
phasedExecutor(phaser), stepRunner);
TenantAndApplicationId appId = tester.createApplication("tenant", "real", "default").id();
ApplicationId id = appId.defaultInstance();
jobs.submit(appId, versions.targetApplication().source(), Optional.empty(), Optional.empty(), Optional.empty(), 2, applicationPackage, new byte[0]);
jobs.start(id, systemTest, versions);
try {
jobs.start(id, systemTest, versions);
fail("Job is already running, so this should not be allowed!");
}
catch (IllegalStateException e) { }
jobs.start(id, stagingTest, versions);
assertTrue(jobs.last(id, systemTest).get().stepStatuses().values().stream().allMatch(unfinished::equals));
assertFalse(jobs.last(id, systemTest).get().hasEnded());
assertTrue(jobs.last(id, stagingTest).get().stepStatuses().values().stream().allMatch(unfinished::equals));
assertFalse(jobs.last(id, stagingTest).get().hasEnded());
runner.maintain();
phaser.arriveAndAwaitAdvance();
assertTrue(jobs.last(id, systemTest).get().stepStatuses().values().stream().allMatch(succeeded::equals));
assertTrue(jobs.last(id, stagingTest).get().hasEnded());
assertTrue(jobs.last(id, stagingTest).get().hasFailed());
}
@Test
public void stepLogic() {
DeploymentTester tester = new DeploymentTester();
JobController jobs = tester.controller().jobController();
Map<Step, RunStatus> outcomes = new EnumMap<>(Step.class);
JobRunner runner = new JobRunner(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator()),
inThreadExecutor(), mappedRunner(outcomes));
TenantAndApplicationId appId = tester.createApplication("tenant", "real", "default").id();
ApplicationId id = appId.defaultInstance();
jobs.submit(appId, versions.targetApplication().source(), Optional.empty(), Optional.empty(), Optional.empty(), 2, applicationPackage, new byte[0]);
Supplier<Run> run = () -> jobs.last(id, systemTest).get();
jobs.start(id, systemTest, versions);
RunId first = run.get().id();
Map<Step, Status> steps = run.get().stepStatuses();
runner.maintain();
assertEquals(steps, run.get().stepStatuses());
assertEquals(List.of(deployTester), run.get().readySteps());
assertStepsWithStartTime(run.get(), deployTester);
outcomes.put(deployTester, running);
runner.maintain();
assertEquals(List.of(installTester, deployReal), run.get().readySteps());
assertStepsWithStartTime(run.get(), installTester, deployTester, deployReal);
outcomes.put(deployReal, running);
runner.maintain();
assertEquals(List.of(installTester, installReal), run.get().readySteps());
assertStepsWithStartTime(run.get(), deployTester, deployReal, installTester, installReal);
outcomes.put(installReal, running);
runner.maintain();
assertEquals(List.of(installTester), run.get().readySteps());
assertStepsWithStartTime(run.get(), deployTester, deployReal, installTester, installReal);
outcomes.put(installTester, running);
runner.maintain();
assertEquals(List.of(startTests), run.get().readySteps());
assertStepsWithStartTime(run.get(), deployTester, deployReal, installTester, installReal, startTests);
outcomes.put(startTests, running);
runner.maintain();
assertEquals(List.of(endTests), run.get().readySteps());
assertStepsWithStartTime(run.get(), deployTester, deployReal, installTester, installReal, startTests, endTests);
outcomes.put(endTests, testFailure);
runner.maintain();
assertTrue(run.get().hasFailed());
assertEquals(List.of(copyVespaLogs, deactivateTester), run.get().readySteps());
assertStepsWithStartTime(run.get(), deployTester, deployReal, installTester, installReal, startTests, endTests, copyVespaLogs, deactivateTester);
outcomes.put(copyVespaLogs, running);
runner.maintain();
assertEquals(List.of(deactivateReal, deactivateTester), run.get().readySteps());
assertStepsWithStartTime(run.get(), deployTester, deployReal, installTester, installReal, startTests, endTests, copyVespaLogs, deactivateTester, deactivateReal);
jobs.abort(run.get().id());
runner.maintain();
assertEquals(List.of(deactivateReal, deactivateTester), run.get().readySteps());
assertStepsWithStartTime(run.get(), deployTester, deployReal, installTester, installReal, startTests, endTests, copyVespaLogs, deactivateTester, deactivateReal);
outcomes.put(deactivateReal, running);
outcomes.put(deactivateTester, running);
outcomes.put(report, running);
runner.maintain();
assertTrue(run.get().hasFailed());
assertTrue(run.get().hasEnded());
assertTrue(run.get().status() == aborted);
jobs.start(id, systemTest, versions);
assertEquals(first.number() + 1, run.get().id().number());
outcomes.put(deployTester, error);
runner.maintain();
assertTrue(run.get().hasEnded());
assertTrue(run.get().hasFailed());
assertFalse(run.get().status() == aborted);
assertEquals(failed, run.get().stepStatuses().get(deployTester));
assertEquals(unfinished, run.get().stepStatuses().get(installTester));
assertEquals(succeeded, run.get().stepStatuses().get(report));
assertStepsWithStartTime(run.get(), deployTester, copyVespaLogs, deactivateTester, deactivateReal, report);
assertEquals(2, jobs.runs(id, systemTest).size());
jobs.start(id, systemTest, versions);
tester.applications().deleteInstance(id);
runner.maintain();
assertFalse(jobs.last(id, systemTest).isPresent());
assertTrue(jobs.runs(id, systemTest).isEmpty());
}
private void assertStepsWithStartTime(Run lastRun, Step... stepsWithStartTime) {
Set<Step> actualStepsWithStartTime = lastRun.steps().entrySet().stream()
.filter(entry -> entry.getValue().startTime().isPresent())
.map(Map.Entry::getKey)
.collect(Collectors.toSet());
assertEquals(Set.of(stepsWithStartTime), actualStepsWithStartTime);
}
@Test
public void locksAndGarbage() throws InterruptedException, BrokenBarrierException {
DeploymentTester tester = new DeploymentTester();
JobController jobs = tester.controller().jobController();
CyclicBarrier barrier = new CyclicBarrier(2);
JobRunner runner = new JobRunner(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator()),
Executors.newFixedThreadPool(32), waitingRunner(barrier));
TenantAndApplicationId appId = tester.createApplication("tenant", "real", "default").id();
ApplicationId id = appId.defaultInstance();
jobs.submit(appId, versions.targetApplication().source(), Optional.empty(), Optional.empty(), Optional.empty(), 2, applicationPackage, new byte[0]);
RunId runId = new RunId(id, systemTest, 1);
jobs.start(id, systemTest, versions);
runner.maintain();
barrier.await();
try {
jobs.locked(id, systemTest, deactivateTester, step -> { });
fail("deployTester step should still be locked!");
}
catch (TimeoutException e) { }
assertEquals(Collections.singletonList(runId), jobs.active().stream().map(run -> run.id()).collect(Collectors.toList()));
tester.controllerTester().controller().applications().deleteApplication(TenantAndApplicationId.from(id), tester.controllerTester().credentialsFor(id.tenant()));
assertEquals(Collections.emptyList(), jobs.active());
assertEquals(runId, jobs.last(id, systemTest).get().id());
runner.maintain();
assertEquals(runId, jobs.last(id, systemTest).get().id());
barrier.await();
runner.maintain();
assertEquals(Optional.empty(), jobs.last(id, systemTest));
}
@Test
public void historyPruning() {
DeploymentTester tester = new DeploymentTester();
JobController jobs = tester.controller().jobController();
JobRunner runner = new JobRunner(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator()),
inThreadExecutor(), (id, step) -> Optional.of(running));
TenantAndApplicationId appId = tester.createApplication("tenant", "real", "default").id();
ApplicationId instanceId = appId.defaultInstance();
JobId jobId = new JobId(instanceId, systemTest);
jobs.submit(appId, versions.targetApplication().source(), Optional.empty(), Optional.empty(), Optional.empty(), 2, applicationPackage, new byte[0]);
assertFalse(jobs.lastSuccess(jobId).isPresent());
for (int i = 0; i < jobs.historyLength(); i++) {
jobs.start(instanceId, systemTest, versions);
runner.run();
}
assertEquals(64, jobs.runs(jobId).size());
assertTrue(jobs.details(new RunId(instanceId, systemTest, 1)).isPresent());
jobs.start(instanceId, systemTest, versions);
runner.run();
assertEquals(64, jobs.runs(jobId).size());
assertEquals(2, jobs.runs(jobId).keySet().iterator().next().number());
assertFalse(jobs.details(new RunId(instanceId, systemTest, 1)).isPresent());
assertTrue(jobs.details(new RunId(instanceId, systemTest, 65)).isPresent());
JobRunner failureRunner = new JobRunner(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator()),
inThreadExecutor(), (id, step) -> Optional.of(error));
for (int i = 0; i < jobs.historyLength() - 1; i++) {
jobs.start(instanceId, systemTest, versions);
failureRunner.run();
}
assertEquals(64, jobs.runs(jobId).size());
assertEquals(65, jobs.runs(jobId).keySet().iterator().next().number());
assertEquals(65, jobs.lastSuccess(jobId).get().id().number());
assertEquals(66, jobs.firstFailing(jobId).get().id().number());
jobs.start(instanceId, systemTest, versions);
failureRunner.run();
assertEquals(65, jobs.runs(jobId).size());
assertEquals(65, jobs.runs(jobId).keySet().iterator().next().number());
assertEquals(65, jobs.lastSuccess(jobId).get().id().number());
assertEquals(66, jobs.firstFailing(jobId).get().id().number());
jobs.start(instanceId, systemTest, versions);
failureRunner.run();
assertEquals(66, jobs.runs(jobId).size());
assertEquals(65, jobs.runs(jobId).keySet().iterator().next().number());
assertEquals(66, jobs.runs(jobId).keySet().stream().skip(1).iterator().next().number());
assertEquals(65, jobs.lastSuccess(jobId).get().id().number());
assertEquals(66, jobs.firstFailing(jobId).get().id().number());
jobs.start(instanceId, systemTest, versions);
failureRunner.run();
assertEquals(66, jobs.runs(jobId).size());
assertEquals(65, jobs.runs(jobId).keySet().iterator().next().number());
assertEquals(66, jobs.runs(jobId).keySet().stream().skip(1).iterator().next().number());
assertEquals(68, jobs.runs(jobId).keySet().stream().skip(2).iterator().next().number());
assertEquals(65, jobs.lastSuccess(jobId).get().id().number());
assertEquals(66, jobs.firstFailing(jobId).get().id().number());
jobs.start(instanceId, systemTest, versions);
runner.run();
assertEquals(64, jobs.runs(jobId).size());
assertEquals(69, jobs.runs(jobId).keySet().iterator().next().number());
assertEquals(132, jobs.lastSuccess(jobId).get().id().number());
assertFalse(jobs.firstFailing(jobId).isPresent());
}
@Test
@Test
public void timeout() {
DeploymentTester tester = new DeploymentTester();
JobController jobs = tester.controller().jobController();
Map<Step, RunStatus> outcomes = new EnumMap<>(Step.class);
JobRunner runner = new JobRunner(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator()),
inThreadExecutor(), mappedRunner(outcomes));
TenantAndApplicationId appId = tester.createApplication("tenant", "real", "default").id();
ApplicationId id = appId.defaultInstance();
jobs.submit(appId, versions.targetApplication().source(), Optional.empty(), Optional.empty(), Optional.empty(), 2, applicationPackage, new byte[0]);
jobs.start(id, systemTest, versions);
tester.clock().advance(JobRunner.jobTimeout.plus(Duration.ofSeconds(1)));
runner.run();
assertSame(aborted, jobs.last(id, systemTest).get().status());
}
@Test
public void jobMetrics() {
DeploymentTester tester = new DeploymentTester();
JobController jobs = tester.controller().jobController();
Map<Step, RunStatus> outcomes = new EnumMap<>(Step.class);
JobRunner runner = new JobRunner(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator()),
inThreadExecutor(), mappedRunner(outcomes));
TenantAndApplicationId appId = tester.createApplication("tenant", "real", "default").id();
ApplicationId id = appId.defaultInstance();
jobs.submit(appId, versions.targetApplication().source(), Optional.empty(), Optional.empty(), Optional.empty(), 2, applicationPackage, new byte[0]);
for (RunStatus status : RunStatus.values()) {
if (status == success) continue;
outcomes.put(deployTester, status);
jobs.start(id, systemTest, versions);
runner.run();
jobs.finish(jobs.last(id, systemTest).get().id());
}
Map<String, String> context = Map.of("applicationId", "tenant.real.default",
"tenantName", "tenant",
"app", "real.default",
"test", "true",
"zone", "test.us-east-1");
MetricsMock metric = ((MetricsMock) tester.controller().metric());
assertEquals(RunStatus.values().length - 1, metric.getMetric(context::equals, JobMetrics.start).get().intValue());
assertEquals(1, metric.getMetric(context::equals, JobMetrics.abort).get().intValue());
assertEquals(1, metric.getMetric(context::equals, JobMetrics.error).get().intValue());
assertEquals(1, metric.getMetric(context::equals, JobMetrics.success).get().intValue());
assertEquals(1, metric.getMetric(context::equals, JobMetrics.convergenceFailure).get().intValue());
assertEquals(1, metric.getMetric(context::equals, JobMetrics.deploymentFailure).get().intValue());
assertEquals(1, metric.getMetric(context::equals, JobMetrics.outOfCapacity).get().intValue());
assertEquals(1, metric.getMetric(context::equals, JobMetrics.testFailure).get().intValue());
}
/**
 * Returns an ExecutorService that runs every submitted task synchronously on the
 * submitting thread, giving tests fully deterministic, in-line execution.
 */
public static ExecutorService inThreadExecutor() {
    return new AbstractExecutorService() {
        private final AtomicBoolean terminated = new AtomicBoolean(false);

        @Override public void execute(Runnable task) { task.run(); }
        @Override public void shutdown() { terminated.set(true); }
        @Override public List<Runnable> shutdownNow() { terminated.set(true); return Collections.emptyList(); }
        @Override public boolean isShutdown() { return terminated.get(); }
        @Override public boolean isTerminated() { return terminated.get(); }
        // Tasks run in-line, so by the time this is called nothing can still be pending.
        @Override public boolean awaitTermination(long timeout, TimeUnit unit) { return true; }
    };
}
/**
 * Returns an executor backed by a fixed thread pool which registers each submitted
 * task with the given Phaser before it starts and deregisters after it completes,
 * so the caller can synchronise on outstanding work through the phaser.
 */
private static ExecutorService phasedExecutor(Phaser phaser) {
    return new AbstractExecutorService() {
        private final ExecutorService pool = Executors.newFixedThreadPool(32);

        @Override public void execute(Runnable task) {
            phaser.register();                // count this task as an outstanding party
            pool.execute(() -> {
                task.run();
                phaser.arriveAndDeregister(); // signal completion to anyone awaiting the phase
            });
        }
        @Override public void shutdown() { pool.shutdown(); }
        @Override public List<Runnable> shutdownNow() { return pool.shutdownNow(); }
        @Override public boolean isShutdown() { return pool.isShutdown(); }
        @Override public boolean isTerminated() { return pool.isTerminated(); }
        @Override public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException {
            return pool.awaitTermination(timeout, unit);
        }
    };
}
/** Returns a StepRunner whose result for each step is looked up in the given outcome map; absent entries yield empty. */
private static StepRunner mappedRunner(Map<Step, RunStatus> outcomes) {
    return (step, id) -> {
        RunStatus outcome = outcomes.get(step.get());
        return Optional.ofNullable(outcome);
    };
}
/**
 * Returns a StepRunner which, for the deployTester step only, rendezvouses with the
 * test thread twice on the given barrier: once so the test can observe the step has
 * started, and once more to hold the step until the test releases it. All steps
 * report {@code running} as their status.
 */
private static StepRunner waitingRunner(CyclicBarrier barrier) {
    return (step, id) -> {
        try {
            if (step.get() == deployTester) {
                barrier.await();  // first rendezvous: both parties arrive, then the barrier trips
                barrier.reset();  // re-arm the barrier so it can be used for the second rendezvous
                barrier.await();  // second rendezvous: block here until the test thread arrives
            }
        }
        catch (InterruptedException | BrokenBarrierException e) {
            // Any barrier failure is a test bug, so surface it as an assertion error.
            throw new AssertionError(e);
        }
        return Optional.of(running);
    };
}
} |
Please add the instance to the log message. Nice to see the same hash go in and out. | public void addInstance(Telegraf telegraf) {
logger.log(LogLevel.DEBUG, () -> "Adding Telegraf instance to registry");
telegrafInstances.add(telegraf);
} | logger.log(LogLevel.DEBUG, () -> "Adding Telegraf instance to registry"); | public void addInstance(Telegraf telegraf) {
logger.log(LogLevel.DEBUG, () -> "Adding Telegraf instance to registry: " + telegraf);
telegrafInstances.add(telegraf);
} | class TelegrafRegistry {
private static final List<Telegraf> telegrafInstances = Collections.synchronizedList(new ArrayList<>());
private static final Logger logger = Logger.getLogger(TelegrafRegistry.class.getName());
/**
 * Deregisters the given Telegraf instance from the shared registry.
 *
 * @param telegraf the instance to remove
 */
public void removeInstance(Telegraf telegraf) {
    // Include the instance in the message so add/remove events can be correlated in the log.
    logger.log(LogLevel.DEBUG, () -> "Removing Telegraf instance from registry: " + telegraf);
    telegrafInstances.remove(telegraf);
}
/** Returns true when no Telegraf instances are currently registered. */
public boolean isEmpty() {
    return telegrafInstances.size() == 0;
}
} | class TelegrafRegistry {
private static final List<Telegraf> telegrafInstances = Collections.synchronizedList(new ArrayList<>());
private static final Logger logger = Logger.getLogger(TelegrafRegistry.class.getName());
/**
 * Deregisters the given Telegraf instance from the shared registry.
 *
 * @param telegraf the instance to remove
 */
public void removeInstance(Telegraf telegraf) {
    // The instance is logged so add/remove events can be correlated.
    logger.log(LogLevel.DEBUG, () -> "Removing Telegraf instance from registry: " + telegraf);
    telegrafInstances.remove(telegraf);
}
/** Returns whether the registry currently holds no Telegraf instances. */
public boolean isEmpty() {
    return telegrafInstances.isEmpty();
}
} |
```suggestion logger.log(LogLevel.DEBUG, () -> "Adding Telegraf instance to registry: " + telegraf); ``` | public void addInstance(Telegraf telegraf) {
logger.log(LogLevel.DEBUG, () -> "Adding Telegraf instance to registry");
telegrafInstances.add(telegraf);
} | logger.log(LogLevel.DEBUG, () -> "Adding Telegraf instance to registry"); | public void addInstance(Telegraf telegraf) {
logger.log(LogLevel.DEBUG, () -> "Adding Telegraf instance to registry: " + telegraf);
telegrafInstances.add(telegraf);
} | class TelegrafRegistry {
private static final List<Telegraf> telegrafInstances = Collections.synchronizedList(new ArrayList<>());
private static final Logger logger = Logger.getLogger(TelegrafRegistry.class.getName());
/**
 * Deregisters the given Telegraf instance from the shared registry.
 *
 * @param telegraf the instance to remove
 */
public void removeInstance(Telegraf telegraf) {
    // Include the instance in the message so add/remove events can be correlated in the log.
    logger.log(LogLevel.DEBUG, () -> "Removing Telegraf instance from registry: " + telegraf);
    telegrafInstances.remove(telegraf);
}
/** Returns whether the registry currently holds no Telegraf instances. */
public boolean isEmpty() {
    return telegrafInstances.isEmpty();
}
} | class TelegrafRegistry {
private static final List<Telegraf> telegrafInstances = Collections.synchronizedList(new ArrayList<>());
private static final Logger logger = Logger.getLogger(TelegrafRegistry.class.getName());
/**
 * Deregisters the given Telegraf instance from the shared registry.
 *
 * @param telegraf the instance to remove
 */
public void removeInstance(Telegraf telegraf) {
    // The instance is logged so add/remove events can be correlated.
    logger.log(LogLevel.DEBUG, () -> "Removing Telegraf instance from registry: " + telegraf);
    telegrafInstances.remove(telegraf);
}
/** Returns whether the registry currently holds no Telegraf instances. */
public boolean isEmpty() {
    return telegrafInstances.isEmpty();
}
} |
```suggestion logger.log(LogLevel.DEBUG, () -> "Removing Telegraf instance from registry: " + telegraf); ``` | public void removeInstance(Telegraf telegraf) {
logger.log(LogLevel.DEBUG, () -> "Removing Telegraf instance from registry");
telegrafInstances.remove(telegraf);
} | logger.log(LogLevel.DEBUG, () -> "Removing Telegraf instance from registry"); | public void removeInstance(Telegraf telegraf) {
logger.log(LogLevel.DEBUG, () -> "Removing Telegraf instance from registry: " + telegraf);
telegrafInstances.remove(telegraf);
} | class TelegrafRegistry {
private static final List<Telegraf> telegrafInstances = Collections.synchronizedList(new ArrayList<>());
private static final Logger logger = Logger.getLogger(TelegrafRegistry.class.getName());
/**
 * Registers the given Telegraf instance in the shared registry.
 *
 * @param telegraf the instance to register
 */
public void addInstance(Telegraf telegraf) {
    // Include the instance in the message so add/remove events can be correlated in the log.
    logger.log(LogLevel.DEBUG, () -> "Adding Telegraf instance to registry: " + telegraf);
    telegrafInstances.add(telegraf);
}
/** Returns whether the registry currently holds no Telegraf instances. */
public boolean isEmpty() {
    return telegrafInstances.isEmpty();
}
} | class TelegrafRegistry {
private static final List<Telegraf> telegrafInstances = Collections.synchronizedList(new ArrayList<>());
private static final Logger logger = Logger.getLogger(TelegrafRegistry.class.getName());
/**
 * Registers the given Telegraf instance in the shared registry.
 *
 * @param telegraf the instance to register
 */
public void addInstance(Telegraf telegraf) {
    // The instance is logged so add/remove events can be correlated.
    logger.log(LogLevel.DEBUG, () -> "Adding Telegraf instance to registry: " + telegraf);
    telegrafInstances.add(telegraf);
}
/** Returns whether the registry currently holds no Telegraf instances. */
public boolean isEmpty() {
    return telegrafInstances.isEmpty();
}
} |
This should just include all states for which `Node.State#isAllocated` is `true`. | private int findHighestIndex(ApplicationId application, ClusterSpec cluster) {
int highestIndex = -1;
for (Node node : nodeRepository.getNodes(application,
Node.State.reserved, Node.State.active, Node.State.inactive,
Node.State.parked, Node.State.failed)) {
ClusterSpec nodeCluster = node.allocation().get().membership().cluster();
if ( ! nodeCluster.id().equals(cluster.id())) continue;
if ( ! nodeCluster.type().equals(cluster.type())) continue;
highestIndex = Math.max(node.allocation().get().membership().index(), highestIndex);
}
return highestIndex;
} | Node.State.reserved, Node.State.active, Node.State.inactive, | private int findHighestIndex(ApplicationId application, ClusterSpec cluster) {
int highestIndex = -1;
for (Node node : nodeRepository.getNodes(application, Node.State.allocatedStates().toArray(new Node.State[0]))) {
ClusterSpec nodeCluster = node.allocation().get().membership().cluster();
if ( ! nodeCluster.id().equals(cluster.id())) continue;
if ( ! nodeCluster.type().equals(cluster.type())) continue;
highestIndex = Math.max(node.allocation().get().membership().index(), highestIndex);
}
return highestIndex;
} | class Preparer {
private final NodeRepository nodeRepository;
private final GroupPreparer groupPreparer;
private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner;
private final int spareCount;
/**
 * Creates a Preparer.
 *
 * @param nodeRepository          node repository to allocate nodes from
 * @param spareCount              number of spare hosts to account for during allocation
 * @param hostProvisioner         provisioner for dynamically created hosts, if any
 * @param hostResourcesCalculator calculator for resources available on a host
 * @param flagSource              source of feature flags
 * @param loadBalancerProvisioner provisioner for load balancers, if any
 */
public Preparer(NodeRepository nodeRepository, int spareCount, Optional<HostProvisioner> hostProvisioner,
                HostResourcesCalculator hostResourcesCalculator, FlagSource flagSource,
                Optional<LoadBalancerProvisioner> loadBalancerProvisioner) {
    this.nodeRepository = nodeRepository;
    this.spareCount = spareCount;
    this.loadBalancerProvisioner = loadBalancerProvisioner;
    // hostProvisioner, hostResourcesCalculator and flagSource are only needed by the group preparer.
    this.groupPreparer = new GroupPreparer(nodeRepository, hostProvisioner, hostResourcesCalculator, flagSource);
}
/**
 * Prepares all required resources — nodes and, when configured, a load balancer —
 * for the given application cluster.
 *
 * @return the nodes this cluster will have allocated if activated
 */
public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
    var nodes = prepareNodes(application, cluster, requestedNodes, wantedGroups);
    prepareLoadBalancer(application, cluster, requestedNodes);
    return nodes;
}
/**
 * Ensure sufficient nodes are reserved or active for the given application and cluster
 *
 * @return the list of nodes this cluster will have allocated if activated
 */
private List<Node> prepareNodes(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
    // Nodes in groups that are going away can be reused as surplus by the remaining groups.
    List<Node> surplusNodes = findNodesInRemovableGroups(application, cluster, wantedGroups);
    // New nodes are numbered above the highest index already used by this cluster.
    MutableInteger highestIndex = new MutableInteger(findHighestIndex(application, cluster));
    List<Node> acceptedNodes = new ArrayList<>();
    for (int groupIndex = 0; groupIndex < wantedGroups; groupIndex++) {
        ClusterSpec clusterGroup = cluster.with(Optional.of(ClusterSpec.Group.from(groupIndex)));
        // Each group is prepared with an equal fraction of the requested capacity.
        List<Node> accepted = groupPreparer.prepare(application, clusterGroup,
                                                    requestedNodes.fraction(wantedGroups), surplusNodes,
                                                    highestIndex, spareCount, wantedGroups);
        replace(acceptedNodes, accepted);
    }
    // Move leftover surplus nodes into a wanted group, then retire them unless removable.
    moveToActiveGroup(surplusNodes, wantedGroups, cluster.group());
    replace(acceptedNodes, retire(surplusNodes));
    return acceptedNodes;
}
/** Prepares a load balancer for the given application and cluster, if a load balancer provisioner is configured. */
public void prepareLoadBalancer(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) {
    loadBalancerProvisioner.ifPresent(provisioner -> provisioner.prepare(application, cluster, requestedNodes));
}
/**
 * Returns the active nodes of the requested cluster which belong to groups with
 * index number at or above the wanted group count, i.e. groups which will be removed.
 */
private List<Node> findNodesInRemovableGroups(ApplicationId application, ClusterSpec requestedCluster, int wantedGroups) {
    List<Node> surplusNodes = new ArrayList<>(0);
    for (Node node : nodeRepository.getNodes(application, Node.State.active)) {
        ClusterSpec nodeCluster = node.allocation().get().membership().cluster();
        if ( ! nodeCluster.id().equals(requestedCluster.id())) continue;
        if ( ! nodeCluster.type().equals(requestedCluster.type())) continue;
        // NOTE(review): assumes every active node of this cluster has a group assigned — confirm.
        if (nodeCluster.group().get().index() >= wantedGroups)
            surplusNodes.add(node);
    }
    return surplusNodes;
}
/** Move nodes from unwanted groups to wanted groups to avoid lingering groups consisting of retired nodes */
private void moveToActiveGroup(List<Node> surplusNodes, int wantedGroups, Optional<ClusterSpec.Group> targetGroup) {
    for (ListIterator<Node> i = surplusNodes.listIterator(); i.hasNext(); ) {
        Node node = i.next();
        ClusterMembership membership = node.allocation().get().membership();
        ClusterSpec cluster = membership.cluster();
        if (cluster.group().get().index() >= wantedGroups) {
            // Reassign to the caller's target group, defaulting to group 0 when none is given.
            ClusterSpec.Group newGroup = targetGroup.orElse(ClusterSpec.Group.from(0));
            ClusterMembership newGroupMembership = membership.with(cluster.with(Optional.of(newGroup)));
            // Nodes are immutable: replace the list element in place with an updated copy.
            i.set(node.with(node.allocation().get().with(newGroupMembership)));
        }
    }
}
/**
 * Nodes are immutable so when changing attributes to the node we create a new instance.
 *
 * This method is used to both add new nodes and replaces old node references with the new references.
 * Removing before adding ensures that an updated copy of a node displaces its stale counterpart.
 */
private List<Node> replace(List<Node> list, List<Node> changed) {
    list.removeAll(changed);
    list.addAll(changed);
    return list;
}
/**
 * Returns retired copies of the given nodes, except nodes which are removable:
 * those need no retirement and are left out of the result entirely.
 */
private List<Node> retire(List<Node> nodes) {
    List<Node> retired = new ArrayList<>(nodes.size());
    for (Node node : nodes) {
        if ( ! node.allocation().get().isRemovable())
            retired.add(node.retire(Agent.application, nodeRepository.clock().instant()));
    }
    return retired;
}
} | class Preparer {
private final NodeRepository nodeRepository;
private final GroupPreparer groupPreparer;
private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner;
private final int spareCount;
public Preparer(NodeRepository nodeRepository, int spareCount, Optional<HostProvisioner> hostProvisioner,
HostResourcesCalculator hostResourcesCalculator, FlagSource flagSource,
Optional<LoadBalancerProvisioner> loadBalancerProvisioner) {
this.nodeRepository = nodeRepository;
this.spareCount = spareCount;
this.loadBalancerProvisioner = loadBalancerProvisioner;
this.groupPreparer = new GroupPreparer(nodeRepository, hostProvisioner, hostResourcesCalculator, flagSource);
}
/** Prepare all required resources for the given application and cluster */
public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
var nodes = prepareNodes(application, cluster, requestedNodes, wantedGroups);
prepareLoadBalancer(application, cluster, requestedNodes);
return nodes;
}
/**
* Ensure sufficient nodes are reserved or active for the given application and cluster
*
* @return the list of nodes this cluster will have allocated if activated
*/
private List<Node> prepareNodes(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
List<Node> surplusNodes = findNodesInRemovableGroups(application, cluster, wantedGroups);
MutableInteger highestIndex = new MutableInteger(findHighestIndex(application, cluster));
List<Node> acceptedNodes = new ArrayList<>();
for (int groupIndex = 0; groupIndex < wantedGroups; groupIndex++) {
ClusterSpec clusterGroup = cluster.with(Optional.of(ClusterSpec.Group.from(groupIndex)));
List<Node> accepted = groupPreparer.prepare(application, clusterGroup,
requestedNodes.fraction(wantedGroups), surplusNodes,
highestIndex, spareCount, wantedGroups);
replace(acceptedNodes, accepted);
}
moveToActiveGroup(surplusNodes, wantedGroups, cluster.group());
replace(acceptedNodes, retire(surplusNodes));
return acceptedNodes;
}
/** Prepare a load balancer for given application and cluster */
public void prepareLoadBalancer(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) {
loadBalancerProvisioner.ifPresent(provisioner -> provisioner.prepare(application, cluster, requestedNodes));
}
/**
* Returns a list of the nodes which are
* in groups with index number above or equal the group count
*/
private List<Node> findNodesInRemovableGroups(ApplicationId application, ClusterSpec requestedCluster, int wantedGroups) {
List<Node> surplusNodes = new ArrayList<>(0);
for (Node node : nodeRepository.getNodes(application, Node.State.active)) {
ClusterSpec nodeCluster = node.allocation().get().membership().cluster();
if ( ! nodeCluster.id().equals(requestedCluster.id())) continue;
if ( ! nodeCluster.type().equals(requestedCluster.type())) continue;
if (nodeCluster.group().get().index() >= wantedGroups)
surplusNodes.add(node);
}
return surplusNodes;
}
/** Move nodes from unwanted groups to wanted groups to avoid lingering groups consisting of retired nodes */
private void moveToActiveGroup(List<Node> surplusNodes, int wantedGroups, Optional<ClusterSpec.Group> targetGroup) {
for (ListIterator<Node> i = surplusNodes.listIterator(); i.hasNext(); ) {
Node node = i.next();
ClusterMembership membership = node.allocation().get().membership();
ClusterSpec cluster = membership.cluster();
if (cluster.group().get().index() >= wantedGroups) {
ClusterSpec.Group newGroup = targetGroup.orElse(ClusterSpec.Group.from(0));
ClusterMembership newGroupMembership = membership.with(cluster.with(Optional.of(newGroup)));
i.set(node.with(node.allocation().get().with(newGroupMembership)));
}
}
}
/**
* Nodes are immutable so when changing attributes to the node we create a new instance.
*
* This method is used to both add new nodes and replaces old node references with the new references.
*/
private List<Node> replace(List<Node> list, List<Node> changed) {
list.removeAll(changed);
list.addAll(changed);
return list;
}
/**
 * Returns retired copies of the given nodes, except nodes which are removable:
 * those need no retirement and are left out of the result entirely.
 */
private List<Node> retire(List<Node> nodes) {
    List<Node> retired = new ArrayList<>(nodes.size());
    for (Node node : nodes) {
        if ( ! node.allocation().get().isRemovable())
            retired.add(node.retire(Agent.application, nodeRepository.clock().instant()));
    }
    return retired;
}
} |
Add a TODO for removing this. | public TenantController(Controller controller, CuratorDb curator, AccessControl accessControl) {
this.controller = Objects.requireNonNull(controller, "controller must be non-null");
this.curator = Objects.requireNonNull(curator, "curator must be non-null");
this.accessControl = accessControl;
Once.after(Duration.ofMinutes(1), () -> {
Instant start = controller.clock().instant();
int count = 0;
for (TenantName name : curator.readTenantNames()) {
if (name.value().startsWith(Tenant.userPrefix))
curator.removeTenant(name);
else {
lockIfPresent(name, LockedTenant.class, this::store);
count++;
}
}
log.log(Level.INFO, String.format("Wrote %d tenants in %s", count,
Duration.between(start, controller.clock().instant())));
});
} | if (name.value().startsWith(Tenant.userPrefix)) | public TenantController(Controller controller, CuratorDb curator, AccessControl accessControl) {
this.controller = Objects.requireNonNull(controller, "controller must be non-null");
this.curator = Objects.requireNonNull(curator, "curator must be non-null");
this.accessControl = accessControl;
Once.after(Duration.ofMinutes(1), () -> {
Instant start = controller.clock().instant();
int count = 0;
for (TenantName name : curator.readTenantNames()) {
if (name.value().startsWith(Tenant.userPrefix))
curator.removeTenant(name);
else {
lockIfPresent(name, LockedTenant.class, this::store);
count++;
}
}
log.log(Level.INFO, String.format("Wrote %d tenants in %s", count,
Duration.between(start, controller.clock().instant())));
});
} | class TenantController {
private static final Logger log = Logger.getLogger(TenantController.class.getName());
private final Controller controller;
private final CuratorDb curator;
private final AccessControl accessControl;
/** Returns all known tenants, sorted by name. Reads from the curator database on every call. */
public List<Tenant> asList() {
    return curator.readTenants().stream()
                  .sorted(Comparator.comparing(Tenant::name))
                  .collect(Collectors.toList());
}
/** Returns the list of tenants accessible to the given user. */
public List<Tenant> asList(Credentials credentials) {
    // NOTE(review): hard-casts the access control to AthenzFacade — confirm before adding other implementations.
    return ((AthenzFacade) accessControl).accessibleTenants(asList(), credentials);
}
/** Locks a tenant for modification and applies the given action. Does nothing if the tenant does not exist. */
public <T extends LockedTenant> void lockIfPresent(TenantName name, Class<T> token, Consumer<T> action) {
    try (Lock lock = lock(name)) {
        // The tenant is read only after the lock is held, so the action sees a consistent view.
        get(name).map(tenant -> LockedTenant.of(tenant, lock))
                 .map(token::cast)
                 .ifPresent(action);
    }
}
/** Lock a tenant for modification and apply action. Throws IllegalArgumentException (via require) if the tenant does not exist. */
public <T extends LockedTenant> void lockOrThrow(TenantName name, Class<T> token, Consumer<T> action) {
    try (Lock lock = lock(name)) {
        action.accept(token.cast(LockedTenant.of(require(name), lock)));
    }
}
/** Returns the tenant with the given name, or throws IllegalArgumentException if it does not exist. */
public Tenant require(TenantName name) {
    return get(name).orElseThrow(() -> new IllegalArgumentException("No such tenant '" + name + "'."));
}
/** Replace and store any previous version of given tenant. Callers must hold the tenant lock (enforced by the LockedTenant type). */
public void store(LockedTenant tenant) {
    curator.writeTenant(tenant.get());
}
/** Create a tenant, provided the given credentials are valid. */
public void create(TenantSpec tenantSpec, Credentials credentials) {
    try (Lock lock = lock(tenantSpec.tenant())) {
        // Reject duplicates (and near-duplicates) before asking access control to create the tenant.
        requireNonExistent(tenantSpec.tenant());
        curator.writeTenant(accessControl.createTenant(tenantSpec, credentials, asList()));
    }
}
/** Returns the tenant with the given name, if it exists. */
public Optional<Tenant> get(TenantName name) {
    return curator.readTenant(name);
}
/** Returns the tenant with the given name, if it exists. Convenience overload taking a raw string. */
public Optional<Tenant> get(String name) {
    return get(TenantName.from(name));
}
/** Updates the tenant contained in the given tenant spec with new data. */
public void update(TenantSpec tenantSpec, Credentials credentials) {
    try (Lock lock = lock(tenantSpec.tenant())) {
        // Access control computes the updated tenant; the result is persisted under the lock.
        curator.writeTenant(accessControl.updateTenant(tenantSpec, credentials, asList(),
                                                       controller.applications().asList(tenantSpec.tenant())));
    }
}
/** Deletes the given tenant. Fails if the tenant does not exist or still has applications. */
public void delete(TenantName tenant, Credentials credentials) {
    try (Lock lock = lock(tenant)) {
        require(tenant);
        if ( ! controller.applications().asList(tenant).isEmpty())
            throw new IllegalArgumentException("Could not delete tenant '" + tenant.value()
                                               + "': This tenant has active applications");
        // NOTE(review): storage removal happens before access-control cleanup; a failure in
        // deleteTenant leaves the tenant gone locally but present externally — confirm intended.
        curator.removeTenant(tenant);
        accessControl.deleteTenant(tenant, credentials);
    }
}
// Throws if the given tenant name is reserved or collides with an existing tenant.
private void requireNonExistent(TenantName name) {
    // Also rejects names that collide after mapping '-' to '_' — presumably because
    // some backing system treats the two as the same identifier; confirm.
    if ( "hosted-vespa".equals(name.value())
         || get(name).isPresent()
         || get(name.value().replace('-', '_')).isPresent()) {
        throw new IllegalArgumentException("Tenant '" + name + "' already exists");
    }
}
/**
 * Returns a lock which provides exclusive rights to changing this tenant.
 * Any operation which stores a tenant need to first acquire this lock, then read, modify
 * and store the tenant, and finally release (close) the lock.
 */
private Lock lock(TenantName tenant) {
    return curator.lock(tenant);
}
} | class TenantController {
private static final Logger log = Logger.getLogger(TenantController.class.getName());
private final Controller controller;
private final CuratorDb curator;
private final AccessControl accessControl;
/** Returns a list of all known tenants sorted by name */
public List<Tenant> asList() {
return curator.readTenants().stream()
.sorted(Comparator.comparing(Tenant::name))
.collect(Collectors.toList());
}
/** Returns the list of tenants accessible to the given user. */
public List<Tenant> asList(Credentials credentials) {
return ((AthenzFacade) accessControl).accessibleTenants(asList(), credentials);
}
/** Locks a tenant for modification and applies the given action. */
public <T extends LockedTenant> void lockIfPresent(TenantName name, Class<T> token, Consumer<T> action) {
try (Lock lock = lock(name)) {
get(name).map(tenant -> LockedTenant.of(tenant, lock))
.map(token::cast)
.ifPresent(action);
}
}
/** Lock a tenant for modification and apply action. Throws if the tenant does not exist */
public <T extends LockedTenant> void lockOrThrow(TenantName name, Class<T> token, Consumer<T> action) {
try (Lock lock = lock(name)) {
action.accept(token.cast(LockedTenant.of(require(name), lock)));
}
}
/** Returns the tenant with the given name, or throws. */
public Tenant require(TenantName name) {
return get(name).orElseThrow(() -> new IllegalArgumentException("No such tenant '" + name + "'."));
}
/** Replace and store any previous version of given tenant */
public void store(LockedTenant tenant) {
curator.writeTenant(tenant.get());
}
/** Create a tenant, provided the given credentials are valid. */
public void create(TenantSpec tenantSpec, Credentials credentials) {
try (Lock lock = lock(tenantSpec.tenant())) {
requireNonExistent(tenantSpec.tenant());
curator.writeTenant(accessControl.createTenant(tenantSpec, credentials, asList()));
}
}
/** Find tenant by name */
public Optional<Tenant> get(TenantName name) {
return curator.readTenant(name);
}
/** Find tenant by name */
public Optional<Tenant> get(String name) {
return get(TenantName.from(name));
}
/** Updates the tenant contained in the given tenant spec with new data. */
public void update(TenantSpec tenantSpec, Credentials credentials) {
try (Lock lock = lock(tenantSpec.tenant())) {
curator.writeTenant(accessControl.updateTenant(tenantSpec, credentials, asList(),
controller.applications().asList(tenantSpec.tenant())));
}
}
/** Deletes the given tenant. */
public void delete(TenantName tenant, Credentials credentials) {
try (Lock lock = lock(tenant)) {
require(tenant);
if ( ! controller.applications().asList(tenant).isEmpty())
throw new IllegalArgumentException("Could not delete tenant '" + tenant.value()
+ "': This tenant has active applications");
curator.removeTenant(tenant);
accessControl.deleteTenant(tenant, credentials);
}
}
private void requireNonExistent(TenantName name) {
if ( "hosted-vespa".equals(name.value())
|| get(name).isPresent()
|| get(name.value().replace('-', '_')).isPresent()) {
throw new IllegalArgumentException("Tenant '" + name + "' already exists");
}
}
/**
* Returns a lock which provides exclusive rights to changing this tenant.
* Any operation which stores a tenant need to first acquire this lock, then read, modify
* and store the tenant, and finally release (close) the lock.
*/
private Lock lock(TenantName tenant) {
return curator.lock(tenant);
}
} |
```suggestion if (name.value().startsWith(Tenant.userPrefix)) // TODO jonmv: Remove after run once. ``` | public TenantController(Controller controller, CuratorDb curator, AccessControl accessControl) {
this.controller = Objects.requireNonNull(controller, "controller must be non-null");
this.curator = Objects.requireNonNull(curator, "curator must be non-null");
this.accessControl = accessControl;
Once.after(Duration.ofMinutes(1), () -> {
Instant start = controller.clock().instant();
int count = 0;
for (TenantName name : curator.readTenantNames()) {
if (name.value().startsWith(Tenant.userPrefix))
curator.removeTenant(name);
else {
lockIfPresent(name, LockedTenant.class, this::store);
count++;
}
}
log.log(Level.INFO, String.format("Wrote %d tenants in %s", count,
Duration.between(start, controller.clock().instant())));
});
} | if (name.value().startsWith(Tenant.userPrefix)) | public TenantController(Controller controller, CuratorDb curator, AccessControl accessControl) {
this.controller = Objects.requireNonNull(controller, "controller must be non-null");
this.curator = Objects.requireNonNull(curator, "curator must be non-null");
this.accessControl = accessControl;
Once.after(Duration.ofMinutes(1), () -> {
Instant start = controller.clock().instant();
int count = 0;
for (TenantName name : curator.readTenantNames()) {
if (name.value().startsWith(Tenant.userPrefix))
curator.removeTenant(name);
else {
lockIfPresent(name, LockedTenant.class, this::store);
count++;
}
}
log.log(Level.INFO, String.format("Wrote %d tenants in %s", count,
Duration.between(start, controller.clock().instant())));
});
} | class TenantController {
private static final Logger log = Logger.getLogger(TenantController.class.getName());
private final Controller controller;
private final CuratorDb curator;
private final AccessControl accessControl;
/** Returns a list of all known tenants sorted by name */
public List<Tenant> asList() {
return curator.readTenants().stream()
.sorted(Comparator.comparing(Tenant::name))
.collect(Collectors.toList());
}
/** Returns the list of tenants accessible to the given user. */
public List<Tenant> asList(Credentials credentials) {
return ((AthenzFacade) accessControl).accessibleTenants(asList(), credentials);
}
/** Locks a tenant for modification and applies the given action. */
public <T extends LockedTenant> void lockIfPresent(TenantName name, Class<T> token, Consumer<T> action) {
try (Lock lock = lock(name)) {
get(name).map(tenant -> LockedTenant.of(tenant, lock))
.map(token::cast)
.ifPresent(action);
}
}
/** Lock a tenant for modification and apply action. Throws if the tenant does not exist */
public <T extends LockedTenant> void lockOrThrow(TenantName name, Class<T> token, Consumer<T> action) {
try (Lock lock = lock(name)) {
action.accept(token.cast(LockedTenant.of(require(name), lock)));
}
}
/** Returns the tenant with the given name, or throws. */
public Tenant require(TenantName name) {
return get(name).orElseThrow(() -> new IllegalArgumentException("No such tenant '" + name + "'."));
}
/** Replace and store any previous version of given tenant */
public void store(LockedTenant tenant) {
curator.writeTenant(tenant.get());
}
/** Create a tenant, provided the given credentials are valid. */
public void create(TenantSpec tenantSpec, Credentials credentials) {
try (Lock lock = lock(tenantSpec.tenant())) {
requireNonExistent(tenantSpec.tenant());
curator.writeTenant(accessControl.createTenant(tenantSpec, credentials, asList()));
}
}
/** Find tenant by name */
public Optional<Tenant> get(TenantName name) {
return curator.readTenant(name);
}
/** Find tenant by name */
public Optional<Tenant> get(String name) {
return get(TenantName.from(name));
}
/** Updates the tenant contained in the given tenant spec with new data. */
public void update(TenantSpec tenantSpec, Credentials credentials) {
try (Lock lock = lock(tenantSpec.tenant())) {
curator.writeTenant(accessControl.updateTenant(tenantSpec, credentials, asList(),
controller.applications().asList(tenantSpec.tenant())));
}
}
/** Deletes the given tenant. */
public void delete(TenantName tenant, Credentials credentials) {
try (Lock lock = lock(tenant)) {
require(tenant);
if ( ! controller.applications().asList(tenant).isEmpty())
throw new IllegalArgumentException("Could not delete tenant '" + tenant.value()
+ "': This tenant has active applications");
curator.removeTenant(tenant);
accessControl.deleteTenant(tenant, credentials);
}
}
private void requireNonExistent(TenantName name) {
if ( "hosted-vespa".equals(name.value())
|| get(name).isPresent()
|| get(name.value().replace('-', '_')).isPresent()) {
throw new IllegalArgumentException("Tenant '" + name + "' already exists");
}
}
/**
* Returns a lock which provides exclusive rights to changing this tenant.
* Any operation which stores a tenant need to first acquire this lock, then read, modify
* and store the tenant, and finally release (close) the lock.
*/
private Lock lock(TenantName tenant) {
return curator.lock(tenant);
}
} | class TenantController {
private static final Logger log = Logger.getLogger(TenantController.class.getName());
private final Controller controller;
private final CuratorDb curator;
private final AccessControl accessControl;
/** Returns a list of all known tenants sorted by name */
public List<Tenant> asList() {
return curator.readTenants().stream()
.sorted(Comparator.comparing(Tenant::name))
.collect(Collectors.toList());
}
/** Returns the list of tenants accessible to the given user. */
public List<Tenant> asList(Credentials credentials) {
return ((AthenzFacade) accessControl).accessibleTenants(asList(), credentials);
}
/** Locks a tenant for modification and applies the given action. */
public <T extends LockedTenant> void lockIfPresent(TenantName name, Class<T> token, Consumer<T> action) {
try (Lock lock = lock(name)) {
get(name).map(tenant -> LockedTenant.of(tenant, lock))
.map(token::cast)
.ifPresent(action);
}
}
/** Lock a tenant for modification and apply action. Throws if the tenant does not exist */
public <T extends LockedTenant> void lockOrThrow(TenantName name, Class<T> token, Consumer<T> action) {
try (Lock lock = lock(name)) {
action.accept(token.cast(LockedTenant.of(require(name), lock)));
}
}
/** Returns the tenant with the given name, or throws. */
public Tenant require(TenantName name) {
return get(name).orElseThrow(() -> new IllegalArgumentException("No such tenant '" + name + "'."));
}
/** Replace and store any previous version of given tenant */
public void store(LockedTenant tenant) {
curator.writeTenant(tenant.get());
}
/** Create a tenant, provided the given credentials are valid. */
public void create(TenantSpec tenantSpec, Credentials credentials) {
try (Lock lock = lock(tenantSpec.tenant())) {
requireNonExistent(tenantSpec.tenant());
curator.writeTenant(accessControl.createTenant(tenantSpec, credentials, asList()));
}
}
/** Find tenant by name */
public Optional<Tenant> get(TenantName name) {
return curator.readTenant(name);
}
/** Find tenant by name */
public Optional<Tenant> get(String name) {
return get(TenantName.from(name));
}
/** Updates the tenant contained in the given tenant spec with new data. */
public void update(TenantSpec tenantSpec, Credentials credentials) {
    try (Lock lock = lock(tenantSpec.tenant())) {
        Tenant updated = accessControl.updateTenant(tenantSpec, credentials, asList(),
                                                    controller.applications().asList(tenantSpec.tenant()));
        curator.writeTenant(updated);
    }
}
/** Deletes the given tenant, which must exist and have no active applications. */
public void delete(TenantName tenant, Credentials credentials) {
    try (Lock lock = lock(tenant)) {
        require(tenant);
        if (controller.applications().asList(tenant).isEmpty()) {
            curator.removeTenant(tenant);
            accessControl.deleteTenant(tenant, credentials);
        }
        else {
            throw new IllegalArgumentException("Could not delete tenant '" + tenant.value()
                                               + "': This tenant has active applications");
        }
    }
}
// Throws if the name is reserved or already taken. Besides the exact name, the
// dash-to-underscore spelling is also checked — presumably because some backing
// systems treat '-' and '_' as equivalent (TODO confirm; note the reverse
// substitution, '_' to '-', is not checked here).
private void requireNonExistent(TenantName name) {
if ( "hosted-vespa".equals(name.value())
|| get(name).isPresent()
|| get(name.value().replace('-', '_')).isPresent()) {
throw new IllegalArgumentException("Tenant '" + name + "' already exists");
}
}
/**
* Returns a lock which provides exclusive rights to changing this tenant.
* Any operation which stores a tenant need to first acquire this lock, then read, modify
* and store the tenant, and finally release (close) the lock.
*/
private Lock lock(TenantName tenant) {
// The lock is keyed per tenant name, so operations on different tenants do not contend.
return curator.lock(tenant);
}
} |
I suggest having a method to wait for an executor completion and calling it for each executor | private static void release(ExecutorWithFallback executor) {
synchronized (globalLock) {
if (executor.primary != globalPrimaryExecutor) {
throw new IllegalStateException("primary executor " + executor.primary + " != " + globalPrimaryExecutor);
}
if (executor.secondary != globalFallbackExecutor) {
throw new IllegalStateException("secondary executor " + executor.secondary + " != " + globalFallbackExecutor);
}
usages--;
if (usages == 0) {
globalPrimaryExecutor.shutdown();
globalFallbackExecutor.shutdown();
while (true) {
try {
if (globalPrimaryExecutor != null && globalPrimaryExecutor.awaitTermination(60, TimeUnit.SECONDS)) {
globalPrimaryExecutor = null;
}
if (globalFallbackExecutor != null && globalFallbackExecutor.awaitTermination(60, TimeUnit.SECONDS)) {
globalFallbackExecutor = null;
}
if (globalFallbackExecutor == null && globalFallbackExecutor == null) {
return;
}
} catch (InterruptedException e) {}
}
}
}
} | while (true) { | private static void release(ExecutorWithFallback executor) {
synchronized (globalLock) {
if (executor.primary != globalPrimaryExecutor) {
throw new IllegalStateException("primary executor " + executor.primary + " != " + globalPrimaryExecutor);
}
if (executor.secondary != globalFallbackExecutor) {
throw new IllegalStateException("secondary executor " + executor.secondary + " != " + globalFallbackExecutor);
}
usages--;
if (usages == 0) {
globalPrimaryExecutor.shutdown();
globalFallbackExecutor.shutdown();
join(globalPrimaryExecutor);
globalPrimaryExecutor = null;
join(globalFallbackExecutor);
globalFallbackExecutor = null;
}
}
} | class ExecutorWithFallback implements Executor {
private final Executor primary;
private final Executor secondary;
// primary: the preferred executor, which may reject work when saturated;
// secondary: the overflow lane used to retry rejected tasks (see execute()).
ExecutorWithFallback(Executor primary, Executor secondary) {
this.primary = primary;
this.secondary = secondary;
}
@Override
public void execute(Runnable command) {
// Try the primary executor first; if it rejects the task, hand it to the
// secondary executor, which keeps retrying the primary until it accepts.
try {
primary.execute(command);
} catch (RejectedExecutionException e1) {
secondary.execute(() -> retryForEver(command));
}
}
// Retries submitting the command to the primary executor until it is accepted,
// sleeping 1 ms between rejections. Runs on the secondary executor (see execute()).
// IMPROVED: the original silently swallowed InterruptedException, permanently
// losing the thread's interrupt status; we now restore it once the task is handed off.
private void retryForEver(Runnable command) {
    boolean interrupted = false;
    try {
        while (true) {
            try {
                primary.execute(command);
                return;
            } catch (RejectedExecutionException rejected) {
                try {
                    Thread.sleep(1);
                } catch (InterruptedException e) {
                    interrupted = true;  // remember, but keep retrying until the hand-off succeeds
                }
            }
        }
    } finally {
        if (interrupted) Thread.currentThread().interrupt();
    }
}
} | class ExecutorWithFallback implements Executor {
private final Executor primary;
private final Executor secondary;
ExecutorWithFallback(Executor primary, Executor secondary) {
this.primary = primary;
this.secondary = secondary;
}
@Override
public void execute(Runnable command) {
try {
primary.execute(command);
} catch (RejectedExecutionException e1) {
secondary.execute(() -> retryForever(command));
}
}
private void retryForever(Runnable command) {
while (true) {
try {
primary.execute(command);
return;
} catch (RejectedExecutionException rejected) {
try {
Thread.sleep(1);
} catch (InterruptedException silenced) { }
}
}
}
} |
extra space | private static ExecutorWithFallback acquire() {
    // Hands out the shared executor pair, lazily creating both on first use.
    // Invariant: primary and fallback are created and destroyed together, and
    // usages is 0 whenever they are absent — verified defensively below.
    synchronized (globalLock) {
        if (globalPrimaryExecutor == null) {
            if (globalFallbackExecutor != null) {
                throw new IllegalStateException("fallback executor " + globalFallbackExecutor + " should be null");
            }
            if (usages != 0) {
                throw new IllegalStateException("usages " + usages + " != 0");
            }
            globalPrimaryExecutor = new ThreadPoolExecutor(1, 64, 1L, TimeUnit.SECONDS,
                    new SynchronousQueue<>(), ThreadFactoryFactory.getDaemonThreadFactory("jrt.connector.primary"));
            globalFallbackExecutor = Executors.newSingleThreadExecutor(ThreadFactoryFactory.getDaemonThreadFactory("jrt.connector.fallback"));
        }
        usages++;
        return new ExecutorWithFallback(globalPrimaryExecutor, globalFallbackExecutor);
    }
} | usages ++; | private static ExecutorWithFallback acquire() {
synchronized (globalLock) {
if (globalPrimaryExecutor == null) {
if (globalFallbackExecutor != null) {
throw new IllegalStateException("fallback executor must be null !");
}
if (usages != 0) {
throw new IllegalStateException("usages " + usages + " != 0");
}
globalPrimaryExecutor = new ThreadPoolExecutor(1, 64, 1L, TimeUnit.SECONDS,
new SynchronousQueue<>(), ThreadFactoryFactory.getDaemonThreadFactory("jrt.connector.primary"));
globalFallbackExecutor = Executors.newSingleThreadExecutor(ThreadFactoryFactory.getDaemonThreadFactory("jrt.connector.fallback"));
}
usages++;
return new ExecutorWithFallback(globalPrimaryExecutor, globalFallbackExecutor);
}
} | class ExecutorWithFallback implements Executor {
private final Executor primary;
private final Executor secondary;
ExecutorWithFallback(Executor primary, Executor secondary) {
this.primary = primary;
this.secondary = secondary;
}
@Override
public void execute(Runnable command) {
try {
primary.execute(command);
} catch (RejectedExecutionException e1) {
secondary.execute(() -> retryForEver(command));
}
}
private void retryForEver(Runnable command) {
while (true) {
try {
primary.execute(command);
return;
} catch (RejectedExecutionException rejected) {
try {
Thread.sleep(1);
} catch (InterruptedException silenced) { }
}
}
}
} | class ExecutorWithFallback implements Executor {
private final Executor primary;
private final Executor secondary;
ExecutorWithFallback(Executor primary, Executor secondary) {
this.primary = primary;
this.secondary = secondary;
}
@Override
public void execute(Runnable command) {
try {
primary.execute(command);
} catch (RejectedExecutionException e1) {
secondary.execute(() -> retryForever(command));
}
}
private void retryForever(Runnable command) {
while (true) {
try {
primary.execute(command);
return;
} catch (RejectedExecutionException rejected) {
try {
Thread.sleep(1);
} catch (InterruptedException silenced) { }
}
}
}
} |
consider checking usages and secondary pointer for consistency | private static ExecutorWithFallback acquire() {
synchronized (globalLock) {
if (globalPrimaryExecutor == null) {
globalPrimaryExecutor = new ThreadPoolExecutor(1, 64, 1L, TimeUnit.SECONDS,
new SynchronousQueue<>(), ThreadFactoryFactory.getDaemonThreadFactory("jrt.connector.primary"));
globalFallbackExecutor = Executors.newSingleThreadExecutor(ThreadFactoryFactory.getDaemonThreadFactory("jrt.connector.fallback"));
}
usages ++;
return new ExecutorWithFallback(globalPrimaryExecutor, globalFallbackExecutor);
}
} | globalPrimaryExecutor = new ThreadPoolExecutor(1, 64, 1L, TimeUnit.SECONDS, | private static ExecutorWithFallback acquire() {
synchronized (globalLock) {
if (globalPrimaryExecutor == null) {
if (globalFallbackExecutor != null) {
throw new IllegalStateException("fallback executor must be null !");
}
if (usages != 0) {
throw new IllegalStateException("usages " + usages + " != 0");
}
globalPrimaryExecutor = new ThreadPoolExecutor(1, 64, 1L, TimeUnit.SECONDS,
new SynchronousQueue<>(), ThreadFactoryFactory.getDaemonThreadFactory("jrt.connector.primary"));
globalFallbackExecutor = Executors.newSingleThreadExecutor(ThreadFactoryFactory.getDaemonThreadFactory("jrt.connector.fallback"));
}
usages++;
return new ExecutorWithFallback(globalPrimaryExecutor, globalFallbackExecutor);
}
} | class ExecutorWithFallback implements Executor {
private final Executor primary;
private final Executor secondary;
ExecutorWithFallback(Executor primary, Executor secondary) {
this.primary = primary;
this.secondary = secondary;
}
@Override
public void execute(Runnable command) {
try {
primary.execute(command);
} catch (RejectedExecutionException e1) {
secondary.execute(() -> retryForEver(command));
}
}
private void retryForEver(Runnable command) {
while (true) {
try {
primary.execute(command);
return;
} catch (RejectedExecutionException rejected) {
try {
Thread.sleep(1);
} catch (InterruptedException silenced) { }
}
}
}
} | class ExecutorWithFallback implements Executor {
private final Executor primary;
private final Executor secondary;
ExecutorWithFallback(Executor primary, Executor secondary) {
this.primary = primary;
this.secondary = secondary;
}
@Override
public void execute(Runnable command) {
try {
primary.execute(command);
} catch (RejectedExecutionException e1) {
secondary.execute(() -> retryForever(command));
}
}
private void retryForever(Runnable command) {
while (true) {
try {
primary.execute(command);
return;
} catch (RejectedExecutionException rejected) {
try {
Thread.sleep(1);
} catch (InterruptedException silenced) { }
}
}
}
} |
Fixed | private static ExecutorWithFallback acquire() {
synchronized (globalLock) {
if (globalPrimaryExecutor == null) {
globalPrimaryExecutor = new ThreadPoolExecutor(1, 64, 1L, TimeUnit.SECONDS,
new SynchronousQueue<>(), ThreadFactoryFactory.getDaemonThreadFactory("jrt.connector.primary"));
globalFallbackExecutor = Executors.newSingleThreadExecutor(ThreadFactoryFactory.getDaemonThreadFactory("jrt.connector.fallback"));
}
usages ++;
return new ExecutorWithFallback(globalPrimaryExecutor, globalFallbackExecutor);
}
} | usages ++; | private static ExecutorWithFallback acquire() {
synchronized (globalLock) {
if (globalPrimaryExecutor == null) {
if (globalFallbackExecutor != null) {
throw new IllegalStateException("fallback executor must be null !");
}
if (usages != 0) {
throw new IllegalStateException("usages " + usages + " != 0");
}
globalPrimaryExecutor = new ThreadPoolExecutor(1, 64, 1L, TimeUnit.SECONDS,
new SynchronousQueue<>(), ThreadFactoryFactory.getDaemonThreadFactory("jrt.connector.primary"));
globalFallbackExecutor = Executors.newSingleThreadExecutor(ThreadFactoryFactory.getDaemonThreadFactory("jrt.connector.fallback"));
}
usages++;
return new ExecutorWithFallback(globalPrimaryExecutor, globalFallbackExecutor);
}
} | class ExecutorWithFallback implements Executor {
private final Executor primary;
private final Executor secondary;
ExecutorWithFallback(Executor primary, Executor secondary) {
this.primary = primary;
this.secondary = secondary;
}
@Override
public void execute(Runnable command) {
try {
primary.execute(command);
} catch (RejectedExecutionException e1) {
secondary.execute(() -> retryForEver(command));
}
}
private void retryForEver(Runnable command) {
while (true) {
try {
primary.execute(command);
return;
} catch (RejectedExecutionException rejected) {
try {
Thread.sleep(1);
} catch (InterruptedException silenced) { }
}
}
}
} | class ExecutorWithFallback implements Executor {
private final Executor primary;
private final Executor secondary;
ExecutorWithFallback(Executor primary, Executor secondary) {
this.primary = primary;
this.secondary = secondary;
}
@Override
public void execute(Runnable command) {
try {
primary.execute(command);
} catch (RejectedExecutionException e1) {
secondary.execute(() -> retryForever(command));
}
}
private void retryForever(Runnable command) {
while (true) {
try {
primary.execute(command);
return;
} catch (RejectedExecutionException rejected) {
try {
Thread.sleep(1);
} catch (InterruptedException silenced) { }
}
}
}
} |
Fixed | private static void release(ExecutorWithFallback executor) {
synchronized (globalLock) {
if (executor.primary != globalPrimaryExecutor) {
throw new IllegalStateException("primary executor " + executor.primary + " != " + globalPrimaryExecutor);
}
if (executor.secondary != globalFallbackExecutor) {
throw new IllegalStateException("secondary executor " + executor.secondary + " != " + globalFallbackExecutor);
}
usages--;
if (usages == 0) {
globalPrimaryExecutor.shutdown();
globalFallbackExecutor.shutdown();
while (true) {
try {
if (globalPrimaryExecutor != null && globalPrimaryExecutor.awaitTermination(60, TimeUnit.SECONDS)) {
globalPrimaryExecutor = null;
}
if (globalFallbackExecutor != null && globalFallbackExecutor.awaitTermination(60, TimeUnit.SECONDS)) {
globalFallbackExecutor = null;
}
if (globalFallbackExecutor == null && globalFallbackExecutor == null) {
return;
}
} catch (InterruptedException e) {}
}
}
}
} | while (true) { | private static void release(ExecutorWithFallback executor) {
synchronized (globalLock) {
if (executor.primary != globalPrimaryExecutor) {
throw new IllegalStateException("primary executor " + executor.primary + " != " + globalPrimaryExecutor);
}
if (executor.secondary != globalFallbackExecutor) {
throw new IllegalStateException("secondary executor " + executor.secondary + " != " + globalFallbackExecutor);
}
usages--;
if (usages == 0) {
globalPrimaryExecutor.shutdown();
globalFallbackExecutor.shutdown();
join(globalPrimaryExecutor);
globalPrimaryExecutor = null;
join(globalFallbackExecutor);
globalFallbackExecutor = null;
}
}
} | class ExecutorWithFallback implements Executor {
private final Executor primary;
private final Executor secondary;
ExecutorWithFallback(Executor primary, Executor secondary) {
this.primary = primary;
this.secondary = secondary;
}
@Override
public void execute(Runnable command) {
try {
primary.execute(command);
} catch (RejectedExecutionException e1) {
secondary.execute(() -> retryForEver(command));
}
}
private void retryForEver(Runnable command) {
while (true) {
try {
primary.execute(command);
return;
} catch (RejectedExecutionException rejected) {
try {
Thread.sleep(1);
} catch (InterruptedException silenced) { }
}
}
}
} | class ExecutorWithFallback implements Executor {
private final Executor primary;
private final Executor secondary;
ExecutorWithFallback(Executor primary, Executor secondary) {
this.primary = primary;
this.secondary = secondary;
}
@Override
public void execute(Runnable command) {
try {
primary.execute(command);
} catch (RejectedExecutionException e1) {
secondary.execute(() -> retryForever(command));
}
}
private void retryForever(Runnable command) {
while (true) {
try {
primary.execute(command);
return;
} catch (RejectedExecutionException rejected) {
try {
Thread.sleep(1);
} catch (InterruptedException silenced) { }
}
}
}
} |
Fixed | private static ExecutorWithFallback acquire() {
synchronized (globalLock) {
if (globalPrimaryExecutor == null) {
globalPrimaryExecutor = new ThreadPoolExecutor(1, 64, 1L, TimeUnit.SECONDS,
new SynchronousQueue<>(), ThreadFactoryFactory.getDaemonThreadFactory("jrt.connector.primary"));
globalFallbackExecutor = Executors.newSingleThreadExecutor(ThreadFactoryFactory.getDaemonThreadFactory("jrt.connector.fallback"));
}
usages ++;
return new ExecutorWithFallback(globalPrimaryExecutor, globalFallbackExecutor);
}
} | globalPrimaryExecutor = new ThreadPoolExecutor(1, 64, 1L, TimeUnit.SECONDS, | private static ExecutorWithFallback acquire() {
synchronized (globalLock) {
if (globalPrimaryExecutor == null) {
if (globalFallbackExecutor != null) {
throw new IllegalStateException("fallback executor must be null !");
}
if (usages != 0) {
throw new IllegalStateException("usages " + usages + " != 0");
}
globalPrimaryExecutor = new ThreadPoolExecutor(1, 64, 1L, TimeUnit.SECONDS,
new SynchronousQueue<>(), ThreadFactoryFactory.getDaemonThreadFactory("jrt.connector.primary"));
globalFallbackExecutor = Executors.newSingleThreadExecutor(ThreadFactoryFactory.getDaemonThreadFactory("jrt.connector.fallback"));
}
usages++;
return new ExecutorWithFallback(globalPrimaryExecutor, globalFallbackExecutor);
}
} | class ExecutorWithFallback implements Executor {
private final Executor primary;
private final Executor secondary;
ExecutorWithFallback(Executor primary, Executor secondary) {
this.primary = primary;
this.secondary = secondary;
}
@Override
public void execute(Runnable command) {
try {
primary.execute(command);
} catch (RejectedExecutionException e1) {
secondary.execute(() -> retryForEver(command));
}
}
private void retryForEver(Runnable command) {
while (true) {
try {
primary.execute(command);
return;
} catch (RejectedExecutionException rejected) {
try {
Thread.sleep(1);
} catch (InterruptedException silenced) { }
}
}
}
} | class ExecutorWithFallback implements Executor {
private final Executor primary;
private final Executor secondary;
ExecutorWithFallback(Executor primary, Executor secondary) {
this.primary = primary;
this.secondary = secondary;
}
@Override
public void execute(Runnable command) {
try {
primary.execute(command);
} catch (RejectedExecutionException e1) {
secondary.execute(() -> retryForever(command));
}
}
private void retryForever(Runnable command) {
while (true) {
try {
primary.execute(command);
return;
} catch (RejectedExecutionException rejected) {
try {
Thread.sleep(1);
} catch (InterruptedException silenced) { }
}
}
}
} |
If more than one of WANT_TO_RETIRE, ipAddresses, additionalIpAddresses are specified in the patch request, then this PR will invoke `nodeRepository.list(lock)` that many times. This happens e.g. on provisioning, when the last two are specified. At the risk of making this much more convoluted, consider listing at most once. | private Node applyField(Node node, String name, Inspector value) {
switch (name) {
case "currentRebootGeneration" :
return node.withCurrentRebootGeneration(asLong(value), clock.instant());
case "currentRestartGeneration" :
return patchCurrentRestartGeneration(asLong(value));
case "currentDockerImage" :
if (node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
throw new IllegalArgumentException("Docker image can only be set for docker containers");
return node.with(node.status().withDockerImage(DockerImage.fromString(asString(value))));
case "vespaVersion" :
case "currentVespaVersion" :
return node.with(node.status().withVespaVersion(Version.fromString(asString(value))));
case "currentOsVersion" :
return node.withCurrentOsVersion(Version.fromString(asString(value)), clock.instant());
case "currentFirmwareCheck":
return node.withFirmwareVerifiedAt(Instant.ofEpochMilli(asLong(value)));
case "failCount" :
return node.with(node.status().setFailCount(asLong(value).intValue()));
case "flavor" :
return node.with(nodeFlavors.getFlavorOrThrow(asString(value)));
case "parentHostname" :
return node.withParentHostname(asString(value));
case "ipAddresses" :
return IP.Config.verify(node.with(node.ipConfig().with(asStringSet(value))), nodes.get());
case "additionalIpAddresses" :
return IP.Config.verify(node.with(node.ipConfig().with(IP.Pool.of(asStringSet(value)))), nodes.get());
case WANT_TO_RETIRE :
return node.withWantToRetire(asBoolean(value), Agent.operator, clock.instant());
case "wantToDeprovision" :
if (node.type() != NodeType.host && asBoolean(value))
throw new IllegalArgumentException("wantToDeprovision can only be set for hosts");
return node.with(node.status().withWantToDeprovision(asBoolean(value)));
case "reports" :
return nodeWithPatchedReports(node, value);
case "openStackId" :
return node.withOpenStackId(asString(value));
case "diskGb":
case "minDiskAvailableGb":
return node.with(node.flavor().with(node.flavor().resources().withDiskGb(value.asDouble())));
case "memoryGb":
case "minMainMemoryAvailableGb":
return node.with(node.flavor().with(node.flavor().resources().withMemoryGb(value.asDouble())));
case "vcpu":
case "minCpuCores":
return node.with(node.flavor().with(node.flavor().resources().withVcpu(value.asDouble())));
case "fastDisk":
return node.with(node.flavor().with(node.flavor().resources().with(value.asBool() ? fast : slow)));
case "remoteStorage":
return node.with(node.flavor().with(node.flavor().resources().with(value.asBool() ? remote : local)));
case "bandwidthGbps":
return node.with(node.flavor().with(node.flavor().resources().withBandwidthGbps(value.asDouble())));
case "modelName":
return value.type() == Type.NIX ? node.withoutModelName() : node.withModelName(asString(value));
case "requiredDiskSpeed":
return patchRequiredDiskSpeed(asString(value));
case "reservedTo":
return value.type() == Type.NIX ? node.withoutReservedTo() : node.withReservedTo(TenantName.from(value.asString()));
default :
throw new IllegalArgumentException("Could not apply field '" + name + "' on a node: No such modifiable field");
}
} | return IP.Config.verify(node.with(node.ipConfig().with(IP.Pool.of(asStringSet(value)))), nodes.get()); | private Node applyField(Node node, String name, Inspector value) {
switch (name) {
case "currentRebootGeneration" :
return node.withCurrentRebootGeneration(asLong(value), clock.instant());
case "currentRestartGeneration" :
return patchCurrentRestartGeneration(asLong(value));
case "currentDockerImage" :
if (node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
throw new IllegalArgumentException("Docker image can only be set for docker containers");
return node.with(node.status().withDockerImage(DockerImage.fromString(asString(value))));
case "vespaVersion" :
case "currentVespaVersion" :
return node.with(node.status().withVespaVersion(Version.fromString(asString(value))));
case "currentOsVersion" :
return node.withCurrentOsVersion(Version.fromString(asString(value)), clock.instant());
case "currentFirmwareCheck":
return node.withFirmwareVerifiedAt(Instant.ofEpochMilli(asLong(value)));
case "failCount" :
return node.with(node.status().setFailCount(asLong(value).intValue()));
case "flavor" :
return node.with(nodeFlavors.getFlavorOrThrow(asString(value)));
case "parentHostname" :
return node.withParentHostname(asString(value));
case "ipAddresses" :
return IP.Config.verify(node.with(node.ipConfig().with(asStringSet(value))), nodes.get());
case "additionalIpAddresses" :
return IP.Config.verify(node.with(node.ipConfig().with(IP.Pool.of(asStringSet(value)))), nodes.get());
case WANT_TO_RETIRE :
return node.withWantToRetire(asBoolean(value), Agent.operator, clock.instant());
case "wantToDeprovision" :
if (node.type() != NodeType.host && asBoolean(value))
throw new IllegalArgumentException("wantToDeprovision can only be set for hosts");
return node.with(node.status().withWantToDeprovision(asBoolean(value)));
case "reports" :
return nodeWithPatchedReports(node, value);
case "openStackId" :
return node.withOpenStackId(asString(value));
case "diskGb":
case "minDiskAvailableGb":
return node.with(node.flavor().with(node.flavor().resources().withDiskGb(value.asDouble())));
case "memoryGb":
case "minMainMemoryAvailableGb":
return node.with(node.flavor().with(node.flavor().resources().withMemoryGb(value.asDouble())));
case "vcpu":
case "minCpuCores":
return node.with(node.flavor().with(node.flavor().resources().withVcpu(value.asDouble())));
case "fastDisk":
return node.with(node.flavor().with(node.flavor().resources().with(value.asBool() ? fast : slow)));
case "remoteStorage":
return node.with(node.flavor().with(node.flavor().resources().with(value.asBool() ? remote : local)));
case "bandwidthGbps":
return node.with(node.flavor().with(node.flavor().resources().withBandwidthGbps(value.asDouble())));
case "modelName":
return value.type() == Type.NIX ? node.withoutModelName() : node.withModelName(asString(value));
case "requiredDiskSpeed":
return patchRequiredDiskSpeed(asString(value));
case "reservedTo":
return value.type() == Type.NIX ? node.withoutReservedTo() : node.withReservedTo(TenantName.from(value.asString()));
default :
throw new IllegalArgumentException("Could not apply field '" + name + "' on a node: No such modifiable field");
}
} | class NodePatcher {
private static final String WANT_TO_RETIRE = "wantToRetire";
private final NodeFlavors nodeFlavors;
private final Inspector inspector;
private final Supplier<LockedNodeList> nodes;
private final Clock clock;
private Node node;
public NodePatcher(NodeFlavors nodeFlavors, InputStream json, Node node, Supplier<LockedNodeList> nodes, Clock clock) {
this.nodeFlavors = nodeFlavors;
this.node = node;
this.nodes = nodes;
this.clock = clock;
try {
this.inspector = SlimeUtils.jsonToSlime(IOUtils.readBytes(json, 1000 * 1000)).get();
} catch (IOException e) {
throw new UncheckedIOException("Error reading request body", e);
}
}
/**
* Apply the json to the node and return all nodes affected by the patch.
* More than 1 node may be affected if e.g. the node is a Docker host, which may have
* children that must be updated in a consistent manner.
*/
public List<Node> apply() {
List<Node> patchedNodes = new ArrayList<>();
inspector.traverse((String name, Inspector value) -> {
try {
node = applyField(node, name, value);
} catch (IllegalArgumentException e) {
// Wrap with the offending field name for a clearer client-facing error.
throw new IllegalArgumentException("Could not set field '" + name + "'", e);
}
try {
patchedNodes.addAll(applyFieldRecursive(name, value));
} catch (IllegalArgumentException e) {
// Deliberately ignored: applyFieldRecursive throws for every field that is
// not recursive, which is the common case (only WANT_TO_RETIRE recurses).
}
} );
patchedNodes.add(node);
return patchedNodes;
}
private List<Node> applyFieldRecursive(String name, Inspector value) {
switch (name) {
case WANT_TO_RETIRE:
List<Node> childNodes = node.type().isDockerHost() ? nodes.get().childrenOf(node).asList() : List.of();
return childNodes.stream()
.map(child -> applyField(child, name, value))
.collect(Collectors.toList());
default :
throw new IllegalArgumentException("Field " + name + " is not recursive");
}
}
// Applies a JSON patch to the node's reports: a NIX value clears all reports;
// otherwise each entry either removes one report (NIX value) or upserts it.
// Also toggles wantToDeprovision when the hard-fail state changes.
private Node nodeWithPatchedReports(Node node, Inspector reportsInspector) {
Node patchedNode;
if (reportsInspector.type() == Type.NIX) {
patchedNode = node.with(new Reports());
} else {
var reportsBuilder = new Reports.Builder(node.reports());
reportsInspector.traverse((ObjectTraverser) (reportId, reportInspector) -> {
if (reportInspector.type() == Type.NIX) {
reportsBuilder.clearReport(reportId);
} else {
reportsBuilder.setReport(Report.fromSlime(reportId, reportInspector));
}
});
patchedNode = node.with(reportsBuilder.build());
}
boolean hadHardFailReports = node.reports().getReports().stream()
.anyMatch(r -> r.getType() == Report.Type.HARD_FAIL);
boolean hasHardFailReports = patchedNode.reports().getReports().stream()
.anyMatch(r -> r.getType() == Report.Type.HARD_FAIL);
// Only act when the hard-fail state flips. Skip (keep current deprovision
// flag) if hard failures appeared on an already-failed node, or the node is parked.
if (hadHardFailReports != hasHardFailReports) {
if ((hasHardFailReports && node.state() == Node.State.failed) || node.state() == Node.State.parked)
return patchedNode;
patchedNode = patchedNode.with(patchedNode.status().withWantToDeprovision(hasHardFailReports));
}
return patchedNode;
}
private Set<String> asStringSet(Inspector field) {
if ( ! field.type().equals(Type.ARRAY))
throw new IllegalArgumentException("Expected an ARRAY value, got a " + field.type());
TreeSet<String> strings = new TreeSet<>();
for (int i = 0; i < field.entries(); i++) {
Inspector entry = field.entry(i);
if ( ! entry.type().equals(Type.STRING))
throw new IllegalArgumentException("Expected a STRING value, got a " + entry.type());
strings.add(entry.asString());
}
return strings;
}
private Node patchRequiredDiskSpeed(String value) {
Optional<Allocation> allocation = node.allocation();
if (allocation.isPresent())
return node.with(allocation.get().withRequestedResources(
allocation.get().requestedResources().with(NodeResources.DiskSpeed.valueOf(value))));
else
throw new IllegalArgumentException("Node is not allocated");
}
private Node patchCurrentRestartGeneration(Long value) {
Optional<Allocation> allocation = node.allocation();
if (allocation.isPresent())
return node.with(allocation.get().withRestart(allocation.get().restartGeneration().withCurrent(value)));
else
throw new IllegalArgumentException("Node is not allocated");
}
private Long asLong(Inspector field) {
if ( ! field.type().equals(Type.LONG))
throw new IllegalArgumentException("Expected a LONG value, got a " + field.type());
return field.asLong();
}
private String asString(Inspector field) {
if ( ! field.type().equals(Type.STRING))
throw new IllegalArgumentException("Expected a STRING value, got a " + field.type());
return field.asString();
}
private Optional<String> asOptionalString(Inspector field) {
return field.type().equals(Type.NIX) ? Optional.empty() : Optional.of(asString(field));
}
private Optional<String> removeQuotedNulls(Optional<String> value) {
return value.filter(v -> !v.equals("null"));
}
private boolean asBoolean(Inspector field) {
if ( ! field.type().equals(Type.BOOL))
throw new IllegalArgumentException("Expected a BOOL value, got a " + field.type());
return field.asBool();
}
} | class NodePatcher {
private static final String WANT_TO_RETIRE = "wantToRetire";
private final NodeFlavors nodeFlavors;
private final Inspector inspector;
private final Supplier<LockedNodeList> nodes;
private final Clock clock;
private Node node;
public NodePatcher(NodeFlavors nodeFlavors, InputStream json, Node node, Supplier<LockedNodeList> nodes, Clock clock) {
this.nodeFlavors = nodeFlavors;
this.node = node;
this.nodes = nodes;
this.clock = clock;
try {
this.inspector = SlimeUtils.jsonToSlime(IOUtils.readBytes(json, 1000 * 1000)).get();
} catch (IOException e) {
throw new UncheckedIOException("Error reading request body", e);
}
}
/**
* Apply the json to the node and return all nodes affected by the patch.
* More than 1 node may be affected if e.g. the node is a Docker host, which may have
* children that must be updated in a consistent manner.
*/
public List<Node> apply() {
List<Node> patchedNodes = new ArrayList<>();
inspector.traverse((String name, Inspector value) -> {
try {
node = applyField(node, name, value);
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Could not set field '" + name + "'", e);
}
try {
patchedNodes.addAll(applyFieldRecursive(name, value));
} catch (IllegalArgumentException e) {
}
} );
patchedNodes.add(node);
return patchedNodes;
}
private List<Node> applyFieldRecursive(String name, Inspector value) {
switch (name) {
case WANT_TO_RETIRE:
List<Node> childNodes = node.type().isDockerHost() ? nodes.get().childrenOf(node).asList() : List.of();
return childNodes.stream()
.map(child -> applyField(child, name, value))
.collect(Collectors.toList());
default :
throw new IllegalArgumentException("Field " + name + " is not recursive");
}
}
private Node nodeWithPatchedReports(Node node, Inspector reportsInspector) {
Node patchedNode;
if (reportsInspector.type() == Type.NIX) {
patchedNode = node.with(new Reports());
} else {
var reportsBuilder = new Reports.Builder(node.reports());
reportsInspector.traverse((ObjectTraverser) (reportId, reportInspector) -> {
if (reportInspector.type() == Type.NIX) {
reportsBuilder.clearReport(reportId);
} else {
reportsBuilder.setReport(Report.fromSlime(reportId, reportInspector));
}
});
patchedNode = node.with(reportsBuilder.build());
}
boolean hadHardFailReports = node.reports().getReports().stream()
.anyMatch(r -> r.getType() == Report.Type.HARD_FAIL);
boolean hasHardFailReports = patchedNode.reports().getReports().stream()
.anyMatch(r -> r.getType() == Report.Type.HARD_FAIL);
if (hadHardFailReports != hasHardFailReports) {
if ((hasHardFailReports && node.state() == Node.State.failed) || node.state() == Node.State.parked)
return patchedNode;
patchedNode = patchedNode.with(patchedNode.status().withWantToDeprovision(hasHardFailReports));
}
return patchedNode;
}
private Set<String> asStringSet(Inspector field) {
if ( ! field.type().equals(Type.ARRAY))
throw new IllegalArgumentException("Expected an ARRAY value, got a " + field.type());
TreeSet<String> strings = new TreeSet<>();
for (int i = 0; i < field.entries(); i++) {
Inspector entry = field.entry(i);
if ( ! entry.type().equals(Type.STRING))
throw new IllegalArgumentException("Expected a STRING value, got a " + entry.type());
strings.add(entry.asString());
}
return strings;
}
/**
 * Returns a copy of this node with the requested disk speed set to the given value.
 * Throws IllegalArgumentException if the node is not allocated or the value is not
 * a valid NodeResources.DiskSpeed name.
 */
private Node patchRequiredDiskSpeed(String value) {
    Allocation allocation = node.allocation()
                                .orElseThrow(() -> new IllegalArgumentException("Node is not allocated"));
    NodeResources.DiskSpeed diskSpeed = NodeResources.DiskSpeed.valueOf(value);
    return node.with(allocation.withRequestedResources(allocation.requestedResources().with(diskSpeed)));
}
/**
 * Returns a copy of this node with the current restart generation set to the given
 * value. Throws IllegalArgumentException if the node is not allocated.
 */
private Node patchCurrentRestartGeneration(Long value) {
    Allocation allocation = node.allocation()
                                .orElseThrow(() -> new IllegalArgumentException("Node is not allocated"));
    return node.with(allocation.withRestart(allocation.restartGeneration().withCurrent(value)));
}
/** Reads the given inspector as a LONG value, or throws IllegalArgumentException. */
private Long asLong(Inspector field) {
    if (field.type() == Type.LONG) return field.asLong();
    throw new IllegalArgumentException("Expected a LONG value, got a " + field.type());
}
/** Reads the given inspector as a STRING value, or throws IllegalArgumentException. */
private String asString(Inspector field) {
    if (field.type() == Type.STRING) return field.asString();
    throw new IllegalArgumentException("Expected a STRING value, got a " + field.type());
}
/** Reads the given inspector as an optional STRING: NIX maps to empty. */
private Optional<String> asOptionalString(Inspector field) {
    if (field.type() == Type.NIX) return Optional.empty();
    return Optional.of(asString(field));
}
/** Maps a present value equal to the literal string "null" to empty; otherwise returns the value unchanged. */
private Optional<String> removeQuotedNulls(Optional<String> value) {
    if (value.isPresent() && value.get().equals("null")) return Optional.empty();
    return value;
}
/** Reads the given inspector as a BOOL value, or throws IllegalArgumentException. */
private boolean asBoolean(Inspector field) {
    if (field.type() == Type.BOOL) return field.asBool();
    throw new IllegalArgumentException("Expected a BOOL value, got a " + field.type());
}
} |
The overall impact of this PR should be a great win, so merging now... | private Node applyField(Node node, String name, Inspector value) {
// Applies one patched JSON field to the given node and returns the updated node.
// Unknown field names fall through to the default case and throw IllegalArgumentException.
switch (name) {
// -- generation counters and current versions --
case "currentRebootGeneration" :
return node.withCurrentRebootGeneration(asLong(value), clock.instant());
case "currentRestartGeneration" :
return patchCurrentRestartGeneration(asLong(value));
case "currentDockerImage" :
if (node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
throw new IllegalArgumentException("Docker image can only be set for docker containers");
return node.with(node.status().withDockerImage(DockerImage.fromString(asString(value))));
case "vespaVersion" :
case "currentVespaVersion" :
return node.with(node.status().withVespaVersion(Version.fromString(asString(value))));
case "currentOsVersion" :
return node.withCurrentOsVersion(Version.fromString(asString(value)), clock.instant());
case "currentFirmwareCheck":
return node.withFirmwareVerifiedAt(Instant.ofEpochMilli(asLong(value)));
case "failCount" :
return node.with(node.status().setFailCount(asLong(value).intValue()));
case "flavor" :
return node.with(nodeFlavors.getFlavorOrThrow(asString(value)));
case "parentHostname" :
return node.withParentHostname(asString(value));
// -- IP configuration; verified against the full node list for uniqueness --
case "ipAddresses" :
return IP.Config.verify(node.with(node.ipConfig().with(asStringSet(value))), nodes.get());
case "additionalIpAddresses" :
return IP.Config.verify(node.with(node.ipConfig().with(IP.Pool.of(asStringSet(value)))), nodes.get());
// -- operator intents --
case WANT_TO_RETIRE :
return node.withWantToRetire(asBoolean(value), Agent.operator, clock.instant());
case "wantToDeprovision" :
if (node.type() != NodeType.host && asBoolean(value))
throw new IllegalArgumentException("wantToDeprovision can only be set for hosts");
return node.with(node.status().withWantToDeprovision(asBoolean(value)));
case "reports" :
return nodeWithPatchedReports(node, value);
case "openStackId" :
return node.withOpenStackId(asString(value));
// -- flavor resources; the legacy aliases (min*) map to the same setters --
case "diskGb":
case "minDiskAvailableGb":
return node.with(node.flavor().with(node.flavor().resources().withDiskGb(value.asDouble())));
case "memoryGb":
case "minMainMemoryAvailableGb":
return node.with(node.flavor().with(node.flavor().resources().withMemoryGb(value.asDouble())));
case "vcpu":
case "minCpuCores":
return node.with(node.flavor().with(node.flavor().resources().withVcpu(value.asDouble())));
case "fastDisk":
return node.with(node.flavor().with(node.flavor().resources().with(value.asBool() ? fast : slow)));
case "remoteStorage":
return node.with(node.flavor().with(node.flavor().resources().with(value.asBool() ? remote : local)));
case "bandwidthGbps":
return node.with(node.flavor().with(node.flavor().resources().withBandwidthGbps(value.asDouble())));
// -- fields where a NIX value clears the setting --
case "modelName":
return value.type() == Type.NIX ? node.withoutModelName() : node.withModelName(asString(value));
case "requiredDiskSpeed":
return patchRequiredDiskSpeed(asString(value));
case "reservedTo":
return value.type() == Type.NIX ? node.withoutReservedTo() : node.withReservedTo(TenantName.from(value.asString()));
default :
throw new IllegalArgumentException("Could not apply field '" + name + "' on a node: No such modifiable field");
}
} | return IP.Config.verify(node.with(node.ipConfig().with(IP.Pool.of(asStringSet(value)))), nodes.get()); | private Node applyField(Node node, String name, Inspector value) {
// Applies one patched JSON field to the given node and returns the updated node;
// unknown fields throw IllegalArgumentException via the default case.
switch (name) {
case "currentRebootGeneration" :
return node.withCurrentRebootGeneration(asLong(value), clock.instant());
case "currentRestartGeneration" :
return patchCurrentRestartGeneration(asLong(value));
case "currentDockerImage" :
if (node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
throw new IllegalArgumentException("Docker image can only be set for docker containers");
return node.with(node.status().withDockerImage(DockerImage.fromString(asString(value))));
case "vespaVersion" :
case "currentVespaVersion" :
return node.with(node.status().withVespaVersion(Version.fromString(asString(value))));
case "currentOsVersion" :
return node.withCurrentOsVersion(Version.fromString(asString(value)), clock.instant());
case "currentFirmwareCheck":
return node.withFirmwareVerifiedAt(Instant.ofEpochMilli(asLong(value)));
case "failCount" :
return node.with(node.status().setFailCount(asLong(value).intValue()));
case "flavor" :
return node.with(nodeFlavors.getFlavorOrThrow(asString(value)));
case "parentHostname" :
return node.withParentHostname(asString(value));
case "ipAddresses" :
return IP.Config.verify(node.with(node.ipConfig().with(asStringSet(value))), nodes.get());
case "additionalIpAddresses" :
return IP.Config.verify(node.with(node.ipConfig().with(IP.Pool.of(asStringSet(value)))), nodes.get());
case WANT_TO_RETIRE :
return node.withWantToRetire(asBoolean(value), Agent.operator, clock.instant());
case "wantToDeprovision" :
if (node.type() != NodeType.host && asBoolean(value))
throw new IllegalArgumentException("wantToDeprovision can only be set for hosts");
return node.with(node.status().withWantToDeprovision(asBoolean(value)));
case "reports" :
return nodeWithPatchedReports(node, value);
case "openStackId" :
return node.withOpenStackId(asString(value));
case "diskGb":
case "minDiskAvailableGb":
return node.with(node.flavor().with(node.flavor().resources().withDiskGb(value.asDouble())));
case "memoryGb":
case "minMainMemoryAvailableGb":
return node.with(node.flavor().with(node.flavor().resources().withMemoryGb(value.asDouble())));
case "vcpu":
case "minCpuCores":
return node.with(node.flavor().with(node.flavor().resources().withVcpu(value.asDouble())));
case "fastDisk":
return node.with(node.flavor().with(node.flavor().resources().with(value.asBool() ? fast : slow)));
case "remoteStorage":
return node.with(node.flavor().with(node.flavor().resources().with(value.asBool() ? remote : local)));
case "bandwidthGbps":
return node.with(node.flavor().with(node.flavor().resources().withBandwidthGbps(value.asDouble())));
case "modelName":
return value.type() == Type.NIX ? node.withoutModelName() : node.withModelName(asString(value));
case "requiredDiskSpeed":
return patchRequiredDiskSpeed(asString(value));
case "reservedTo":
return value.type() == Type.NIX ? node.withoutReservedTo() : node.withReservedTo(TenantName.from(value.asString()));
default :
throw new IllegalArgumentException("Could not apply field '" + name + "' on a node: No such modifiable field");
}
} | class NodePatcher {
private static final String WANT_TO_RETIRE = "wantToRetire";
private final NodeFlavors nodeFlavors;
private final Inspector inspector;
private final Supplier<LockedNodeList> nodes;
private final Clock clock;
private Node node;
public NodePatcher(NodeFlavors nodeFlavors, InputStream json, Node node, Supplier<LockedNodeList> nodes, Clock clock) {
this.nodeFlavors = nodeFlavors;
this.node = node;
this.nodes = nodes;
this.clock = clock;
try {
this.inspector = SlimeUtils.jsonToSlime(IOUtils.readBytes(json, 1000 * 1000)).get();
} catch (IOException e) {
throw new UncheckedIOException("Error reading request body", e);
}
}
/**
* Apply the json to the node and return all nodes affected by the patch.
* More than 1 node may be affected if e.g. the node is a Docker host, which may have
* children that must be updated in a consistent manner.
*/
public List<Node> apply() {
List<Node> patchedNodes = new ArrayList<>();
inspector.traverse((String name, Inspector value) -> {
try {
node = applyField(node, name, value);
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Could not set field '" + name + "'", e);
}
try {
patchedNodes.addAll(applyFieldRecursive(name, value));
} catch (IllegalArgumentException e) {
}
} );
patchedNodes.add(node);
return patchedNodes;
}
private List<Node> applyFieldRecursive(String name, Inspector value) {
switch (name) {
case WANT_TO_RETIRE:
List<Node> childNodes = node.type().isDockerHost() ? nodes.get().childrenOf(node).asList() : List.of();
return childNodes.stream()
.map(child -> applyField(child, name, value))
.collect(Collectors.toList());
default :
throw new IllegalArgumentException("Field " + name + " is not recursive");
}
}
private Node nodeWithPatchedReports(Node node, Inspector reportsInspector) {
Node patchedNode;
if (reportsInspector.type() == Type.NIX) {
patchedNode = node.with(new Reports());
} else {
var reportsBuilder = new Reports.Builder(node.reports());
reportsInspector.traverse((ObjectTraverser) (reportId, reportInspector) -> {
if (reportInspector.type() == Type.NIX) {
reportsBuilder.clearReport(reportId);
} else {
reportsBuilder.setReport(Report.fromSlime(reportId, reportInspector));
}
});
patchedNode = node.with(reportsBuilder.build());
}
boolean hadHardFailReports = node.reports().getReports().stream()
.anyMatch(r -> r.getType() == Report.Type.HARD_FAIL);
boolean hasHardFailReports = patchedNode.reports().getReports().stream()
.anyMatch(r -> r.getType() == Report.Type.HARD_FAIL);
if (hadHardFailReports != hasHardFailReports) {
if ((hasHardFailReports && node.state() == Node.State.failed) || node.state() == Node.State.parked)
return patchedNode;
patchedNode = patchedNode.with(patchedNode.status().withWantToDeprovision(hasHardFailReports));
}
return patchedNode;
}
private Set<String> asStringSet(Inspector field) {
if ( ! field.type().equals(Type.ARRAY))
throw new IllegalArgumentException("Expected an ARRAY value, got a " + field.type());
TreeSet<String> strings = new TreeSet<>();
for (int i = 0; i < field.entries(); i++) {
Inspector entry = field.entry(i);
if ( ! entry.type().equals(Type.STRING))
throw new IllegalArgumentException("Expected a STRING value, got a " + entry.type());
strings.add(entry.asString());
}
return strings;
}
private Node patchRequiredDiskSpeed(String value) {
Optional<Allocation> allocation = node.allocation();
if (allocation.isPresent())
return node.with(allocation.get().withRequestedResources(
allocation.get().requestedResources().with(NodeResources.DiskSpeed.valueOf(value))));
else
throw new IllegalArgumentException("Node is not allocated");
}
private Node patchCurrentRestartGeneration(Long value) {
Optional<Allocation> allocation = node.allocation();
if (allocation.isPresent())
return node.with(allocation.get().withRestart(allocation.get().restartGeneration().withCurrent(value)));
else
throw new IllegalArgumentException("Node is not allocated");
}
private Long asLong(Inspector field) {
if ( ! field.type().equals(Type.LONG))
throw new IllegalArgumentException("Expected a LONG value, got a " + field.type());
return field.asLong();
}
private String asString(Inspector field) {
if ( ! field.type().equals(Type.STRING))
throw new IllegalArgumentException("Expected a STRING value, got a " + field.type());
return field.asString();
}
private Optional<String> asOptionalString(Inspector field) {
return field.type().equals(Type.NIX) ? Optional.empty() : Optional.of(asString(field));
}
private Optional<String> removeQuotedNulls(Optional<String> value) {
return value.filter(v -> !v.equals("null"));
}
private boolean asBoolean(Inspector field) {
if ( ! field.type().equals(Type.BOOL))
throw new IllegalArgumentException("Expected a BOOL value, got a " + field.type());
return field.asBool();
}
} | class NodePatcher {
private static final String WANT_TO_RETIRE = "wantToRetire";
private final NodeFlavors nodeFlavors;
private final Inspector inspector;
private final Supplier<LockedNodeList> nodes;
private final Clock clock;
private Node node;
public NodePatcher(NodeFlavors nodeFlavors, InputStream json, Node node, Supplier<LockedNodeList> nodes, Clock clock) {
this.nodeFlavors = nodeFlavors;
this.node = node;
this.nodes = nodes;
this.clock = clock;
try {
this.inspector = SlimeUtils.jsonToSlime(IOUtils.readBytes(json, 1000 * 1000)).get();
} catch (IOException e) {
throw new UncheckedIOException("Error reading request body", e);
}
}
/**
* Apply the json to the node and return all nodes affected by the patch.
* More than 1 node may be affected if e.g. the node is a Docker host, which may have
* children that must be updated in a consistent manner.
*/
public List<Node> apply() {
List<Node> patchedNodes = new ArrayList<>();
inspector.traverse((String name, Inspector value) -> {
try {
node = applyField(node, name, value);
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Could not set field '" + name + "'", e);
}
try {
patchedNodes.addAll(applyFieldRecursive(name, value));
} catch (IllegalArgumentException e) {
}
} );
patchedNodes.add(node);
return patchedNodes;
}
private List<Node> applyFieldRecursive(String name, Inspector value) {
switch (name) {
case WANT_TO_RETIRE:
List<Node> childNodes = node.type().isDockerHost() ? nodes.get().childrenOf(node).asList() : List.of();
return childNodes.stream()
.map(child -> applyField(child, name, value))
.collect(Collectors.toList());
default :
throw new IllegalArgumentException("Field " + name + " is not recursive");
}
}
private Node nodeWithPatchedReports(Node node, Inspector reportsInspector) {
Node patchedNode;
if (reportsInspector.type() == Type.NIX) {
patchedNode = node.with(new Reports());
} else {
var reportsBuilder = new Reports.Builder(node.reports());
reportsInspector.traverse((ObjectTraverser) (reportId, reportInspector) -> {
if (reportInspector.type() == Type.NIX) {
reportsBuilder.clearReport(reportId);
} else {
reportsBuilder.setReport(Report.fromSlime(reportId, reportInspector));
}
});
patchedNode = node.with(reportsBuilder.build());
}
boolean hadHardFailReports = node.reports().getReports().stream()
.anyMatch(r -> r.getType() == Report.Type.HARD_FAIL);
boolean hasHardFailReports = patchedNode.reports().getReports().stream()
.anyMatch(r -> r.getType() == Report.Type.HARD_FAIL);
if (hadHardFailReports != hasHardFailReports) {
if ((hasHardFailReports && node.state() == Node.State.failed) || node.state() == Node.State.parked)
return patchedNode;
patchedNode = patchedNode.with(patchedNode.status().withWantToDeprovision(hasHardFailReports));
}
return patchedNode;
}
private Set<String> asStringSet(Inspector field) {
if ( ! field.type().equals(Type.ARRAY))
throw new IllegalArgumentException("Expected an ARRAY value, got a " + field.type());
TreeSet<String> strings = new TreeSet<>();
for (int i = 0; i < field.entries(); i++) {
Inspector entry = field.entry(i);
if ( ! entry.type().equals(Type.STRING))
throw new IllegalArgumentException("Expected a STRING value, got a " + entry.type());
strings.add(entry.asString());
}
return strings;
}
private Node patchRequiredDiskSpeed(String value) {
Optional<Allocation> allocation = node.allocation();
if (allocation.isPresent())
return node.with(allocation.get().withRequestedResources(
allocation.get().requestedResources().with(NodeResources.DiskSpeed.valueOf(value))));
else
throw new IllegalArgumentException("Node is not allocated");
}
private Node patchCurrentRestartGeneration(Long value) {
Optional<Allocation> allocation = node.allocation();
if (allocation.isPresent())
return node.with(allocation.get().withRestart(allocation.get().restartGeneration().withCurrent(value)));
else
throw new IllegalArgumentException("Node is not allocated");
}
private Long asLong(Inspector field) {
if ( ! field.type().equals(Type.LONG))
throw new IllegalArgumentException("Expected a LONG value, got a " + field.type());
return field.asLong();
}
private String asString(Inspector field) {
if ( ! field.type().equals(Type.STRING))
throw new IllegalArgumentException("Expected a STRING value, got a " + field.type());
return field.asString();
}
private Optional<String> asOptionalString(Inspector field) {
return field.type().equals(Type.NIX) ? Optional.empty() : Optional.of(asString(field));
}
private Optional<String> removeQuotedNulls(Optional<String> value) {
return value.filter(v -> !v.equals("null"));
}
private boolean asBoolean(Inspector field) {
if ( ! field.type().equals(Type.BOOL))
throw new IllegalArgumentException("Expected a BOOL value, got a " + field.type());
return field.asBool();
}
} |
Too late to forbid these? | private static String sanitizeUpstream(String part) {
// Remove any invalid characters from an upstream part: lower-case, map '_' to '-',
// then drop everything outside [a-z0-9-] before length-limiting via truncate().
// Locale.ROOT makes case mapping deterministic: the default-locale overload maps
// 'I' to dotless 'ı' under Turkish locales, which the filter below would then strip,
// silently changing the generated upstream ID.
return truncate(part.toLowerCase(java.util.Locale.ROOT)
.replace('_', '-')
.replaceAll("[^a-z0-9-]*", ""));
} | .replaceAll("[^a-z0-9-]*", "")); | private static String sanitizeUpstream(String part) {
// Lower-cases, maps '_' to '-', strips anything outside [a-z0-9-], then truncate()
// keeps at most the last 63 characters (the DNS label length limit).
// NOTE(review): toLowerCase() here uses the default locale — presumably Locale.ROOT
// was intended for deterministic output; verify against the Turkish-locale case.
return truncate(part.toLowerCase()
.replace('_', '-')
.replaceAll("[^a-z0-9-]*", ""));
} | class Endpoint {
private static final String YAHOO_DNS_SUFFIX = ".vespa.yahooapis.com";
private static final String OATH_DNS_SUFFIX = ".vespa.oath.cloud";
private static final String PUBLIC_DNS_SUFFIX = ".public.vespa.oath.cloud";
private static final String PUBLIC_CD_DNS_SUFFIX = ".public-cd.vespa.oath.cloud";
private final String name;
private final URI url;
private final Scope scope;
private final boolean legacy;
private final RoutingMethod routingMethod;
private final boolean tls;
private final boolean wildcard;
private Endpoint(String name, ApplicationId application, ZoneId zone, SystemName system, Port port, boolean legacy,
RoutingMethod routingMethod, boolean wildcard) {
Objects.requireNonNull(name, "name must be non-null");
Objects.requireNonNull(application, "application must be non-null");
Objects.requireNonNull(system, "system must be non-null");
Objects.requireNonNull(port, "port must be non-null");
Objects.requireNonNull(routingMethod, "routingMethod must be non-null");
this.name = name;
this.url = createUrl(name, application, zone, system, port, legacy, routingMethod);
this.scope = zone == null ? Scope.global : Scope.zone;
this.legacy = legacy;
this.routingMethod = routingMethod;
this.tls = port.tls;
this.wildcard = wildcard;
}
/**
* Returns the name of this endpoint (the first component of the DNS name). Depending on the endpoint type, this
* can be one of the following:
* - A wildcard (any scope)
* - A cluster name (only zone scope)
* - An endpoint ID (only global scope)
*/
public String name() {
return name;
}
/** Returns the URL used to access this */
public URI url() {
return url;
}
/** Returns the DNS name of this */
public String dnsName() {
return url.getAuthority().replaceAll(":.*", "");
}
/** Returns the scope of this */
public Scope scope() {
return scope;
}
/** Returns whether this is considered a legacy DNS name that is due for removal */
public boolean legacy() {
return legacy;
}
/** Returns the routing used for this */
public RoutingMethod routingMethod() {
return routingMethod;
}
/** Returns whether this endpoint supports TLS connections */
public boolean tls() {
return tls;
}
/** Returns whether this requires a rotation to be reachable */
public boolean requiresRotation() {
return routingMethod.isShared() && scope == Scope.global;
}
/** Returns whether this is a wildcard endpoint (used only in certificates) */
public boolean wildcard() {
return wildcard;
}
/** Returns the upstream ID of given deployment. This *must* match what the routing layer generates */
public String upstreamIdOf(DeploymentId deployment) {
if (scope != Scope.global) throw new IllegalArgumentException("Scope " + scope + " does not have upstream name");
if (!routingMethod.isShared()) throw new IllegalArgumentException("Routing method " + routingMethod + " does not have upstream name");
return upstreamIdOf(name, deployment.applicationId(), deployment.zoneId());
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Endpoint endpoint = (Endpoint) o;
return url.equals(endpoint.url);
}
@Override
public int hashCode() {
return Objects.hash(url);
}
@Override
public String toString() {
return String.format("endpoint %s [scope=%s, legacy=%s, routingMethod=%s]", url, scope, legacy, routingMethod);
}
/** Returns the DNS suffix used for endpoints in given system */
public static String dnsSuffix(SystemName system) {
return dnsSuffix(system, false);
}
private static URI createUrl(String name, ApplicationId application, ZoneId zone, SystemName system,
Port port, boolean legacy, RoutingMethod routingMethod) {
String scheme = port.tls ? "https" : "http";
String separator = separator(system, routingMethod, port.tls);
String portPart = port.isDefault() ? "" : ":" + port.port;
return URI.create(scheme + "://" +
sanitize(namePart(name, separator)) +
systemPart(system, separator) +
sanitize(instancePart(application, separator)) +
sanitize(application.application().value()) +
separator +
sanitize(application.tenant().value()) +
"." +
scopePart(zone, legacy) +
dnsSuffix(system, legacy) +
portPart +
"/");
}
private static String sanitize(String part) {
return part.replace('_', '-');
}
private static String separator(SystemName system, RoutingMethod routingMethod, boolean tls) {
if (!tls) return ".";
if (routingMethod.isDirect()) return ".";
if (system.isPublic()) return ".";
return "--";
}
private static String namePart(String name, String separator) {
if ("default".equals(name)) return "";
return name + separator;
}
private static String scopePart(ZoneId zone, boolean legacy) {
if (zone == null) return "global";
if (!legacy && zone.environment().isProduction()) return zone.region().value();
return zone.region().value() + "." + zone.environment().value();
}
private static String instancePart(ApplicationId application, String separator) {
if (application.instance().isDefault()) return "";
return application.instance().value() + separator;
}
private static String systemPart(SystemName system, String separator) {
if (!system.isCd()) return "";
return system.value() + separator;
}
private static String dnsSuffix(SystemName system, boolean legacy) {
switch (system) {
case cd:
case main:
if (legacy) return YAHOO_DNS_SUFFIX;
return OATH_DNS_SUFFIX;
case Public:
return PUBLIC_DNS_SUFFIX;
case PublicCd:
return PUBLIC_CD_DNS_SUFFIX;
default: throw new IllegalArgumentException("No DNS suffix declared for system " + system);
}
}
private static String upstreamIdOf(String name, ApplicationId application, ZoneId zone) {
return Stream.of(namePart(name, ""),
instancePart(application, ""),
application.tenant().value(),
application.application().value(),
zone.region().value(),
zone.environment().value())
.filter(Predicate.not(String::isEmpty))
.map(Endpoint::sanitizeUpstream)
.collect(Collectors.joining("."));
}
/** Remove any invalid characters from a upstream part */
/** Truncate the given part at the front so its length does not exceed 63 characters */
private static String truncate(String part) {
return part.substring(Math.max(0, part.length() - 63));
}
/** An endpoint's scope */
public enum Scope {
/** Endpoint points to all zones */
global,
/** Endpoint points to a single zone */
zone,
}
/** Represents an endpoint's HTTP port */
public static class Port {
private final int port;
private final boolean tls;
private Port(int port, boolean tls) {
if (port < 1 || port > 65535) {
throw new IllegalArgumentException("Port must be between 1 and 65535, got " + port);
}
this.port = port;
this.tls = tls;
}
private boolean isDefault() {
return port == 80 || port == 443;
}
/** Returns the default HTTPS port */
public static Port tls() {
return new Port(443, true);
}
/** Returns default port for the given routing method */
public static Port fromRoutingMethod(RoutingMethod method) {
if (method.isDirect()) return Port.tls();
return Port.tls(4443);
}
/** Create a HTTPS port */
public static Port tls(int port) {
return new Port(port, true);
}
/** Create a HTTP port */
public static Port plain(int port) {
return new Port(port, false);
}
}
/** Build an endpoint for given application */
public static EndpointBuilder of(ApplicationId application) {
return new EndpointBuilder(application);
}
public static class EndpointBuilder {
private final ApplicationId application;
private ZoneId zone;
private ClusterSpec.Id cluster;
private EndpointId endpointId;
private Port port;
private RoutingMethod routingMethod = RoutingMethod.shared;
private boolean legacy = false;
private boolean wildcard = false;
private EndpointBuilder(ApplicationId application) {
this.application = application;
}
/** Sets the cluster target for this */
public EndpointBuilder target(ClusterSpec.Id cluster, ZoneId zone) {
if (endpointId != null || wildcard) {
throw new IllegalArgumentException("Cannot set multiple target types");
}
this.cluster = cluster;
this.zone = zone;
return this;
}
/** Sets the endpoint target ID for this (as defined in deployments.xml) */
public EndpointBuilder named(EndpointId endpointId) {
if (cluster != null || wildcard) {
throw new IllegalArgumentException("Cannot set multiple target types");
}
this.endpointId = endpointId;
return this;
}
/** Sets the global wildcard target for this */
public EndpointBuilder wildcard() {
if (endpointId != null || cluster != null) {
throw new IllegalArgumentException("Cannot set multiple target types");
}
this.wildcard = true;
return this;
}
/** Sets the zone wildcard target for this */
public EndpointBuilder wildcard(ZoneId zone) {
if(endpointId != null || cluster != null) {
throw new IllegalArgumentException("Cannot set multiple target types");
}
this.zone = zone;
this.wildcard = true;
return this;
}
/** Sets the port of this */
public EndpointBuilder on(Port port) {
this.port = port;
return this;
}
/** Marks this as a legacy endpoint */
public EndpointBuilder legacy() {
this.legacy = true;
return this;
}
/** Sets the routing method for this */
public EndpointBuilder routingMethod(RoutingMethod method) {
this.routingMethod = method;
return this;
}
/** Sets the system that owns this */
public Endpoint in(SystemName system) {
String name;
if (wildcard) {
name = "*";
} else if (endpointId != null) {
name = endpointId.id();
} else if (cluster != null) {
name = cluster.value();
} else {
throw new IllegalArgumentException("Must set either cluster, rotation or wildcard target");
}
if (system.isPublic() && routingMethod != RoutingMethod.exclusive) {
throw new IllegalArgumentException("Public system only supports routing method " + RoutingMethod.exclusive);
}
if (routingMethod.isDirect() && !port.isDefault()) {
throw new IllegalArgumentException("Routing method " + routingMethod + " can only use default port");
}
return new Endpoint(name, application, zone, system, port, legacy, routingMethod, wildcard);
}
}
} | class Endpoint {
private static final String YAHOO_DNS_SUFFIX = ".vespa.yahooapis.com";
private static final String OATH_DNS_SUFFIX = ".vespa.oath.cloud";
private static final String PUBLIC_DNS_SUFFIX = ".public.vespa.oath.cloud";
private static final String PUBLIC_CD_DNS_SUFFIX = ".public-cd.vespa.oath.cloud";
private final String name;
private final URI url;
private final Scope scope;
private final boolean legacy;
private final RoutingMethod routingMethod;
private final boolean tls;
private final boolean wildcard;
private Endpoint(String name, ApplicationId application, ZoneId zone, SystemName system, Port port, boolean legacy,
RoutingMethod routingMethod, boolean wildcard) {
Objects.requireNonNull(name, "name must be non-null");
Objects.requireNonNull(application, "application must be non-null");
Objects.requireNonNull(system, "system must be non-null");
Objects.requireNonNull(port, "port must be non-null");
Objects.requireNonNull(routingMethod, "routingMethod must be non-null");
this.name = name;
this.url = createUrl(name, application, zone, system, port, legacy, routingMethod);
this.scope = zone == null ? Scope.global : Scope.zone;
this.legacy = legacy;
this.routingMethod = routingMethod;
this.tls = port.tls;
this.wildcard = wildcard;
}
/**
* Returns the name of this endpoint (the first component of the DNS name). Depending on the endpoint type, this
* can be one of the following:
* - A wildcard (any scope)
* - A cluster name (only zone scope)
* - An endpoint ID (only global scope)
*/
public String name() {
return name;
}
/** Returns the URL used to access this */
public URI url() {
return url;
}
/** Returns the DNS name of this */
public String dnsName() {
return url.getAuthority().replaceAll(":.*", "");
}
/** Returns the scope of this */
public Scope scope() {
return scope;
}
/** Returns whether this is considered a legacy DNS name that is due for removal */
public boolean legacy() {
return legacy;
}
/** Returns the routing used for this */
public RoutingMethod routingMethod() {
return routingMethod;
}
/** Returns whether this endpoint supports TLS connections */
public boolean tls() {
return tls;
}
/** Returns whether this requires a rotation to be reachable */
public boolean requiresRotation() {
return routingMethod.isShared() && scope == Scope.global;
}
/** Returns whether this is a wildcard endpoint (used only in certificates) */
public boolean wildcard() {
return wildcard;
}
/** Returns the upstream ID of given deployment. This *must* match what the routing layer generates */
public String upstreamIdOf(DeploymentId deployment) {
if (scope != Scope.global) throw new IllegalArgumentException("Scope " + scope + " does not have upstream name");
if (!routingMethod.isShared()) throw new IllegalArgumentException("Routing method " + routingMethod + " does not have upstream name");
return upstreamIdOf(name, deployment.applicationId(), deployment.zoneId());
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Endpoint endpoint = (Endpoint) o;
return url.equals(endpoint.url);
}
@Override
public int hashCode() {
return Objects.hash(url);
}
@Override
public String toString() {
return String.format("endpoint %s [scope=%s, legacy=%s, routingMethod=%s]", url, scope, legacy, routingMethod);
}
/** Returns the DNS suffix used for endpoints in given system */
public static String dnsSuffix(SystemName system) {
return dnsSuffix(system, false);
}
private static URI createUrl(String name, ApplicationId application, ZoneId zone, SystemName system,
Port port, boolean legacy, RoutingMethod routingMethod) {
String scheme = port.tls ? "https" : "http";
String separator = separator(system, routingMethod, port.tls);
String portPart = port.isDefault() ? "" : ":" + port.port;
return URI.create(scheme + "://" +
sanitize(namePart(name, separator)) +
systemPart(system, separator) +
sanitize(instancePart(application, separator)) +
sanitize(application.application().value()) +
separator +
sanitize(application.tenant().value()) +
"." +
scopePart(zone, legacy) +
dnsSuffix(system, legacy) +
portPart +
"/");
}
private static String sanitize(String part) {
return part.replace('_', '-');
}
private static String separator(SystemName system, RoutingMethod routingMethod, boolean tls) {
if (!tls) return ".";
if (routingMethod.isDirect()) return ".";
if (system.isPublic()) return ".";
return "--";
}
/** The name part of the endpoint; the "default" name is omitted entirely. */
private static String namePart(String name, String separator) {
    return "default".equals(name) ? "" : name + separator;
}
/** The scope part of the endpoint: "global" when zone-less, region[.environment] otherwise. */
private static String scopePart(ZoneId zone, boolean legacy) {
    if (zone == null) return "global";
    String region = zone.region().value();
    // Non-legacy production endpoints omit the environment part.
    if (!legacy && zone.environment().isProduction()) return region;
    return region + "." + zone.environment().value();
}
/** The instance part of the endpoint; the default instance is omitted entirely. */
private static String instancePart(ApplicationId application, String separator) {
    return application.instance().isDefault() ? "" : application.instance().value() + separator;
}
/** The system part of the endpoint; present only for CD systems. */
private static String systemPart(SystemName system, String separator) {
    return system.isCd() ? system.value() + separator : "";
}
/** Returns the DNS suffix for endpoints of the given system, optionally the legacy suffix. */
private static String dnsSuffix(SystemName system, boolean legacy) {
    switch (system) {
        case cd:
        case main:
            // Only the cd/main systems have a distinct legacy suffix.
            return legacy ? YAHOO_DNS_SUFFIX : OATH_DNS_SUFFIX;
        case Public:
            return PUBLIC_DNS_SUFFIX;
        case PublicCd:
            return PUBLIC_CD_DNS_SUFFIX;
        default:
            throw new IllegalArgumentException("No DNS suffix declared for system " + system);
    }
}
/** Joins the non-empty endpoint parts with "." after sanitizing each, forming the upstream ID. */
private static String upstreamIdOf(String name, ApplicationId application, ZoneId zone) {
    List<String> parts = List.of(namePart(name, ""),
                                 instancePart(application, ""),
                                 application.tenant().value(),
                                 application.application().value(),
                                 zone.region().value(),
                                 zone.environment().value());
    return parts.stream()
                .filter(part -> !part.isEmpty())
                .map(Endpoint::sanitizeUpstream)
                .collect(Collectors.joining("."));
}
/** Remove any invalid characters from a upstream part */
/** Truncate the given part at the front so its length does not exceed 63 characters */
private static String truncate(String part) {
return part.substring(Math.max(0, part.length() - 63));
}
/** An endpoint's scope: whether it routes to every zone of a deployment or to one specific zone */
public enum Scope {
    /** Endpoint points to all zones */
    global,
    /** Endpoint points to a single zone */
    zone,
}
/** Represents an endpoint's HTTP port */
public static class Port {
    private final int port;
    private final boolean tls;
    private Port(int port, boolean tls) {
        // Reject anything outside the valid TCP port range.
        if (port <= 0 || port >= 65536) {
            throw new IllegalArgumentException("Port must be between 1 and 65535, got " + port);
        }
        this.port = port;
        this.tls = tls;
    }
    /** Whether this port can be omitted from the URL (browser default for the scheme). */
    private boolean isDefault() {
        return port == 443 || port == 80;
    }
    /** Returns the default HTTPS port */
    public static Port tls() {
        return new Port(443, true);
    }
    /** Create a HTTPS port */
    public static Port tls(int port) {
        return new Port(port, true);
    }
    /** Create a HTTP port */
    public static Port plain(int port) {
        return new Port(port, false);
    }
    /** Returns default port for the given routing method */
    public static Port fromRoutingMethod(RoutingMethod method) {
        return method.isDirect() ? tls() : tls(4443);
    }
}
/** Build an endpoint for given application */
public static EndpointBuilder of(ApplicationId application) {
    // Entry point of the builder API; target, port and system are set on the returned builder.
    return new EndpointBuilder(application);
}
/**
 * Builder for {@link Endpoint}. Exactly one target type — cluster, named endpoint ID,
 * or wildcard — must be set before calling {@link #in(SystemName)}.
 */
public static class EndpointBuilder {
    private final ApplicationId application;
    private ZoneId zone;
    private ClusterSpec.Id cluster;
    private EndpointId endpointId;
    private Port port;
    private RoutingMethod routingMethod = RoutingMethod.shared;
    private boolean legacy = false;
    private boolean wildcard = false;
    private EndpointBuilder(ApplicationId application) {
        this.application = application;
    }
    /** Sets the cluster target for this */
    public EndpointBuilder target(ClusterSpec.Id cluster, ZoneId zone) {
        if (endpointId != null || wildcard) {
            throw new IllegalArgumentException("Cannot set multiple target types");
        }
        this.cluster = cluster;
        this.zone = zone;
        return this;
    }
    /** Sets the endpoint target ID for this (as defined in deployments.xml) */
    public EndpointBuilder named(EndpointId endpointId) {
        if (cluster != null || wildcard) {
            throw new IllegalArgumentException("Cannot set multiple target types");
        }
        this.endpointId = endpointId;
        return this;
    }
    /** Sets the global wildcard target for this */
    public EndpointBuilder wildcard() {
        if (endpointId != null || cluster != null) {
            throw new IllegalArgumentException("Cannot set multiple target types");
        }
        this.wildcard = true;
        return this;
    }
    /** Sets the zone wildcard target for this */
    public EndpointBuilder wildcard(ZoneId zone) {
        if (endpointId != null || cluster != null) {
            throw new IllegalArgumentException("Cannot set multiple target types");
        }
        this.zone = zone;
        this.wildcard = true;
        return this;
    }
    /** Sets the port of this */
    public EndpointBuilder on(Port port) {
        this.port = port;
        return this;
    }
    /** Marks this as a legacy endpoint */
    public EndpointBuilder legacy() {
        this.legacy = true;
        return this;
    }
    /** Sets the routing method for this */
    public EndpointBuilder routingMethod(RoutingMethod method) {
        this.routingMethod = method;
        return this;
    }
    /**
     * Sets the system that owns this and builds the endpoint.
     *
     * @throws IllegalArgumentException if no target type was set, or the combination of
     *                                  system, routing method and port is invalid
     * @throws NullPointerException     if no port was set with {@link #on(Port)}
     */
    public Endpoint in(SystemName system) {
        String name;
        if (wildcard) {
            name = "*";
        } else if (endpointId != null) {
            name = endpointId.id();
        } else if (cluster != null) {
            name = cluster.value();
        } else {
            throw new IllegalArgumentException("Must set either cluster, rotation or wildcard target");
        }
        // Fail with a clear message instead of a bare NPE when the port was never set.
        Objects.requireNonNull(port, "port must be set with on()");
        if (system.isPublic() && routingMethod != RoutingMethod.exclusive) {
            throw new IllegalArgumentException("Public system only supports routing method " + RoutingMethod.exclusive);
        }
        if (routingMethod.isDirect() && !port.isDefault()) {
            throw new IllegalArgumentException("Routing method " + routingMethod + " can only use default port");
        }
        return new Endpoint(name, application, zone, system, port, legacy, routingMethod, wildcard);
    }
}
} |
:'( | public static EndpointList global(RoutingId routingId, SystemName system, List<RoutingMethod> routingMethods) {
var endpoints = new ArrayList<Endpoint>();
for (var method : routingMethods) {
endpoints.add(Endpoint.of(routingId.application())
.named(routingId.endpointId())
.on(Port.fromRoutingMethod(method))
.routingMethod(method)
.in(system));
if (method == RoutingMethod.shared) {
endpoints.add(Endpoint.of(routingId.application())
.named(routingId.endpointId())
.on(Port.plain(4080))
.legacy()
.routingMethod(method)
.in(system));
endpoints.add(Endpoint.of(routingId.application())
.named(routingId.endpointId())
.on(Port.tls(4443))
.legacy()
.routingMethod(method)
.in(system));
}
}
return new EndpointList(endpoints);
} | .on(Port.plain(4080)) | public static EndpointList global(RoutingId routingId, SystemName system, List<RoutingMethod> routingMethods) {
var endpoints = new ArrayList<Endpoint>();
for (var method : routingMethods) {
endpoints.add(Endpoint.of(routingId.application())
.named(routingId.endpointId())
.on(Port.fromRoutingMethod(method))
.routingMethod(method)
.in(system));
if (method == RoutingMethod.shared) {
endpoints.add(Endpoint.of(routingId.application())
.named(routingId.endpointId())
.on(Port.plain(4080))
.legacy()
.routingMethod(method)
.in(system));
endpoints.add(Endpoint.of(routingId.application())
.named(routingId.endpointId())
.on(Port.tls(4443))
.legacy()
.routingMethod(method)
.in(system));
}
}
return new EndpointList(endpoints);
} | class EndpointList extends AbstractFilteringList<Endpoint, EndpointList> {
private EndpointList(Collection<? extends Endpoint> endpoints, boolean negate) {
super(endpoints, negate, EndpointList::new);
if (endpoints.stream().distinct().count() != endpoints.size()) {
throw new IllegalArgumentException("Expected all endpoints to be distinct, got " + endpoints);
}
}
private EndpointList(Collection<? extends Endpoint> endpoints) {
this(endpoints, false);
}
/** Returns the primary (non-legacy) endpoint, if any */
public Optional<Endpoint> primary() {
return not().matching(Endpoint::legacy).asList().stream().findFirst();
}
/** Returns the subset of endpoints named according to given ID */
public EndpointList named(EndpointId id) {
return matching(endpoint -> endpoint.name().equals(id.id()));
}
/** Returns the subset of endpoints are either legacy or not */
public EndpointList legacy(boolean legacy) {
return matching(endpoint -> endpoint.legacy() == legacy);
}
/** Returns the subset of endpoints that require a rotation */
public EndpointList requiresRotation() {
return matching(Endpoint::requiresRotation);
}
/** Returns the subset of endpoints with given scope */
public EndpointList scope(Endpoint.Scope scope) {
return matching(endpoint -> endpoint.scope() == scope);
}
/** Returns all global endpoints for given routing ID and system provided by given routing methods */
public static EndpointList global(RoutingId routingId, SystemName system, RoutingMethod routingMethod) {
return global(routingId, system, List.of(routingMethod));
}
public static EndpointList copyOf(Collection<Endpoint> endpoints) {
return new EndpointList(endpoints);
}
} | class EndpointList extends AbstractFilteringList<Endpoint, EndpointList> {
private EndpointList(Collection<? extends Endpoint> endpoints, boolean negate) {
super(endpoints, negate, EndpointList::new);
if (endpoints.stream().distinct().count() != endpoints.size()) {
throw new IllegalArgumentException("Expected all endpoints to be distinct, got " + endpoints);
}
}
private EndpointList(Collection<? extends Endpoint> endpoints) {
this(endpoints, false);
}
/** Returns the primary (non-legacy) endpoint, if any */
public Optional<Endpoint> primary() {
return not().matching(Endpoint::legacy).asList().stream().findFirst();
}
/** Returns the subset of endpoints named according to given ID */
public EndpointList named(EndpointId id) {
return matching(endpoint -> endpoint.name().equals(id.id()));
}
/** Returns the subset of endpoints that are considered legacy */
public EndpointList legacy() {
return matching(Endpoint::legacy);
}
/** Returns the subset of endpoints that require a rotation */
public EndpointList requiresRotation() {
return matching(Endpoint::requiresRotation);
}
/** Returns the subset of endpoints with given scope */
public EndpointList scope(Endpoint.Scope scope) {
return matching(endpoint -> endpoint.scope() == scope);
}
/** Returns all global endpoints for given routing ID and system provided by given routing methods */
public static EndpointList global(RoutingId routingId, SystemName system, RoutingMethod routingMethod) {
return global(routingId, system, List.of(routingMethod));
}
public static EndpointList copyOf(Collection<Endpoint> endpoints) {
return new EndpointList(endpoints);
}
} |
⭐️ | public RunId startSystemTestTests() {
var id = newRun(JobType.systemTest);
var testZone = JobType.systemTest.zone(tester.controller().system());
runner.run();
configServer().convergeServices(instanceId, testZone);
configServer().convergeServices(testerId.id(), testZone);
runner.run();
assertEquals(unfinished, jobs.run(id).get().stepStatuses().get(Step.endTests));
assertTrue(jobs.run(id).get().steps().get(Step.endTests).startTime().isPresent());
return id;
} | var testZone = JobType.systemTest.zone(tester.controller().system()); | public RunId startSystemTestTests() {
var id = newRun(JobType.systemTest);
var testZone = JobType.systemTest.zone(tester.controller().system());
runner.run();
configServer().convergeServices(instanceId, testZone);
configServer().convergeServices(testerId.id(), testZone);
runner.run();
assertEquals(unfinished, jobs.run(id).get().stepStatuses().get(Step.endTests));
assertTrue(jobs.run(id).get().steps().get(Step.endTests).startTime().isPresent());
return id;
} | class DeploymentContext {
// Canonical application package used by most tests: Athenz identity, default upgrade
// policy, one serial and two parallel production regions, and deployment e-mails.
public static final ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
        .athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service"))
        .upgradePolicy("default")
        .region("us-central-1")
        .parallel("us-west-1", "us-east-3")
        .emailRole("author")
        .emailAddress("b@a")
        .build();
// Variant for public CD systems; trusts a generated certificate (required there).
public static final ApplicationPackage publicCdApplicationPackage = new ApplicationPackageBuilder()
        .athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service"))
        .upgradePolicy("default")
        .region("aws-us-east-1c")
        .emailRole("author")
        .emailAddress("b@a")
        .trust(generateCertificate())
        .build();
// Default source revision attached to submissions.
public static final SourceRevision defaultSourceRevision = new SourceRevision("repository1", "master", "commit1");
private final TenantAndApplicationId applicationId;
private final ApplicationId instanceId;
private final TesterId testerId;
private final JobController jobs;
private final JobRunner runner;
private final DeploymentTester tester;
// The most recently submitted application version, if any.
private ApplicationVersion lastSubmission = null;
// When true, completeRollout() will not flush pending DNS updates.
private boolean deferDnsUpdates = false;
/** Creates a context for the given instance, creating its tenant and application if needed. */
public DeploymentContext(ApplicationId instanceId, DeploymentTester tester) {
    this.applicationId = TenantAndApplicationId.from(instanceId);
    this.instanceId = instanceId;
    this.testerId = TesterId.of(instanceId);
    this.jobs = tester.controller().jobController();
    this.runner = tester.runner();
    this.tester = tester;
    createTenantAndApplication();
}
/** Best-effort creation of tenant and application; they may already exist from an earlier context. */
private void createTenantAndApplication() {
    try {
        var tenant = tester.controllerTester().createTenant(instanceId.tenant().value());
        tester.controllerTester().createApplication(tenant.value(), instanceId.application().value(), instanceId.instance().value());
    } catch (IllegalArgumentException ignored) { }
}
/** The application of this context, fetched fresh from the controller */
public Application application() {
    return tester.controller().applications().requireApplication(applicationId);
}
/** The instance of this context, fetched fresh from the controller */
public Instance instance() {
    return tester.controller().applications().requireInstance(instanceId);
}
/** Current deployment status of the application */
public DeploymentStatus deploymentStatus() {
    return tester.controller().jobController().deploymentStatus(application());
}
/** Job statuses for this context's instance */
public Map<JobType, JobStatus> instanceJobs() {
    return deploymentStatus().instanceJobs(instanceId.instance());
}
/** The deployment in the given zone, or null if none */
public Deployment deployment(ZoneId zone) {
    return instance().deployments().get(zone);
}
/** The instance ID this context operates on */
public ApplicationId instanceId() {
    return instanceId;
}
/** The tester application's ID */
public TesterId testerId() { return testerId; }
/** The deployment ID of this instance in the given zone */
public DeploymentId deploymentIdIn(ZoneId zone) {
    return new DeploymentId(instanceId, zone);
}
/** Completely deploy the latest change */
public DeploymentContext deploy() {
    // Preconditions: something has been submitted, and it is not yet deployed anywhere.
    assertTrue("Application package submitted", application().latestVersion().isPresent());
    assertFalse("Submission is not already deployed", application().instances().values().stream()
                                                                   .anyMatch(instance -> instance.deployments().values().stream()
                                                                                                 .anyMatch(deployment -> deployment.applicationVersion().equals(lastSubmission))));
    assertEquals(application().latestVersion(), instance().change().application());
    completeRollout();
    assertFalse(instance().change().hasTargets());
    return this;
}
/** Upgrade platform of this to given version */
public DeploymentContext deployPlatform(Version version) {
    // Preconditions: the version is the pending platform change and nothing runs it yet.
    assertEquals(instance().change().platform().get(), version);
    assertFalse(application().instances().values().stream()
                             .anyMatch(instance -> instance.deployments().values().stream()
                                                           .anyMatch(deployment -> deployment.version().equals(version))));
    assertEquals(version, instance().change().platform().get());
    assertFalse(instance().change().application().isPresent());
    completeRollout();
    // Postconditions: every production deployment and every node runs the new version.
    assertTrue(application().productionDeployments().values().stream()
                            .allMatch(deployments -> deployments.stream()
                                                                .allMatch(deployment -> deployment.version().equals(version))));
    for (var spec : application().deploymentSpec().instances())
        for (JobType type : new DeploymentSteps(spec, tester.controller()::system).productionJobs())
            assertTrue(tester.configServer().nodeRepository()
                             .list(type.zone(tester.controller().system()), applicationId.defaultInstance()).stream()
                             .allMatch(node -> node.currentVersion().equals(version)));
    assertFalse(instance().change().hasTargets());
    return this;
}
/** Defer provisioning of load balancers in zones in given environment */
public DeploymentContext deferLoadBalancerProvisioningIn(Environment... environment) {
    configServer().deferLoadBalancerProvisioningIn(Set.of(environment));
    return this;
}
/** Defer DNS updates: completeRollout() will leave the name service queue untouched */
public DeploymentContext deferDnsUpdates() {
    deferDnsUpdates = true;
    return this;
}
/** Flush all pending DNS updates */
public DeploymentContext flushDnsUpdates() {
    flushDnsUpdates(Integer.MAX_VALUE);
    assertTrue("All name service requests dispatched",
               tester.controller().curator().readNameServiceQueue().requests().isEmpty());
    return this;
}
/** Flush count pending DNS updates */
public DeploymentContext flushDnsUpdates(int count) {
    // Dispatch at most 'count' queued name service requests in one maintenance pass.
    var dispatcher = new NameServiceDispatcher(tester.controller(), Duration.ofDays(1),
                                               new JobControl(tester.controller().curator()), count);
    dispatcher.run();
    return this;
}
/** Add a routing policy for this in given zone, with status set to inactive */
public DeploymentContext addInactiveRoutingPolicy(ZoneId zone) {
    var clusterId = "default-inactive";
    var id = new RoutingPolicyId(instanceId, ClusterSpec.Id.from(clusterId), zone);
    var policies = new LinkedHashMap<>(tester.controller().curator().readRoutingPolicies(instanceId));
    // 'false' marks the policy inactive, matching this method's name.
    policies.put(id, new RoutingPolicy(id, HostName.from("lb-host"),
                                       Optional.empty(),
                                       Set.of(EndpointId.of("default")),
                                       new Status(false, GlobalRouting.DEFAULT_STATUS)));
    tester.controller().curator().writeRoutingPolicies(instanceId, policies);
    return this;
}
/** Submit given application package for deployment */
public DeploymentContext submit(ApplicationPackage applicationPackage) {
    return submit(applicationPackage, Optional.of(defaultSourceRevision));
}
/** Submit given application package for deployment */
public DeploymentContext submit(ApplicationPackage applicationPackage, Optional<SourceRevision> sourceRevision) {
    // Reuse the application's project ID if set; fall back to a fixed test value.
    var projectId = tester.controller().applications()
                          .requireApplication(applicationId)
                          .projectId()
                          .orElse(1000);
    lastSubmission = jobs.submit(applicationId, sourceRevision, Optional.of("a@b"), Optional.empty(),
                                 Optional.empty(), projectId, applicationPackage, new byte[0]);
    return this;
}
/** Submit the default application package for deployment */
public DeploymentContext submit() {
    return submit(tester.controller().system().isPublic() ? publicCdApplicationPackage : applicationPackage);
}
/** Trigger all outstanding jobs, if any */
public DeploymentContext triggerJobs() {
    // Keep pulling the trigger until no more jobs become ready.
    while (tester.controller().applications().deploymentTrigger().triggerReadyJobs() > 0);
    return this;
}
/** Fail current deployment in given job with an out-of-capacity error from the config server */
public DeploymentContext outOfCapacity(JobType type) {
    // NOTE(review): the URI literal below appears truncated by a file export (the string
    // and its closing quote/parenthesis after "https:" are missing) — restore the original
    // config server URL from version history.
    return failDeployment(type,
                          new ConfigServerException(URI.create("https:
                                                    "Failed to deploy application",
                                                    "Out of capacity",
                                                    ConfigServerException.ErrorCode.OUT_OF_CAPACITY,
                                                    new RuntimeException("Out of capacity from test code")));
}
/** Fail current deployment in given job */
public DeploymentContext failDeployment(JobType type) {
    return failDeployment(type, new IllegalArgumentException("Exception from test code"));
}
/** Fail current deployment in given job, by throwing the given exception from the next prepare call */
private DeploymentContext failDeployment(JobType type, RuntimeException exception) {
    triggerJobs();
    var job = jobId(type);
    RunId id = currentRun(job).id();
    configServer().throwOnNextPrepare(exception);
    runner.advance(currentRun(job));
    assertTrue(jobs.run(id).get().hasFailed());
    assertTrue(jobs.run(id).get().hasEnded());
    return this;
}
/** Returns the last submitted application version */
public Optional<ApplicationVersion> lastSubmission() {
    return Optional.ofNullable(lastSubmission);
}
/** Runs and returns all remaining jobs for the application, at most once, and asserts the current change is rolled out. */
public DeploymentContext completeRollout() {
    triggerJobs();
    // Track which job types have already run; a second run of the same type means a loop.
    Set<JobType> jobs = new HashSet<>();
    List<Run> activeRuns;
    while ( ! (activeRuns = this.jobs.active(applicationId)).isEmpty())
        for (Run run : activeRuns)
            if (jobs.add(run.id().type())) {
                runJob(run.id().type());
                triggerJobs();
            }
            else
                throw new AssertionError("Job '" + run.id() + "' was run twice");
    assertFalse("Change should have no targets, but was " + instance().change(), instance().change().hasTargets());
    if (!deferDnsUpdates) {
        flushDnsUpdates();
    }
    return this;
}
/** Runs a deployment of the given package to the given dev/perf job, on the given version. */
public DeploymentContext runJob(JobType type, ApplicationPackage applicationPackage, Version vespaVersion) {
    jobs.deploy(instanceId, type, Optional.ofNullable(vespaVersion), applicationPackage);
    return runJob(type);
}
/** Runs a deployment of the given package to the given dev/perf job. */
public DeploymentContext runJob(JobType type, ApplicationPackage applicationPackage) {
    return runJob(type, applicationPackage, null);
}
/** Pulls the ready job trigger, and then runs the whole of the given job, successfully. */
public DeploymentContext runJob(JobType type) {
    var job = jobId(type);
    triggerJobs();
    doDeploy(job);
    if (job.type().isDeployment()) {
        doUpgrade(job);
        doConverge(job);
        // Manually deployed environments have no test phase.
        if (job.type().environment().isManuallyDeployed())
            return this;
    }
    if (job.type().isTest())
        doTests(job);
    return this;
}
/** Aborts the running job of the given type. */
public DeploymentContext abortJob(JobType type) {
    var job = jobId(type);
    assertNotSame(RunStatus.aborted, currentRun(job).status());
    jobs.abort(currentRun(job).id());
    jobAborted(type);
    return this;
}
/** Finish an already aborted run of the given type. */
public DeploymentContext jobAborted(JobType type) {
    Run run = jobs.last(instanceId, type).get();
    assertSame(RunStatus.aborted, run.status());
    assertFalse(run.hasEnded());
    runner.advance(run);
    assertTrue(jobs.run(run.id()).get().hasEnded());
    return this;
}
/** Simulate upgrade time out in given job */
public DeploymentContext timeOutUpgrade(JobType type) {
    var job = jobId(type);
    triggerJobs();
    RunId id = currentRun(job).id();
    doDeploy(job);
    // Advance the clock past the installation deadline without upgrading nodes.
    tester.clock().advance(InternalStepRunner.installationTimeout.plusSeconds(1));
    runner.advance(currentRun(job));
    assertTrue(jobs.run(id).get().hasFailed());
    assertTrue(jobs.run(id).get().hasEnded());
    return this;
}
/** Simulate convergence time out in given job */
public DeploymentContext timeOutConvergence(JobType type) {
    var job = jobId(type);
    triggerJobs();
    RunId id = currentRun(job).id();
    doDeploy(job);
    doUpgrade(job);
    // Nodes are upgraded but never converge; the installation deadline passes.
    tester.clock().advance(InternalStepRunner.installationTimeout.plusSeconds(1));
    runner.advance(currentRun(job));
    assertTrue(jobs.run(id).get().hasFailed());
    assertTrue(jobs.run(id).get().hasEnded());
    return this;
}
/** Deploy default application package, start a run for that change and return its ID */
public RunId newRun(JobType type) {
    submit();
    tester.readyJobsTrigger().maintain();
    // Production jobs require the test jobs to have passed first.
    if (type.isProduction()) {
        runJob(JobType.systemTest);
        runJob(JobType.stagingTest);
        tester.readyJobsTrigger().maintain();
    }
    Run run = jobs.active().stream()
                  .filter(r -> r.id().type() == type)
                  .findAny()
                  .orElseThrow(() -> new AssertionError(type + " is not among the active: " + jobs.active()));
    return run.id();
}
/** Asserts that a run of the given job type for this instance is currently active */
public void assertRunning(JobType type) {
    assertTrue(jobId(type) + " should be among the active: " + jobs.active(),
               jobs.active().stream().anyMatch(run -> run.id().application().equals(instanceId) && run.id().type() == type));
}
/** Asserts that no run of the given job type for this instance is currently active */
public void assertNotRunning(JobType type) {
    assertFalse(jobId(type) + " should not be among the active: " + jobs.active(),
                jobs.active().stream().anyMatch(run -> run.id().application().equals(instanceId) && run.id().type() == type));
}
/**
 * Deploys tester and real app, and completes tester and initial staging installation first if needed.
 * (A verbatim-duplicated assertion on the installInitialReal step has been removed.)
 */
private void doDeploy(JobId job) {
    RunId id = currentRun(job).id();
    ZoneId zone = zone(job);
    DeploymentId deployment = new DeploymentId(job.application(), zone);
    runner.advance(currentRun(job));
    if (job.type().isTest())
        doInstallTester(job);
    if (job.type() == JobType.stagingTest) {
        // Staging first installs the initial (source) platform, runs setup, then upgrades.
        assertEquals(unfinished, jobs.run(id).get().stepStatuses().get(Step.installInitialReal));
        Versions versions = currentRun(job).versions();
        tester.configServer().nodeRepository().doUpgrade(deployment, Optional.empty(), versions.sourcePlatform().orElse(versions.targetPlatform()));
        configServer().convergeServices(id.application(), zone);
        runner.advance(currentRun(job));
        assertEquals(Step.Status.succeeded, jobs.run(id).get().stepStatuses().get(Step.installInitialReal));
        assertEquals(Step.Status.succeeded, jobs.run(id).get().stepStatuses().get(Step.installTester));
        assertEquals(Step.Status.succeeded, jobs.run(id).get().stepStatuses().get(Step.startStagingSetup));
        assertEquals(unfinished, jobs.run(id).get().stepStatuses().get(Step.endStagingSetup));
        tester.cloud().set(TesterCloud.Status.SUCCESS);
        runner.advance(currentRun(job));
        assertEquals(succeeded, jobs.run(id).get().stepStatuses().get(Step.endStagingSetup));
    }
}
/** Upgrades nodes to target version. */
private void doUpgrade(JobId job) {
    RunId id = currentRun(job).id();
    ZoneId zone = zone(job);
    DeploymentId deployment = new DeploymentId(job.application(), zone);
    assertEquals(unfinished, jobs.run(id).get().stepStatuses().get(Step.installReal));
    configServer().nodeRepository().doUpgrade(deployment, Optional.empty(), currentRun(job).versions().targetPlatform());
    runner.advance(currentRun(job));
}
/** Returns the current run for the given job type, and verifies it is still running normally. */
private Run currentRun(JobId job) {
    Run run = jobs.last(job)
                  .filter(r -> r.id().type() == job.type())
                  .orElseThrow(() -> new AssertionError(job.type() + " is not among the active: " + jobs.active()));
    assertFalse(run.id() + " should not have failed yet", run.hasFailed());
    assertFalse(run.id() + " should not have ended yet", run.hasEnded());
    return run;
}
/** Lets nodes converge on new application version. */
private void doConverge(JobId job) {
    RunId id = currentRun(job).id();
    ZoneId zone = zone(job);
    assertEquals(unfinished, jobs.run(id).get().stepStatuses().get(Step.installReal));
    configServer().convergeServices(id.application(), zone);
    runner.advance(currentRun(job));
    // Manually deployed environments end the run after installation; no test steps follow.
    if (job.type().environment().isManuallyDeployed()) {
        assertEquals(Step.Status.succeeded, jobs.run(id).get().stepStatuses().get(Step.installReal));
        assertTrue(jobs.run(id).get().hasEnded());
        return;
    }
    assertEquals("Status of " + id, Step.Status.succeeded, jobs.run(id).get().stepStatuses().get(Step.installReal));
}
/** Installs tester and starts tests. */
private void doInstallTester(JobId job) {
    RunId id = currentRun(job).id();
    ZoneId zone = zone(job);
    assertEquals(unfinished, jobs.run(id).get().stepStatuses().get(Step.installTester));
    // Upgrade the tester's nodes, then let its services converge, before tests can start.
    configServer().nodeRepository().doUpgrade(new DeploymentId(TesterId.of(job.application()).id(), zone), Optional.empty(), tester.controller().systemVersion());
    runner.advance(currentRun(job));
    assertEquals(unfinished, jobs.run(id).get().stepStatuses().get(Step.installTester));
    configServer().convergeServices(TesterId.of(id.application()).id(), zone);
    runner.advance(currentRun(job));
    assertEquals(succeeded, jobs.run(id).get().stepStatuses().get(Step.installTester));
    runner.advance(currentRun(job));
}
/** Completes tests with success. */
private void doTests(JobId job) {
    RunId id = currentRun(job).id();
    ZoneId zone = zone(job);
    // Deployment-type jobs must have the real installation finished before tests.
    if (job.type().isDeployment())
        assertEquals(Step.Status.succeeded, jobs.run(id).get().stepStatuses().get(Step.installReal));
    assertEquals(Step.Status.succeeded, jobs.run(id).get().stepStatuses().get(Step.installTester));
    assertEquals(Step.Status.succeeded, jobs.run(id).get().stepStatuses().get(Step.startTests));
    assertEquals(unfinished, jobs.run(id).get().stepStatuses().get(Step.endTests));
    tester.cloud().set(TesterCloud.Status.SUCCESS);
    runner.advance(currentRun(job));
    assertTrue(jobs.run(id).get().hasEnded());
    assertFalse(jobs.run(id).get().hasFailed());
    // Only production jobs leave a deployment behind; the tester is always torn down.
    assertEquals(job.type().isProduction(), instance().deployments().containsKey(zone));
    assertTrue(configServer().nodeRepository().list(zone, TesterId.of(id.application()).id()).isEmpty());
}
/** The job ID of the given job type for this instance */
private JobId jobId(JobType type) {
    return new JobId(instanceId, type);
}
/** The zone the given job deploys to, in this tester's system */
private ZoneId zone(JobId job) {
    return job.type().zone(tester.controller().system());
}
/** Shorthand for the mock config server */
private ConfigServerMock configServer() {
    return tester.configServer();
}
/** Generates a throwaway self-signed EC certificate, valid for one second, for package trust. */
private static X509Certificate generateCertificate() {
    KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.EC, 256);
    X500Principal subject = new X500Principal("CN=subject");
    return X509CertificateBuilder.fromKeypair(keyPair,
                                              subject,
                                              Instant.now(),
                                              Instant.now().plusSeconds(1),
                                              SignatureAlgorithm.SHA512_WITH_ECDSA,
                                              BigInteger.valueOf(1))
                                 .build();
}
} | class DeploymentContext {
public static final ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service"))
.upgradePolicy("default")
.region("us-central-1")
.parallel("us-west-1", "us-east-3")
.emailRole("author")
.emailAddress("b@a")
.build();
public static final ApplicationPackage publicCdApplicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service"))
.upgradePolicy("default")
.region("aws-us-east-1c")
.emailRole("author")
.emailAddress("b@a")
.trust(generateCertificate())
.build();
public static final SourceRevision defaultSourceRevision = new SourceRevision("repository1", "master", "commit1");
private final TenantAndApplicationId applicationId;
private final ApplicationId instanceId;
private final TesterId testerId;
private final JobController jobs;
private final JobRunner runner;
private final DeploymentTester tester;
private ApplicationVersion lastSubmission = null;
private boolean deferDnsUpdates = false;
public DeploymentContext(ApplicationId instanceId, DeploymentTester tester) {
this.applicationId = TenantAndApplicationId.from(instanceId);
this.instanceId = instanceId;
this.testerId = TesterId.of(instanceId);
this.jobs = tester.controller().jobController();
this.runner = tester.runner();
this.tester = tester;
createTenantAndApplication();
}
private void createTenantAndApplication() {
try {
var tenant = tester.controllerTester().createTenant(instanceId.tenant().value());
tester.controllerTester().createApplication(tenant.value(), instanceId.application().value(), instanceId.instance().value());
} catch (IllegalArgumentException ignored) { }
}
public Application application() {
return tester.controller().applications().requireApplication(applicationId);
}
public Instance instance() {
return tester.controller().applications().requireInstance(instanceId);
}
public DeploymentStatus deploymentStatus() {
return tester.controller().jobController().deploymentStatus(application());
}
public Map<JobType, JobStatus> instanceJobs() {
return deploymentStatus().instanceJobs(instanceId.instance());
}
public Deployment deployment(ZoneId zone) {
return instance().deployments().get(zone);
}
public ApplicationId instanceId() {
return instanceId;
}
public TesterId testerId() { return testerId; }
public DeploymentId deploymentIdIn(ZoneId zone) {
return new DeploymentId(instanceId, zone);
}
/** Completely deploy the latest change */
public DeploymentContext deploy() {
assertTrue("Application package submitted", application().latestVersion().isPresent());
assertFalse("Submission is not already deployed", application().instances().values().stream()
.anyMatch(instance -> instance.deployments().values().stream()
.anyMatch(deployment -> deployment.applicationVersion().equals(lastSubmission))));
assertEquals(application().latestVersion(), instance().change().application());
completeRollout();
assertFalse(instance().change().hasTargets());
return this;
}
/** Upgrade platform of this to given version */
public DeploymentContext deployPlatform(Version version) {
assertEquals(instance().change().platform().get(), version);
assertFalse(application().instances().values().stream()
.anyMatch(instance -> instance.deployments().values().stream()
.anyMatch(deployment -> deployment.version().equals(version))));
assertEquals(version, instance().change().platform().get());
assertFalse(instance().change().application().isPresent());
completeRollout();
assertTrue(application().productionDeployments().values().stream()
.allMatch(deployments -> deployments.stream()
.allMatch(deployment -> deployment.version().equals(version))));
for (var spec : application().deploymentSpec().instances())
for (JobType type : new DeploymentSteps(spec, tester.controller()::system).productionJobs())
assertTrue(tester.configServer().nodeRepository()
.list(type.zone(tester.controller().system()), applicationId.defaultInstance()).stream()
.allMatch(node -> node.currentVersion().equals(version)));
assertFalse(instance().change().hasTargets());
return this;
}
/** Defer provisioning of load balancers in zones in given environment */
public DeploymentContext deferLoadBalancerProvisioningIn(Environment... environment) {
configServer().deferLoadBalancerProvisioningIn(Set.of(environment));
return this;
}
/** Defer DNS updates */
public DeploymentContext deferDnsUpdates() {
deferDnsUpdates = true;
return this;
}
/** Flush all pending DNS updates */
public DeploymentContext flushDnsUpdates() {
flushDnsUpdates(Integer.MAX_VALUE);
assertTrue("All name service requests dispatched",
tester.controller().curator().readNameServiceQueue().requests().isEmpty());
return this;
}
/** Flush count pending DNS updates */
public DeploymentContext flushDnsUpdates(int count) {
var dispatcher = new NameServiceDispatcher(tester.controller(), Duration.ofDays(1),
new JobControl(tester.controller().curator()), count);
dispatcher.run();
return this;
}
/** Add a routing policy for this in given zone, with status set to inactive */
public DeploymentContext addInactiveRoutingPolicy(ZoneId zone) {
var clusterId = "default-inactive";
var id = new RoutingPolicyId(instanceId, ClusterSpec.Id.from(clusterId), zone);
var policies = new LinkedHashMap<>(tester.controller().curator().readRoutingPolicies(instanceId));
policies.put(id, new RoutingPolicy(id, HostName.from("lb-host"),
Optional.empty(),
Set.of(EndpointId.of("default")),
new Status(false, GlobalRouting.DEFAULT_STATUS)));
tester.controller().curator().writeRoutingPolicies(instanceId, policies);
return this;
}
/** Submit given application package for deployment */
public DeploymentContext submit(ApplicationPackage applicationPackage) {
return submit(applicationPackage, Optional.of(defaultSourceRevision));
}
/** Submit given application package for deployment */
public DeploymentContext submit(ApplicationPackage applicationPackage, Optional<SourceRevision> sourceRevision) {
var projectId = tester.controller().applications()
.requireApplication(applicationId)
.projectId()
.orElse(1000);
lastSubmission = jobs.submit(applicationId, sourceRevision, Optional.of("a@b"), Optional.empty(),
Optional.empty(), projectId, applicationPackage, new byte[0]);
return this;
}
/** Submit the default application package for deployment */
public DeploymentContext submit() {
return submit(tester.controller().system().isPublic() ? publicCdApplicationPackage : applicationPackage);
}
/** Trigger all outstanding jobs, if any */
public DeploymentContext triggerJobs() {
while (tester.controller().applications().deploymentTrigger().triggerReadyJobs() > 0);
return this;
}
/** Fail current deployment in given job */
public DeploymentContext outOfCapacity(JobType type) {
return failDeployment(type,
new ConfigServerException(URI.create("https:
"Failed to deploy application",
"Out of capacity",
ConfigServerException.ErrorCode.OUT_OF_CAPACITY,
new RuntimeException("Out of capacity from test code")));
}
/** Fail current deployment in given job */
public DeploymentContext failDeployment(JobType type) {
return failDeployment(type, new IllegalArgumentException("Exception from test code"));
}
/** Fail current deployment in given job */
private DeploymentContext failDeployment(JobType type, RuntimeException exception) {
triggerJobs();
var job = jobId(type);
RunId id = currentRun(job).id();
configServer().throwOnNextPrepare(exception);
runner.advance(currentRun(job));
assertTrue(jobs.run(id).get().hasFailed());
assertTrue(jobs.run(id).get().hasEnded());
return this;
}
/** Returns the last submitted application version */
public Optional<ApplicationVersion> lastSubmission() {
return Optional.ofNullable(lastSubmission);
}
/** Runs and returns all remaining jobs for the application, at most once, and asserts the current change is rolled out. */
public DeploymentContext completeRollout() {
triggerJobs();
Set<JobType> jobs = new HashSet<>();
List<Run> activeRuns;
while ( ! (activeRuns = this.jobs.active(applicationId)).isEmpty())
for (Run run : activeRuns)
if (jobs.add(run.id().type())) {
runJob(run.id().type());
triggerJobs();
}
else
throw new AssertionError("Job '" + run.id() + "' was run twice");
assertFalse("Change should have no targets, but was " + instance().change(), instance().change().hasTargets());
if (!deferDnsUpdates) {
flushDnsUpdates();
}
return this;
}
/** Runs a deployment of the given package to the given dev/perf job, on the given version. */
public DeploymentContext runJob(JobType type, ApplicationPackage applicationPackage, Version vespaVersion) {
jobs.deploy(instanceId, type, Optional.ofNullable(vespaVersion), applicationPackage);
return runJob(type);
}
/** Runs a deployment of the given package to the given dev/perf job. */
public DeploymentContext runJob(JobType type, ApplicationPackage applicationPackage) {
return runJob(type, applicationPackage, null);
}
/** Pulls the ready job trigger, and then runs the whole of the given job, successfully. */
public DeploymentContext runJob(JobType type) {
var job = jobId(type);
triggerJobs();
doDeploy(job);
if (job.type().isDeployment()) {
doUpgrade(job);
doConverge(job);
if (job.type().environment().isManuallyDeployed())
return this;
}
if (job.type().isTest())
doTests(job);
return this;
}
/** Abort the running job of the given type and. */
public DeploymentContext abortJob(JobType type) {
var job = jobId(type);
assertNotSame(RunStatus.aborted, currentRun(job).status());
jobs.abort(currentRun(job).id());
jobAborted(type);
return this;
}
/** Finish an already aborted run of the given type. */
public DeploymentContext jobAborted(JobType type) {
Run run = jobs.last(instanceId, type).get();
assertSame(RunStatus.aborted, run.status());
assertFalse(run.hasEnded());
runner.advance(run);
assertTrue(jobs.run(run.id()).get().hasEnded());
return this;
}
/** Simulate upgrade time out in given job */
public DeploymentContext timeOutUpgrade(JobType type) {
var job = jobId(type);
triggerJobs();
RunId id = currentRun(job).id();
doDeploy(job);
tester.clock().advance(InternalStepRunner.installationTimeout.plusSeconds(1));
runner.advance(currentRun(job));
assertTrue(jobs.run(id).get().hasFailed());
assertTrue(jobs.run(id).get().hasEnded());
return this;
}
/** Simulate convergence time out in given job */
public DeploymentContext timeOutConvergence(JobType type) {
var job = jobId(type);
triggerJobs();
RunId id = currentRun(job).id();
doDeploy(job);
doUpgrade(job);
tester.clock().advance(InternalStepRunner.installationTimeout.plusSeconds(1));
runner.advance(currentRun(job));
assertTrue(jobs.run(id).get().hasFailed());
assertTrue(jobs.run(id).get().hasEnded());
return this;
}
/** Deploy default application package, start a run for that change and return its ID */
public RunId newRun(JobType type) {
submit();
tester.readyJobsTrigger().maintain();
if (type.isProduction()) {
runJob(JobType.systemTest);
runJob(JobType.stagingTest);
tester.readyJobsTrigger().maintain();
}
Run run = jobs.active().stream()
.filter(r -> r.id().type() == type)
.findAny()
.orElseThrow(() -> new AssertionError(type + " is not among the active: " + jobs.active()));
return run.id();
}
/** Start tests in system test stage */
public void assertRunning(JobType type) {
assertTrue(jobId(type) + " should be among the active: " + jobs.active(),
jobs.active().stream().anyMatch(run -> run.id().application().equals(instanceId) && run.id().type() == type));
}
public void assertNotRunning(JobType type) {
assertFalse(jobId(type) + " should not be among the active: " + jobs.active(),
jobs.active().stream().anyMatch(run -> run.id().application().equals(instanceId) && run.id().type() == type));
}
/** Deploys tester and real app, and completes tester and initial staging installation first if needed. */
private void doDeploy(JobId job) {
RunId id = currentRun(job).id();
ZoneId zone = zone(job);
DeploymentId deployment = new DeploymentId(job.application(), zone);
runner.advance(currentRun(job));
if (job.type().isTest())
doInstallTester(job);
if (job.type() == JobType.stagingTest) {
assertEquals(unfinished, jobs.run(id).get().stepStatuses().get(Step.installInitialReal));
Versions versions = currentRun(job).versions();
tester.configServer().nodeRepository().doUpgrade(deployment, Optional.empty(), versions.sourcePlatform().orElse(versions.targetPlatform()));
configServer().convergeServices(id.application(), zone);
runner.advance(currentRun(job));
assertEquals(Step.Status.succeeded, jobs.run(id).get().stepStatuses().get(Step.installInitialReal));
assertEquals(Step.Status.succeeded, jobs.run(id).get().stepStatuses().get(Step.installInitialReal));
assertEquals(Step.Status.succeeded, jobs.run(id).get().stepStatuses().get(Step.installTester));
assertEquals(Step.Status.succeeded, jobs.run(id).get().stepStatuses().get(Step.startStagingSetup));
assertEquals(unfinished, jobs.run(id).get().stepStatuses().get(Step.endStagingSetup));
tester.cloud().set(TesterCloud.Status.SUCCESS);
runner.advance(currentRun(job));
assertEquals(succeeded, jobs.run(id).get().stepStatuses().get(Step.endStagingSetup));
}
}
/** Upgrades nodes to target version. */
private void doUpgrade(JobId job) {
RunId id = currentRun(job).id();
ZoneId zone = zone(job);
DeploymentId deployment = new DeploymentId(job.application(), zone);
assertEquals(unfinished, jobs.run(id).get().stepStatuses().get(Step.installReal));
configServer().nodeRepository().doUpgrade(deployment, Optional.empty(), currentRun(job).versions().targetPlatform());
runner.advance(currentRun(job));
}
/** Returns the current run for the given job type, and verifies it is still running normally. */
private Run currentRun(JobId job) {
Run run = jobs.last(job)
.filter(r -> r.id().type() == job.type())
.orElseThrow(() -> new AssertionError(job.type() + " is not among the active: " + jobs.active()));
assertFalse(run.id() + " should not have failed yet", run.hasFailed());
assertFalse(run.id() + " should not have ended yet", run.hasEnded());
return run;
}
/** Lets nodes converge on new application version. */
private void doConverge(JobId job) {
RunId id = currentRun(job).id();
ZoneId zone = zone(job);
assertEquals(unfinished, jobs.run(id).get().stepStatuses().get(Step.installReal));
configServer().convergeServices(id.application(), zone);
runner.advance(currentRun(job));
if (job.type().environment().isManuallyDeployed()) {
assertEquals(Step.Status.succeeded, jobs.run(id).get().stepStatuses().get(Step.installReal));
assertTrue(jobs.run(id).get().hasEnded());
return;
}
assertEquals("Status of " + id, Step.Status.succeeded, jobs.run(id).get().stepStatuses().get(Step.installReal));
}
/** Installs tester and starts tests. */
private void doInstallTester(JobId job) {
RunId id = currentRun(job).id();
ZoneId zone = zone(job);
assertEquals(unfinished, jobs.run(id).get().stepStatuses().get(Step.installTester));
configServer().nodeRepository().doUpgrade(new DeploymentId(TesterId.of(job.application()).id(), zone), Optional.empty(), tester.controller().systemVersion());
runner.advance(currentRun(job));
assertEquals(unfinished, jobs.run(id).get().stepStatuses().get(Step.installTester));
configServer().convergeServices(TesterId.of(id.application()).id(), zone);
runner.advance(currentRun(job));
assertEquals(succeeded, jobs.run(id).get().stepStatuses().get(Step.installTester));
runner.advance(currentRun(job));
}
/** Completes tests with success. */
private void doTests(JobId job) {
RunId id = currentRun(job).id();
ZoneId zone = zone(job);
if (job.type().isDeployment())
assertEquals(Step.Status.succeeded, jobs.run(id).get().stepStatuses().get(Step.installReal));
assertEquals(Step.Status.succeeded, jobs.run(id).get().stepStatuses().get(Step.installTester));
assertEquals(Step.Status.succeeded, jobs.run(id).get().stepStatuses().get(Step.startTests));
assertEquals(unfinished, jobs.run(id).get().stepStatuses().get(Step.endTests));
tester.cloud().set(TesterCloud.Status.SUCCESS);
runner.advance(currentRun(job));
assertTrue(jobs.run(id).get().hasEnded());
assertFalse(jobs.run(id).get().hasFailed());
assertEquals(job.type().isProduction(), instance().deployments().containsKey(zone));
assertTrue(configServer().nodeRepository().list(zone, TesterId.of(id.application()).id()).isEmpty());
}
private JobId jobId(JobType type) {
return new JobId(instanceId, type);
}
private ZoneId zone(JobId job) {
return job.type().zone(tester.controller().system());
}
private ConfigServerMock configServer() {
return tester.configServer();
}
private static X509Certificate generateCertificate() {
KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.EC, 256);
X500Principal subject = new X500Principal("CN=subject");
return X509CertificateBuilder.fromKeypair(keyPair,
subject,
Instant.now(),
Instant.now().plusSeconds(1),
SignatureAlgorithm.SHA512_WITH_ECDSA,
BigInteger.valueOf(1))
.build();
}
} |
This must match the implementation in the routing layer, unfortunately. | private static String sanitizeUpstream(String part) {
return truncate(part.toLowerCase()
.replace('_', '-')
.replaceAll("[^a-z0-9-]*", ""));
} | .replaceAll("[^a-z0-9-]*", "")); | private static String sanitizeUpstream(String part) {
return truncate(part.toLowerCase()
.replace('_', '-')
.replaceAll("[^a-z0-9-]*", ""));
} | class Endpoint {
private static final String YAHOO_DNS_SUFFIX = ".vespa.yahooapis.com";
private static final String OATH_DNS_SUFFIX = ".vespa.oath.cloud";
private static final String PUBLIC_DNS_SUFFIX = ".public.vespa.oath.cloud";
private static final String PUBLIC_CD_DNS_SUFFIX = ".public-cd.vespa.oath.cloud";
private final String name;
private final URI url;
private final Scope scope;
private final boolean legacy;
private final RoutingMethod routingMethod;
private final boolean tls;
private final boolean wildcard;
private Endpoint(String name, ApplicationId application, ZoneId zone, SystemName system, Port port, boolean legacy,
RoutingMethod routingMethod, boolean wildcard) {
Objects.requireNonNull(name, "name must be non-null");
Objects.requireNonNull(application, "application must be non-null");
Objects.requireNonNull(system, "system must be non-null");
Objects.requireNonNull(port, "port must be non-null");
Objects.requireNonNull(routingMethod, "routingMethod must be non-null");
this.name = name;
this.url = createUrl(name, application, zone, system, port, legacy, routingMethod);
this.scope = zone == null ? Scope.global : Scope.zone;
this.legacy = legacy;
this.routingMethod = routingMethod;
this.tls = port.tls;
this.wildcard = wildcard;
}
/**
* Returns the name of this endpoint (the first component of the DNS name). Depending on the endpoint type, this
* can be one of the following:
* - A wildcard (any scope)
* - A cluster name (only zone scope)
* - An endpoint ID (only global scope)
*/
public String name() {
return name;
}
/** Returns the URL used to access this */
public URI url() {
return url;
}
/** Returns the DNS name of this */
public String dnsName() {
return url.getAuthority().replaceAll(":.*", "");
}
/** Returns the scope of this */
public Scope scope() {
return scope;
}
/** Returns whether this is considered a legacy DNS name that is due for removal */
public boolean legacy() {
return legacy;
}
/** Returns the routing used for this */
public RoutingMethod routingMethod() {
return routingMethod;
}
/** Returns whether this endpoint supports TLS connections */
public boolean tls() {
return tls;
}
/** Returns whether this requires a rotation to be reachable */
public boolean requiresRotation() {
return routingMethod.isShared() && scope == Scope.global;
}
/** Returns whether this is a wildcard endpoint (used only in certificates) */
public boolean wildcard() {
return wildcard;
}
/** Returns the upstream ID of given deployment. This *must* match what the routing layer generates */
public String upstreamIdOf(DeploymentId deployment) {
if (scope != Scope.global) throw new IllegalArgumentException("Scope " + scope + " does not have upstream name");
if (!routingMethod.isShared()) throw new IllegalArgumentException("Routing method " + routingMethod + " does not have upstream name");
return upstreamIdOf(name, deployment.applicationId(), deployment.zoneId());
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Endpoint endpoint = (Endpoint) o;
return url.equals(endpoint.url);
}
@Override
public int hashCode() {
return Objects.hash(url);
}
@Override
public String toString() {
return String.format("endpoint %s [scope=%s, legacy=%s, routingMethod=%s]", url, scope, legacy, routingMethod);
}
/** Returns the DNS suffix used for endpoints in given system */
public static String dnsSuffix(SystemName system) {
return dnsSuffix(system, false);
}
private static URI createUrl(String name, ApplicationId application, ZoneId zone, SystemName system,
Port port, boolean legacy, RoutingMethod routingMethod) {
String scheme = port.tls ? "https" : "http";
String separator = separator(system, routingMethod, port.tls);
String portPart = port.isDefault() ? "" : ":" + port.port;
return URI.create(scheme + ":
sanitize(namePart(name, separator)) +
systemPart(system, separator) +
sanitize(instancePart(application, separator)) +
sanitize(application.application().value()) +
separator +
sanitize(application.tenant().value()) +
"." +
scopePart(zone, legacy) +
dnsSuffix(system, legacy) +
portPart +
"/");
}
private static String sanitize(String part) {
return part.replace('_', '-');
}
private static String separator(SystemName system, RoutingMethod routingMethod, boolean tls) {
if (!tls) return ".";
if (routingMethod.isDirect()) return ".";
if (system.isPublic()) return ".";
return "--";
}
private static String namePart(String name, String separator) {
if ("default".equals(name)) return "";
return name + separator;
}
private static String scopePart(ZoneId zone, boolean legacy) {
if (zone == null) return "global";
if (!legacy && zone.environment().isProduction()) return zone.region().value();
return zone.region().value() + "." + zone.environment().value();
}
private static String instancePart(ApplicationId application, String separator) {
if (application.instance().isDefault()) return "";
return application.instance().value() + separator;
}
private static String systemPart(SystemName system, String separator) {
if (!system.isCd()) return "";
return system.value() + separator;
}
private static String dnsSuffix(SystemName system, boolean legacy) {
switch (system) {
case cd:
case main:
if (legacy) return YAHOO_DNS_SUFFIX;
return OATH_DNS_SUFFIX;
case Public:
return PUBLIC_DNS_SUFFIX;
case PublicCd:
return PUBLIC_CD_DNS_SUFFIX;
default: throw new IllegalArgumentException("No DNS suffix declared for system " + system);
}
}
private static String upstreamIdOf(String name, ApplicationId application, ZoneId zone) {
return Stream.of(namePart(name, ""),
instancePart(application, ""),
application.tenant().value(),
application.application().value(),
zone.region().value(),
zone.environment().value())
.filter(Predicate.not(String::isEmpty))
.map(Endpoint::sanitizeUpstream)
.collect(Collectors.joining("."));
}
/** Remove any invalid characters from a upstream part */
/** Truncate the given part at the front so its length does not exceed 63 characters */
private static String truncate(String part) {
return part.substring(Math.max(0, part.length() - 63));
}
/** An endpoint's scope */
public enum Scope {
/** Endpoint points to all zones */
global,
/** Endpoint points to a single zone */
zone,
}
/** Represents an endpoint's HTTP port */
public static class Port {
private final int port;
private final boolean tls;
private Port(int port, boolean tls) {
if (port < 1 || port > 65535) {
throw new IllegalArgumentException("Port must be between 1 and 65535, got " + port);
}
this.port = port;
this.tls = tls;
}
private boolean isDefault() {
return port == 80 || port == 443;
}
/** Returns the default HTTPS port */
public static Port tls() {
return new Port(443, true);
}
/** Returns default port for the given routing method */
public static Port fromRoutingMethod(RoutingMethod method) {
if (method.isDirect()) return Port.tls();
return Port.tls(4443);
}
/** Create a HTTPS port */
public static Port tls(int port) {
return new Port(port, true);
}
/** Create a HTTP port */
public static Port plain(int port) {
return new Port(port, false);
}
}
/** Build an endpoint for given application */
public static EndpointBuilder of(ApplicationId application) {
return new EndpointBuilder(application);
}
public static class EndpointBuilder {
private final ApplicationId application;
private ZoneId zone;
private ClusterSpec.Id cluster;
private EndpointId endpointId;
private Port port;
private RoutingMethod routingMethod = RoutingMethod.shared;
private boolean legacy = false;
private boolean wildcard = false;
private EndpointBuilder(ApplicationId application) {
this.application = application;
}
/** Sets the cluster target for this */
public EndpointBuilder target(ClusterSpec.Id cluster, ZoneId zone) {
if (endpointId != null || wildcard) {
throw new IllegalArgumentException("Cannot set multiple target types");
}
this.cluster = cluster;
this.zone = zone;
return this;
}
/** Sets the endpoint target ID for this (as defined in deployments.xml) */
public EndpointBuilder named(EndpointId endpointId) {
if (cluster != null || wildcard) {
throw new IllegalArgumentException("Cannot set multiple target types");
}
this.endpointId = endpointId;
return this;
}
/** Sets the global wildcard target for this */
public EndpointBuilder wildcard() {
if (endpointId != null || cluster != null) {
throw new IllegalArgumentException("Cannot set multiple target types");
}
this.wildcard = true;
return this;
}
/** Sets the zone wildcard target for this */
public EndpointBuilder wildcard(ZoneId zone) {
if(endpointId != null || cluster != null) {
throw new IllegalArgumentException("Cannot set multiple target types");
}
this.zone = zone;
this.wildcard = true;
return this;
}
/** Sets the port of this */
public EndpointBuilder on(Port port) {
this.port = port;
return this;
}
/** Marks this as a legacy endpoint */
public EndpointBuilder legacy() {
this.legacy = true;
return this;
}
/** Sets the routing method for this */
public EndpointBuilder routingMethod(RoutingMethod method) {
this.routingMethod = method;
return this;
}
/** Sets the system that owns this */
public Endpoint in(SystemName system) {
String name;
if (wildcard) {
name = "*";
} else if (endpointId != null) {
name = endpointId.id();
} else if (cluster != null) {
name = cluster.value();
} else {
throw new IllegalArgumentException("Must set either cluster, rotation or wildcard target");
}
if (system.isPublic() && routingMethod != RoutingMethod.exclusive) {
throw new IllegalArgumentException("Public system only supports routing method " + RoutingMethod.exclusive);
}
if (routingMethod.isDirect() && !port.isDefault()) {
throw new IllegalArgumentException("Routing method " + routingMethod + " can only use default port");
}
return new Endpoint(name, application, zone, system, port, legacy, routingMethod, wildcard);
}
}
} | class Endpoint {
private static final String YAHOO_DNS_SUFFIX = ".vespa.yahooapis.com";
private static final String OATH_DNS_SUFFIX = ".vespa.oath.cloud";
private static final String PUBLIC_DNS_SUFFIX = ".public.vespa.oath.cloud";
private static final String PUBLIC_CD_DNS_SUFFIX = ".public-cd.vespa.oath.cloud";
private final String name;
private final URI url;
private final Scope scope;
private final boolean legacy;
private final RoutingMethod routingMethod;
private final boolean tls;
private final boolean wildcard;
private Endpoint(String name, ApplicationId application, ZoneId zone, SystemName system, Port port, boolean legacy,
RoutingMethod routingMethod, boolean wildcard) {
Objects.requireNonNull(name, "name must be non-null");
Objects.requireNonNull(application, "application must be non-null");
Objects.requireNonNull(system, "system must be non-null");
Objects.requireNonNull(port, "port must be non-null");
Objects.requireNonNull(routingMethod, "routingMethod must be non-null");
this.name = name;
this.url = createUrl(name, application, zone, system, port, legacy, routingMethod);
this.scope = zone == null ? Scope.global : Scope.zone;
this.legacy = legacy;
this.routingMethod = routingMethod;
this.tls = port.tls;
this.wildcard = wildcard;
}
/**
* Returns the name of this endpoint (the first component of the DNS name). Depending on the endpoint type, this
* can be one of the following:
* - A wildcard (any scope)
* - A cluster name (only zone scope)
* - An endpoint ID (only global scope)
*/
public String name() {
return name;
}
/** Returns the URL used to access this */
public URI url() {
return url;
}
/** Returns the DNS name of this */
public String dnsName() {
return url.getAuthority().replaceAll(":.*", "");
}
/** Returns the scope of this */
public Scope scope() {
return scope;
}
/** Returns whether this is considered a legacy DNS name that is due for removal */
public boolean legacy() {
return legacy;
}
/** Returns the routing used for this */
public RoutingMethod routingMethod() {
return routingMethod;
}
/** Returns whether this endpoint supports TLS connections */
public boolean tls() {
return tls;
}
/** Returns whether this requires a rotation to be reachable */
public boolean requiresRotation() {
return routingMethod.isShared() && scope == Scope.global;
}
/** Returns whether this is a wildcard endpoint (used only in certificates) */
public boolean wildcard() {
return wildcard;
}
/** Returns the upstream ID of given deployment. This *must* match what the routing layer generates */
public String upstreamIdOf(DeploymentId deployment) {
if (scope != Scope.global) throw new IllegalArgumentException("Scope " + scope + " does not have upstream name");
if (!routingMethod.isShared()) throw new IllegalArgumentException("Routing method " + routingMethod + " does not have upstream name");
return upstreamIdOf(name, deployment.applicationId(), deployment.zoneId());
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Endpoint endpoint = (Endpoint) o;
return url.equals(endpoint.url);
}
@Override
public int hashCode() {
return Objects.hash(url);
}
@Override
public String toString() {
return String.format("endpoint %s [scope=%s, legacy=%s, routingMethod=%s]", url, scope, legacy, routingMethod);
}
/** Returns the DNS suffix used for endpoints in given system */
public static String dnsSuffix(SystemName system) {
return dnsSuffix(system, false);
}
private static URI createUrl(String name, ApplicationId application, ZoneId zone, SystemName system,
Port port, boolean legacy, RoutingMethod routingMethod) {
String scheme = port.tls ? "https" : "http";
String separator = separator(system, routingMethod, port.tls);
String portPart = port.isDefault() ? "" : ":" + port.port;
return URI.create(scheme + ":
sanitize(namePart(name, separator)) +
systemPart(system, separator) +
sanitize(instancePart(application, separator)) +
sanitize(application.application().value()) +
separator +
sanitize(application.tenant().value()) +
"." +
scopePart(zone, legacy) +
dnsSuffix(system, legacy) +
portPart +
"/");
}
private static String sanitize(String part) {
return part.replace('_', '-');
}
private static String separator(SystemName system, RoutingMethod routingMethod, boolean tls) {
if (!tls) return ".";
if (routingMethod.isDirect()) return ".";
if (system.isPublic()) return ".";
return "--";
}
private static String namePart(String name, String separator) {
if ("default".equals(name)) return "";
return name + separator;
}
private static String scopePart(ZoneId zone, boolean legacy) {
if (zone == null) return "global";
if (!legacy && zone.environment().isProduction()) return zone.region().value();
return zone.region().value() + "." + zone.environment().value();
}
private static String instancePart(ApplicationId application, String separator) {
if (application.instance().isDefault()) return "";
return application.instance().value() + separator;
}
private static String systemPart(SystemName system, String separator) {
if (!system.isCd()) return "";
return system.value() + separator;
}
private static String dnsSuffix(SystemName system, boolean legacy) {
switch (system) {
case cd:
case main:
if (legacy) return YAHOO_DNS_SUFFIX;
return OATH_DNS_SUFFIX;
case Public:
return PUBLIC_DNS_SUFFIX;
case PublicCd:
return PUBLIC_CD_DNS_SUFFIX;
default: throw new IllegalArgumentException("No DNS suffix declared for system " + system);
}
}
private static String upstreamIdOf(String name, ApplicationId application, ZoneId zone) {
return Stream.of(namePart(name, ""),
instancePart(application, ""),
application.tenant().value(),
application.application().value(),
zone.region().value(),
zone.environment().value())
.filter(Predicate.not(String::isEmpty))
.map(Endpoint::sanitizeUpstream)
.collect(Collectors.joining("."));
}
/** Remove any invalid characters from a upstream part */
/** Truncate the given part at the front so its length does not exceed 63 characters */
private static String truncate(String part) {
return part.substring(Math.max(0, part.length() - 63));
}
/** An endpoint's scope */
public enum Scope {
/** Endpoint points to all zones */
global,
/** Endpoint points to a single zone */
zone,
}
/** Represents an endpoint's HTTP port */
public static class Port {
private final int port;
private final boolean tls;
private Port(int port, boolean tls) {
if (port < 1 || port > 65535) {
throw new IllegalArgumentException("Port must be between 1 and 65535, got " + port);
}
this.port = port;
this.tls = tls;
}
private boolean isDefault() {
return port == 80 || port == 443;
}
/** Returns the default HTTPS port */
public static Port tls() {
return new Port(443, true);
}
/** Returns default port for the given routing method */
public static Port fromRoutingMethod(RoutingMethod method) {
if (method.isDirect()) return Port.tls();
return Port.tls(4443);
}
/** Create a HTTPS port */
public static Port tls(int port) {
return new Port(port, true);
}
/** Create a HTTP port */
public static Port plain(int port) {
return new Port(port, false);
}
}
/** Build an endpoint for given application */
public static EndpointBuilder of(ApplicationId application) {
return new EndpointBuilder(application);
}
/**
 * Builder for {@link Endpoint}. Exactly one target type (cluster, endpoint ID or wildcard)
 * and a port must be set before calling {@link #in(SystemName)}.
 */
public static class EndpointBuilder {

    private final ApplicationId application;

    private ZoneId zone;
    private ClusterSpec.Id cluster;
    private EndpointId endpointId;
    private Port port;
    private RoutingMethod routingMethod = RoutingMethod.shared;
    private boolean legacy = false;
    private boolean wildcard = false;

    private EndpointBuilder(ApplicationId application) {
        this.application = application;
    }

    /** Sets the cluster target for this */
    public EndpointBuilder target(ClusterSpec.Id cluster, ZoneId zone) {
        if (endpointId != null || wildcard) {
            throw new IllegalArgumentException("Cannot set multiple target types");
        }
        this.cluster = cluster;
        this.zone = zone;
        return this;
    }

    /** Sets the endpoint target ID for this (as defined in deployments.xml) */
    public EndpointBuilder named(EndpointId endpointId) {
        if (cluster != null || wildcard) {
            throw new IllegalArgumentException("Cannot set multiple target types");
        }
        this.endpointId = endpointId;
        return this;
    }

    /** Sets the global wildcard target for this */
    public EndpointBuilder wildcard() {
        if (endpointId != null || cluster != null) {
            throw new IllegalArgumentException("Cannot set multiple target types");
        }
        this.wildcard = true;
        return this;
    }

    /** Sets the zone wildcard target for this */
    public EndpointBuilder wildcard(ZoneId zone) {
        if (endpointId != null || cluster != null) {
            throw new IllegalArgumentException("Cannot set multiple target types");
        }
        this.zone = zone;
        this.wildcard = true;
        return this;
    }

    /** Sets the port of this */
    public EndpointBuilder on(Port port) {
        this.port = port;
        return this;
    }

    /** Marks this as a legacy endpoint */
    public EndpointBuilder legacy() {
        this.legacy = true;
        return this;
    }

    /** Sets the routing method for this */
    public EndpointBuilder routingMethod(RoutingMethod method) {
        this.routingMethod = method;
        return this;
    }

    /** Sets the system that owns this and builds the endpoint */
    public Endpoint in(SystemName system) {
        String name;
        if (wildcard) {
            name = "*";
        } else if (endpointId != null) {
            name = endpointId.id();
        } else if (cluster != null) {
            name = cluster.value();
        } else {
            throw new IllegalArgumentException("Must set either cluster, rotation or wildcard target");
        }
        // Fail with an explicit message instead of a bare NPE from port.isDefault() below
        // when the caller forgot on(Port)
        Objects.requireNonNull(port, "port must be set with on(Port) before calling in(SystemName)");
        if (system.isPublic() && routingMethod != RoutingMethod.exclusive) {
            throw new IllegalArgumentException("Public system only supports routing method " + RoutingMethod.exclusive);
        }
        if (routingMethod.isDirect() && !port.isDefault()) {
            throw new IllegalArgumentException("Routing method " + routingMethod + " can only use default port");
        }
        return new Endpoint(name, application, zone, system, port, legacy, routingMethod, wildcard);
    }

}
} |
Sorry I didn't catch this >_< | private static String upstreamIdOf(String name, ApplicationId application, ZoneId zone) {
return Stream.of(namePart(name, ""),
instancePart(application, ""),
application.application().value(),
application.tenant().value(),
zone.region().value(),
zone.environment().value())
.filter(Predicate.not(String::isEmpty))
.map(Endpoint::sanitizeUpstream)
.collect(Collectors.joining("."));
} | application.tenant().value(), | private static String upstreamIdOf(String name, ApplicationId application, ZoneId zone) {
return Stream.of(namePart(name, ""),
instancePart(application, ""),
application.application().value(),
application.tenant().value(),
zone.region().value(),
zone.environment().value())
.filter(Predicate.not(String::isEmpty))
.map(Endpoint::sanitizeUpstream)
.collect(Collectors.joining("."));
} | class Endpoint {
private static final String YAHOO_DNS_SUFFIX = ".vespa.yahooapis.com";
private static final String OATH_DNS_SUFFIX = ".vespa.oath.cloud";
private static final String PUBLIC_DNS_SUFFIX = ".public.vespa.oath.cloud";
private static final String PUBLIC_CD_DNS_SUFFIX = ".public-cd.vespa.oath.cloud";
private final String name;
private final URI url;
private final Scope scope;
private final boolean legacy;
private final RoutingMethod routingMethod;
private final boolean tls;
private final boolean wildcard;
/**
 * Creates a new endpoint.
 *
 * @param name          endpoint name: "*", an endpoint ID or a cluster name
 * @param application   owning application; must be non-null
 * @param zone          deployment zone, or null for a global endpoint
 * @param system        system this endpoint exists in; must be non-null
 * @param port          HTTP(S) port; must be non-null
 * @param legacy        whether this is a legacy DNS name due for removal
 * @param routingMethod routing method used to reach this endpoint; must be non-null
 * @param wildcard      whether this is a wildcard endpoint (used only in certificates)
 */
private Endpoint(String name, ApplicationId application, ZoneId zone, SystemName system, Port port, boolean legacy,
                 RoutingMethod routingMethod, boolean wildcard) {
    Objects.requireNonNull(name, "name must be non-null");
    Objects.requireNonNull(application, "application must be non-null");
    Objects.requireNonNull(system, "system must be non-null");
    Objects.requireNonNull(port, "port must be non-null");
    Objects.requireNonNull(routingMethod, "routingMethod must be non-null");
    this.name = name;
    this.url = createUrl(name, application, zone, system, port, legacy, routingMethod);
    this.scope = zone == null ? Scope.global : Scope.zone; // absent zone means the endpoint spans all zones
    this.legacy = legacy;
    this.routingMethod = routingMethod;
    this.tls = port.tls;
    this.wildcard = wildcard;
}
/**
 * Returns the name of this endpoint (the first component of the DNS name). Depending on the endpoint type, this
 * can be one of the following:
 * - A wildcard (any scope)
 * - A cluster name (only zone scope)
 * - An endpoint ID (only global scope)
 */
public String name() {
    return name;
}

/** Returns the URL used to access this */
public URI url() {
    return url;
}

/** Returns the DNS name of this: the URL authority with any trailing port stripped */
public String dnsName() {
    return url.getAuthority().replaceAll(":.*", "");
}

/** Returns the scope of this: zone when bound to a single zone, global otherwise */
public Scope scope() {
    return scope;
}

/** Returns whether this is considered a legacy DNS name that is due for removal */
public boolean legacy() {
    return legacy;
}

/** Returns the routing method used for this */
public RoutingMethod routingMethod() {
    return routingMethod;
}

/** Returns whether this endpoint supports TLS connections */
public boolean tls() {
    return tls;
}

/** Returns whether this requires a rotation to be reachable: only global endpoints with shared routing do */
public boolean requiresRotation() {
    return routingMethod.isShared() && scope == Scope.global;
}

/** Returns whether this is a wildcard endpoint (used only in certificates) */
public boolean wildcard() {
    return wildcard;
}
/**
 * Returns the upstream ID of given deployment. This *must* match what the routing layer generates.
 * Only defined for global endpoints with shared routing.
 */
public String upstreamIdOf(DeploymentId deployment) {
    if (scope != Scope.global) throw new IllegalArgumentException("Scope " + scope + " does not have upstream name");
    if (!routingMethod.isShared()) throw new IllegalArgumentException("Routing method " + routingMethod + " does not have upstream name");
    return upstreamIdOf(name, deployment.applicationId(), deployment.zoneId());
}

/** Two endpoints are equal iff they have the same URL and routing method */
@Override
public boolean equals(Object o) {
    if (this == o) return true;
    if (o == null || getClass() != o.getClass()) return false;
    Endpoint endpoint = (Endpoint) o;
    return url.equals(endpoint.url) &&
           routingMethod == endpoint.routingMethod;
}

@Override
public int hashCode() {
    // Must stay consistent with equals: same fields
    return Objects.hash(url, routingMethod);
}

@Override
public String toString() {
    return String.format("endpoint %s [scope=%s, legacy=%s, routingMethod=%s]", url, scope, legacy, routingMethod);
}

/** Returns the (non-legacy) DNS suffix used for endpoints in given system */
public static String dnsSuffix(SystemName system) {
    return dnsSuffix(system, false);
}
private static URI createUrl(String name, ApplicationId application, ZoneId zone, SystemName system,
Port port, boolean legacy, RoutingMethod routingMethod) {
String scheme = port.tls ? "https" : "http";
String separator = separator(system, routingMethod, port.tls);
String portPart = port.isDefault() ? "" : ":" + port.port;
return URI.create(scheme + ":
sanitize(namePart(name, separator)) +
systemPart(system, separator) +
sanitize(instancePart(application, separator)) +
sanitize(application.application().value()) +
separator +
sanitize(application.tenant().value()) +
"." +
scopePart(zone, legacy) +
dnsSuffix(system, legacy) +
portPart +
"/");
}
/** Replaces every underscore in the given part with a dash. */
private static String sanitize(String part) {
    // CharSequence overload of replace; identical result to the char-based variant
    return part.replace("_", "-");
}
/** Returns the separator between the name parts of the DNS name: "--" only for shared routing over TLS outside public systems. */
private static String separator(SystemName system, RoutingMethod routingMethod, boolean tls) {
    boolean useDashes = tls && !routingMethod.isDirect() && !system.isPublic();
    return useDashes ? "--" : ".";
}
/** Returns the name component followed by the separator, or empty for the "default" name. */
private static String namePart(String name, String separator) {
    return "default".equals(name) ? "" : name + separator;
}
/**
 * Returns the zone-dependent part of the DNS name.
 * A null zone means the endpoint is global; non-legacy production endpoints omit the environment component.
 */
private static String scopePart(ZoneId zone, boolean legacy) {
    if (zone == null) return "global";
    if (!legacy && zone.environment().isProduction()) return zone.region().value();
    return zone.region().value() + "." + zone.environment().value();
}
/** Returns the instance component followed by the separator, or empty for the default instance. */
private static String instancePart(ApplicationId application, String separator) {
    return application.instance().isDefault() ? "" : application.instance().value() + separator;
}
/** Returns the system component followed by the separator, or empty outside CD systems. */
private static String systemPart(SystemName system, String separator) {
    return system.isCd() ? system.value() + separator : "";
}
/**
 * Returns the DNS suffix for given system.
 *
 * @param legacy whether to return the legacy suffix; only cd/main have one
 * @throws IllegalArgumentException for systems with no declared suffix
 */
private static String dnsSuffix(SystemName system, boolean legacy) {
    switch (system) {
        case cd:
        case main:
            if (legacy) return YAHOO_DNS_SUFFIX;
            return OATH_DNS_SUFFIX;
        case Public:
            return PUBLIC_DNS_SUFFIX;
        case PublicCd:
            return PUBLIC_CD_DNS_SUFFIX;
        default: throw new IllegalArgumentException("No DNS suffix declared for system " + system);
    }
}
/** Lower-cases the part, maps underscores to dashes, drops every other non [a-z0-9-] character and truncates. */
private static String sanitizeUpstream(String part) {
    String lowered = part.toLowerCase();
    String dashed = lowered.replace('_', '-');
    String cleaned = dashed.replaceAll("[^a-z0-9-]*", "");
    return truncate(cleaned);
}
/** Truncates the given part at the front so its length does not exceed 63 characters. */
private static String truncate(String part) {
    int overflow = part.length() - 63;
    return overflow > 0 ? part.substring(overflow) : part;
}
/** An endpoint's scope: how many zones it covers */
public enum Scope {

    /** Endpoint points to all zones */
    global,

    /** Endpoint points to a single zone */
    zone,

}
/** Represents an endpoint's HTTP port */
public static class Port {

    private final int port;
    private final boolean tls; // whether connections to this port use TLS

    private Port(int port, boolean tls) {
        if (port < 1 || port > 65535) {
            throw new IllegalArgumentException("Port must be between 1 and 65535, got " + port);
        }
        this.port = port;
        this.tls = tls;
    }

    /** Returns whether this is a standard port that may be omitted from URLs */
    private boolean isDefault() {
        return port == 80 || port == 443;
    }

    /** Returns the default HTTPS port */
    public static Port tls() {
        return new Port(443, true);
    }

    /** Returns default port for the given routing method: 443 for direct routing, 4443 otherwise */
    public static Port fromRoutingMethod(RoutingMethod method) {
        if (method.isDirect()) return Port.tls();
        return Port.tls(4443);
    }

    /** Create a HTTPS port */
    public static Port tls(int port) {
        return new Port(port, true);
    }

    /** Create a HTTP port */
    public static Port plain(int port) {
        return new Port(port, false);
    }

}
/**
 * Entry point for building an endpoint for the given application.
 *
 * @param application the application owning the endpoint; used in the generated DNS name
 * @return a new builder; set exactly one target, a port, and finish with {@code in(SystemName)}
 */
public static EndpointBuilder of(ApplicationId application) {
    return new EndpointBuilder(application);
}
/**
 * Builder for {@link Endpoint}. Exactly one target type (cluster, endpoint ID or wildcard)
 * and a port must be set before calling {@link #in(SystemName)}.
 */
public static class EndpointBuilder {

    private final ApplicationId application;

    private ZoneId zone;
    private ClusterSpec.Id cluster;
    private EndpointId endpointId;
    private Port port;
    private RoutingMethod routingMethod = RoutingMethod.shared;
    private boolean legacy = false;
    private boolean wildcard = false;

    private EndpointBuilder(ApplicationId application) {
        this.application = application;
    }

    /** Sets the cluster target for this */
    public EndpointBuilder target(ClusterSpec.Id cluster, ZoneId zone) {
        if (endpointId != null || wildcard) {
            throw new IllegalArgumentException("Cannot set multiple target types");
        }
        this.cluster = cluster;
        this.zone = zone;
        return this;
    }

    /** Sets the endpoint target ID for this (as defined in deployments.xml) */
    public EndpointBuilder named(EndpointId endpointId) {
        if (cluster != null || wildcard) {
            throw new IllegalArgumentException("Cannot set multiple target types");
        }
        this.endpointId = endpointId;
        return this;
    }

    /** Sets the global wildcard target for this */
    public EndpointBuilder wildcard() {
        if (endpointId != null || cluster != null) {
            throw new IllegalArgumentException("Cannot set multiple target types");
        }
        this.wildcard = true;
        return this;
    }

    /** Sets the zone wildcard target for this */
    public EndpointBuilder wildcard(ZoneId zone) {
        if (endpointId != null || cluster != null) {
            throw new IllegalArgumentException("Cannot set multiple target types");
        }
        this.zone = zone;
        this.wildcard = true;
        return this;
    }

    /** Sets the port of this */
    public EndpointBuilder on(Port port) {
        this.port = port;
        return this;
    }

    /** Marks this as a legacy endpoint */
    public EndpointBuilder legacy() {
        this.legacy = true;
        return this;
    }

    /** Sets the routing method for this */
    public EndpointBuilder routingMethod(RoutingMethod method) {
        this.routingMethod = method;
        return this;
    }

    /** Sets the system that owns this and builds the endpoint */
    public Endpoint in(SystemName system) {
        String name;
        if (wildcard) {
            name = "*";
        } else if (endpointId != null) {
            name = endpointId.id();
        } else if (cluster != null) {
            name = cluster.value();
        } else {
            throw new IllegalArgumentException("Must set either cluster, rotation or wildcard target");
        }
        // Fail with an explicit message instead of a bare NPE from port.isDefault() below
        // when the caller forgot on(Port)
        Objects.requireNonNull(port, "port must be set with on(Port) before calling in(SystemName)");
        if (system.isPublic() && routingMethod != RoutingMethod.exclusive) {
            throw new IllegalArgumentException("Public system only supports routing method " + RoutingMethod.exclusive);
        }
        if (routingMethod.isDirect() && !port.isDefault()) {
            throw new IllegalArgumentException("Routing method " + routingMethod + " can only use default port");
        }
        return new Endpoint(name, application, zone, system, port, legacy, routingMethod, wildcard);
    }

}
} | class Endpoint {
private static final String YAHOO_DNS_SUFFIX = ".vespa.yahooapis.com";
private static final String OATH_DNS_SUFFIX = ".vespa.oath.cloud";
private static final String PUBLIC_DNS_SUFFIX = ".public.vespa.oath.cloud";
private static final String PUBLIC_CD_DNS_SUFFIX = ".public-cd.vespa.oath.cloud";
private final String name;
private final URI url;
private final Scope scope;
private final boolean legacy;
private final RoutingMethod routingMethod;
private final boolean tls;
private final boolean wildcard;
/**
 * Creates a new endpoint.
 *
 * @param name          endpoint name: "*", an endpoint ID or a cluster name
 * @param application   owning application; must be non-null
 * @param zone          deployment zone, or null for a global endpoint
 * @param system        system this endpoint exists in; must be non-null
 * @param port          HTTP(S) port; must be non-null
 * @param legacy        whether this is a legacy DNS name due for removal
 * @param routingMethod routing method used to reach this endpoint; must be non-null
 * @param wildcard      whether this is a wildcard endpoint (used only in certificates)
 */
private Endpoint(String name, ApplicationId application, ZoneId zone, SystemName system, Port port, boolean legacy,
                 RoutingMethod routingMethod, boolean wildcard) {
    Objects.requireNonNull(name, "name must be non-null");
    Objects.requireNonNull(application, "application must be non-null");
    Objects.requireNonNull(system, "system must be non-null");
    Objects.requireNonNull(port, "port must be non-null");
    Objects.requireNonNull(routingMethod, "routingMethod must be non-null");
    this.name = name;
    this.url = createUrl(name, application, zone, system, port, legacy, routingMethod);
    this.scope = zone == null ? Scope.global : Scope.zone; // absent zone means the endpoint spans all zones
    this.legacy = legacy;
    this.routingMethod = routingMethod;
    this.tls = port.tls;
    this.wildcard = wildcard;
}
/**
 * Returns the name of this endpoint (the first component of the DNS name). Depending on the endpoint type, this
 * can be one of the following:
 * - A wildcard (any scope)
 * - A cluster name (only zone scope)
 * - An endpoint ID (only global scope)
 */
public String name() {
    return name;
}

/** Returns the URL used to access this */
public URI url() {
    return url;
}

/** Returns the DNS name of this: the URL authority with any trailing port stripped */
public String dnsName() {
    return url.getAuthority().replaceAll(":.*", "");
}

/** Returns the scope of this: zone when bound to a single zone, global otherwise */
public Scope scope() {
    return scope;
}

/** Returns whether this is considered a legacy DNS name that is due for removal */
public boolean legacy() {
    return legacy;
}

/** Returns the routing method used for this */
public RoutingMethod routingMethod() {
    return routingMethod;
}

/** Returns whether this endpoint supports TLS connections */
public boolean tls() {
    return tls;
}

/** Returns whether this requires a rotation to be reachable: only global endpoints with shared routing do */
public boolean requiresRotation() {
    return routingMethod.isShared() && scope == Scope.global;
}

/** Returns whether this is a wildcard endpoint (used only in certificates) */
public boolean wildcard() {
    return wildcard;
}
/**
 * Returns the upstream ID of given deployment. This *must* match what the routing layer generates.
 * Only defined for global endpoints with shared routing.
 */
public String upstreamIdOf(DeploymentId deployment) {
    if (scope != Scope.global) throw new IllegalArgumentException("Scope " + scope + " does not have upstream name");
    if (!routingMethod.isShared()) throw new IllegalArgumentException("Routing method " + routingMethod + " does not have upstream name");
    return upstreamIdOf(name, deployment.applicationId(), deployment.zoneId());
}

/** Two endpoints are equal iff they have the same URL and routing method */
@Override
public boolean equals(Object o) {
    if (this == o) return true;
    if (o == null || getClass() != o.getClass()) return false;
    Endpoint endpoint = (Endpoint) o;
    return url.equals(endpoint.url) &&
           routingMethod == endpoint.routingMethod;
}

@Override
public int hashCode() {
    // Must stay consistent with equals: same fields
    return Objects.hash(url, routingMethod);
}

@Override
public String toString() {
    return String.format("endpoint %s [scope=%s, legacy=%s, routingMethod=%s]", url, scope, legacy, routingMethod);
}

/** Returns the (non-legacy) DNS suffix used for endpoints in given system */
public static String dnsSuffix(SystemName system) {
    return dnsSuffix(system, false);
}
private static URI createUrl(String name, ApplicationId application, ZoneId zone, SystemName system,
Port port, boolean legacy, RoutingMethod routingMethod) {
String scheme = port.tls ? "https" : "http";
String separator = separator(system, routingMethod, port.tls);
String portPart = port.isDefault() ? "" : ":" + port.port;
return URI.create(scheme + ":
sanitize(namePart(name, separator)) +
systemPart(system, separator) +
sanitize(instancePart(application, separator)) +
sanitize(application.application().value()) +
separator +
sanitize(application.tenant().value()) +
"." +
scopePart(zone, legacy) +
dnsSuffix(system, legacy) +
portPart +
"/");
}
/** Replaces every underscore in the given part with a dash. */
private static String sanitize(String part) {
    // CharSequence overload of replace; identical result to the char-based variant
    return part.replace("_", "-");
}
/** Returns the separator between the name parts of the DNS name: "--" only for shared routing over TLS outside public systems. */
private static String separator(SystemName system, RoutingMethod routingMethod, boolean tls) {
    boolean useDashes = tls && !routingMethod.isDirect() && !system.isPublic();
    return useDashes ? "--" : ".";
}
/** Returns the name component followed by the separator, or empty for the "default" name. */
private static String namePart(String name, String separator) {
    return "default".equals(name) ? "" : name + separator;
}
/**
 * Returns the zone-dependent part of the DNS name.
 * A null zone means the endpoint is global; non-legacy production endpoints omit the environment component.
 */
private static String scopePart(ZoneId zone, boolean legacy) {
    if (zone == null) return "global";
    if (!legacy && zone.environment().isProduction()) return zone.region().value();
    return zone.region().value() + "." + zone.environment().value();
}
/** Returns the instance component followed by the separator, or empty for the default instance. */
private static String instancePart(ApplicationId application, String separator) {
    return application.instance().isDefault() ? "" : application.instance().value() + separator;
}
/** Returns the system component followed by the separator, or empty outside CD systems. */
private static String systemPart(SystemName system, String separator) {
    return system.isCd() ? system.value() + separator : "";
}
/**
 * Returns the DNS suffix for given system.
 *
 * @param legacy whether to return the legacy suffix; only cd/main have one
 * @throws IllegalArgumentException for systems with no declared suffix
 */
private static String dnsSuffix(SystemName system, boolean legacy) {
    switch (system) {
        case cd:
        case main:
            if (legacy) return YAHOO_DNS_SUFFIX;
            return OATH_DNS_SUFFIX;
        case Public:
            return PUBLIC_DNS_SUFFIX;
        case PublicCd:
            return PUBLIC_CD_DNS_SUFFIX;
        default: throw new IllegalArgumentException("No DNS suffix declared for system " + system);
    }
}
/** Lower-cases the part, maps underscores to dashes, drops every other non [a-z0-9-] character and truncates. */
private static String sanitizeUpstream(String part) {
    String lowered = part.toLowerCase();
    String dashed = lowered.replace('_', '-');
    String cleaned = dashed.replaceAll("[^a-z0-9-]*", "");
    return truncate(cleaned);
}
/** Truncates the given part at the front so its length does not exceed 63 characters. */
private static String truncate(String part) {
    int overflow = part.length() - 63;
    return overflow > 0 ? part.substring(overflow) : part;
}
/** An endpoint's scope: how many zones it covers */
public enum Scope {

    /** Endpoint points to all zones */
    global,

    /** Endpoint points to a single zone */
    zone,

}
/** Represents an endpoint's HTTP port */
public static class Port {

    private final int port;
    private final boolean tls; // whether connections to this port use TLS

    private Port(int port, boolean tls) {
        if (port < 1 || port > 65535) {
            throw new IllegalArgumentException("Port must be between 1 and 65535, got " + port);
        }
        this.port = port;
        this.tls = tls;
    }

    /** Returns whether this is a standard port that may be omitted from URLs */
    private boolean isDefault() {
        return port == 80 || port == 443;
    }

    /** Returns the default HTTPS port */
    public static Port tls() {
        return new Port(443, true);
    }

    /** Returns default port for the given routing method: 443 for direct routing, 4443 otherwise */
    public static Port fromRoutingMethod(RoutingMethod method) {
        if (method.isDirect()) return Port.tls();
        return Port.tls(4443);
    }

    /** Create a HTTPS port */
    public static Port tls(int port) {
        return new Port(port, true);
    }

    /** Create a HTTP port */
    public static Port plain(int port) {
        return new Port(port, false);
    }

}
/**
 * Entry point for building an endpoint for the given application.
 *
 * @param application the application owning the endpoint; used in the generated DNS name
 * @return a new builder; set exactly one target, a port, and finish with {@code in(SystemName)}
 */
public static EndpointBuilder of(ApplicationId application) {
    return new EndpointBuilder(application);
}
/**
 * Builder for {@link Endpoint}. Exactly one target type (cluster, endpoint ID or wildcard)
 * and a port must be set before calling {@link #in(SystemName)}.
 */
public static class EndpointBuilder {

    private final ApplicationId application;

    private ZoneId zone;
    private ClusterSpec.Id cluster;
    private EndpointId endpointId;
    private Port port;
    private RoutingMethod routingMethod = RoutingMethod.shared;
    private boolean legacy = false;
    private boolean wildcard = false;

    private EndpointBuilder(ApplicationId application) {
        this.application = application;
    }

    /** Sets the cluster target for this */
    public EndpointBuilder target(ClusterSpec.Id cluster, ZoneId zone) {
        if (endpointId != null || wildcard) {
            throw new IllegalArgumentException("Cannot set multiple target types");
        }
        this.cluster = cluster;
        this.zone = zone;
        return this;
    }

    /** Sets the endpoint target ID for this (as defined in deployments.xml) */
    public EndpointBuilder named(EndpointId endpointId) {
        if (cluster != null || wildcard) {
            throw new IllegalArgumentException("Cannot set multiple target types");
        }
        this.endpointId = endpointId;
        return this;
    }

    /** Sets the global wildcard target for this */
    public EndpointBuilder wildcard() {
        if (endpointId != null || cluster != null) {
            throw new IllegalArgumentException("Cannot set multiple target types");
        }
        this.wildcard = true;
        return this;
    }

    /** Sets the zone wildcard target for this */
    public EndpointBuilder wildcard(ZoneId zone) {
        if (endpointId != null || cluster != null) {
            throw new IllegalArgumentException("Cannot set multiple target types");
        }
        this.zone = zone;
        this.wildcard = true;
        return this;
    }

    /** Sets the port of this */
    public EndpointBuilder on(Port port) {
        this.port = port;
        return this;
    }

    /** Marks this as a legacy endpoint */
    public EndpointBuilder legacy() {
        this.legacy = true;
        return this;
    }

    /** Sets the routing method for this */
    public EndpointBuilder routingMethod(RoutingMethod method) {
        this.routingMethod = method;
        return this;
    }

    /** Sets the system that owns this and builds the endpoint */
    public Endpoint in(SystemName system) {
        String name;
        if (wildcard) {
            name = "*";
        } else if (endpointId != null) {
            name = endpointId.id();
        } else if (cluster != null) {
            name = cluster.value();
        } else {
            throw new IllegalArgumentException("Must set either cluster, rotation or wildcard target");
        }
        // Fail with an explicit message instead of a bare NPE from port.isDefault() below
        // when the caller forgot on(Port)
        Objects.requireNonNull(port, "port must be set with on(Port) before calling in(SystemName)");
        if (system.isPublic() && routingMethod != RoutingMethod.exclusive) {
            throw new IllegalArgumentException("Public system only supports routing method " + RoutingMethod.exclusive);
        }
        if (routingMethod.isDirect() && !port.isDefault()) {
            throw new IllegalArgumentException("Routing method " + routingMethod + " can only use default port");
        }
        return new Endpoint(name, application, zone, system, port, legacy, routingMethod, wildcard);
    }

}
} |
Most of the time `oldHostnames` and `hostnames` will be equal, maybe consider adding optimization to skip all that removal and addition if that is the case? | public void add(ApplicationInfo applicationInfo) {
ApplicationInfo oldApplicationInfo = applicationsById.put(applicationInfo.getApplicationId(), applicationInfo);
final String logPrefix;
if (oldApplicationInfo == null) {
logPrefix = isComplete ? "New application " : "Bootstrapped application ";
} else {
logPrefix = isComplete ? "Reactivated application " : "Rebootstrapped application ";
}
logger.log(LogLevel.INFO, logPrefix + applicationInfo.getApplicationId());
Set<HostName> oldHostnames = hostnamesById.remove(applicationInfo.getApplicationId());
if (oldHostnames != null) {
oldHostnames.forEach(applicationsByHostname::remove);
}
Set<HostName> hostnames = applicationInfo.getModel().getHosts().stream()
.map(HostInfo::getHostname)
.map(HostName::from)
.collect(Collectors.toSet());
hostnamesById.put(applicationInfo.getApplicationId(), hostnames);
hostnames.forEach(hostname -> applicationsByHostname.put(hostname, applicationInfo));
listeners.forEach(listener -> listener.applicationActivated(applicationInfo));
} | Set<HostName> oldHostnames = hostnamesById.remove(applicationInfo.getApplicationId()); | public void add(ApplicationInfo applicationInfo) {
ApplicationId id = applicationInfo.getApplicationId();
ApplicationInfo oldApplicationInfo = applicationsById.put(id, applicationInfo);
final String logPrefix;
if (oldApplicationInfo == null) {
logPrefix = isComplete ? "New application " : "Bootstrapped application ";
} else {
logPrefix = isComplete ? "Reactivated application " : "Rebootstrapped application ";
}
logger.log(LogLevel.INFO, logPrefix + id);
Set<HostName> hostnames = hostnamesById.computeIfAbsent(id, k -> new HashSet<>());
Set<HostName> removedHosts = new HashSet<>(hostnames);
applicationInfo.getModel().getHosts().stream()
.map(HostInfo::getHostname)
.map(HostName::from)
.forEach(hostname -> {
if (!removedHosts.remove(hostname)) {
hostnames.add(hostname);
ApplicationId previousId = idsByHostname.put(hostname, id);
if (previousId != null && !previousId.equals(id)) {
logger.log(LogLevel.WARNING, hostname + " has been reassigned from " +
previousId + " to " + id);
Set<HostName> previousHostnames = hostnamesById.get(previousId);
if (previousHostnames != null) {
previousHostnames.remove(hostname);
}
}
}
});
removedHosts.forEach(idsByHostname::remove);
listeners.forEach(listener -> listener.applicationActivated(applicationInfo));
} | class DuperModel {
private static Logger logger = Logger.getLogger(DuperModel.class.getName());
private final Map<ApplicationId, ApplicationInfo> applicationsById = new HashMap<>();
private final Map<HostName, ApplicationInfo> applicationsByHostname = new HashMap<>();
private final Map<ApplicationId, Set<HostName>> hostnamesById = new HashMap<>();
private final List<DuperModelListener> listeners = new ArrayList<>();
private boolean isComplete = false;
/** Registers a listener, first replaying an activation event for every application already known */
public void registerListener(DuperModelListener listener) {
    applicationsById.values().forEach(listener::applicationActivated);
    listeners.add(listener);
}

/** Marks bootstrap as done and notifies listeners; idempotent */
void setComplete() {
    if (!isComplete) {
        logger.log(LogLevel.INFO, "Bootstrap done - duper model is complete");
        isComplete = true;
        listeners.forEach(DuperModelListener::bootstrapComplete);
    }
}

/** Returns whether bootstrap has completed */
public boolean isComplete() { return isComplete; }

/** Returns the number of applications in the model */
public int numberOfApplications() {
    return applicationsById.size();
}

/** Returns the number of hosts known across all applications */
public int numberOfHosts() {
    return applicationsByHostname.size();
}

/** Returns whether the given application is present in the model */
public boolean contains(ApplicationId applicationId) {
    return applicationsById.containsKey(applicationId);
}

/** Returns the info for the given application, if present */
public Optional<ApplicationInfo> getApplicationInfo(ApplicationId applicationId) {
    return Optional.ofNullable(applicationsById.get(applicationId));
}

/** Returns the info for the application running on the given host, if any */
public Optional<ApplicationInfo> getApplicationInfo(HostName hostName) {
    return Optional.ofNullable(applicationsByHostname.get(hostName));
}

/** Returns an immutable snapshot of all application infos */
public List<ApplicationInfo> getApplicationInfos() {
    return List.copyOf(applicationsById.values());
}

/**
 * Removes the given application and its host mappings.
 * Listeners are notified only if the application was actually present.
 */
public void remove(ApplicationId applicationId) {
    Set<HostName> hostnames = hostnamesById.remove(applicationId);
    if (hostnames != null) {
        hostnames.forEach(applicationsByHostname::remove);
    }
    ApplicationInfo application = applicationsById.remove(applicationId);
    if (application != null) {
        logger.log(LogLevel.INFO, "Removed application " + applicationId);
        listeners.forEach(listener -> listener.applicationRemoved(applicationId));
    }
}
} | class DuperModel {
private static Logger logger = Logger.getLogger(DuperModel.class.getName());
private final Map<ApplicationId, ApplicationInfo> applicationsById = new HashMap<>();
private final Map<HostName, ApplicationId> idsByHostname = new HashMap<>();
private final Map<ApplicationId, Set<HostName>> hostnamesById = new HashMap<>();
private final List<DuperModelListener> listeners = new ArrayList<>();
private boolean isComplete = false;
public void registerListener(DuperModelListener listener) {
applicationsById.values().forEach(listener::applicationActivated);
listeners.add(listener);
}
void setComplete() {
if (!isComplete) {
logger.log(LogLevel.INFO, "Bootstrap done - duper model is complete");
isComplete = true;
listeners.forEach(DuperModelListener::bootstrapComplete);
}
}
public boolean isComplete() { return isComplete; }
public int numberOfApplications() {
return applicationsById.size();
}
public int numberOfHosts() {
return idsByHostname.size();
}
public boolean contains(ApplicationId applicationId) {
return applicationsById.containsKey(applicationId);
}
public Optional<ApplicationInfo> getApplicationInfo(ApplicationId applicationId) {
return Optional.ofNullable(applicationsById.get(applicationId));
}
public Optional<ApplicationInfo> getApplicationInfo(HostName hostName) {
return Optional.ofNullable(idsByHostname.get(hostName)).map(applicationsById::get);
}
public List<ApplicationInfo> getApplicationInfos() {
return List.copyOf(applicationsById.values());
}
public void remove(ApplicationId applicationId) {
Set<HostName> hostnames = hostnamesById.remove(applicationId);
if (hostnames != null) {
hostnames.forEach(idsByHostname::remove);
}
ApplicationInfo application = applicationsById.remove(applicationId);
if (application != null) {
logger.log(LogLevel.INFO, "Removed application " + applicationId);
listeners.forEach(listener -> listener.applicationRemoved(applicationId));
}
}
} |
Something like the following? ``` HashSet<HostName> hostnames = hostnamesById .computeIfAbsent(applicationInfo.getApplicationId(), k -> new HashSet<>()); Set<HostName> newHostnames; Set<HostName> removedHostnames; { newHostnames = applicationInfo.getModel().getHosts().stream() .map(HostInfo::getHostname) .map(HostName::from) .collect(Collectors.toCollection(HashSet::new)); removedHostnames = new HashSet<>(hostnames); removedHostnames.removeAll(newHostnames); newHostnames.removeAll(hostnames); } // These corrects applicationById hostnames.removeAll(removedHostnames); hostnames.addAll(newHostnames); // These corrects applicationByHostname removedHostnames.forEach(applicationsByHostname::remove); newHostnames.forEach(hostname -> applicationsByHostname.put(hostname, applicationInfo)); ``` This will create more objects (1 additional HashSet) and will have one additional O(n) operation (removeAll) compared to current. In addition, I would have to make applicationsByHostname map to ApplicationId instead of ApplicationInfo, otherwise unchanged hostnames may point to old ApplicationInfo. Which would incur a double map lookup to get ApplicationInfo instead of a single lookup. | public void add(ApplicationInfo applicationInfo) {
ApplicationInfo oldApplicationInfo = applicationsById.put(applicationInfo.getApplicationId(), applicationInfo);
final String logPrefix;
if (oldApplicationInfo == null) {
logPrefix = isComplete ? "New application " : "Bootstrapped application ";
} else {
logPrefix = isComplete ? "Reactivated application " : "Rebootstrapped application ";
}
logger.log(LogLevel.INFO, logPrefix + applicationInfo.getApplicationId());
Set<HostName> oldHostnames = hostnamesById.remove(applicationInfo.getApplicationId());
if (oldHostnames != null) {
oldHostnames.forEach(applicationsByHostname::remove);
}
Set<HostName> hostnames = applicationInfo.getModel().getHosts().stream()
.map(HostInfo::getHostname)
.map(HostName::from)
.collect(Collectors.toSet());
hostnamesById.put(applicationInfo.getApplicationId(), hostnames);
hostnames.forEach(hostname -> applicationsByHostname.put(hostname, applicationInfo));
listeners.forEach(listener -> listener.applicationActivated(applicationInfo));
} | Set<HostName> oldHostnames = hostnamesById.remove(applicationInfo.getApplicationId()); | public void add(ApplicationInfo applicationInfo) {
ApplicationId id = applicationInfo.getApplicationId();
ApplicationInfo oldApplicationInfo = applicationsById.put(id, applicationInfo);
final String logPrefix;
if (oldApplicationInfo == null) {
logPrefix = isComplete ? "New application " : "Bootstrapped application ";
} else {
logPrefix = isComplete ? "Reactivated application " : "Rebootstrapped application ";
}
logger.log(LogLevel.INFO, logPrefix + id);
Set<HostName> hostnames = hostnamesById.computeIfAbsent(id, k -> new HashSet<>());
Set<HostName> removedHosts = new HashSet<>(hostnames);
applicationInfo.getModel().getHosts().stream()
.map(HostInfo::getHostname)
.map(HostName::from)
.forEach(hostname -> {
if (!removedHosts.remove(hostname)) {
hostnames.add(hostname);
ApplicationId previousId = idsByHostname.put(hostname, id);
if (previousId != null && !previousId.equals(id)) {
logger.log(LogLevel.WARNING, hostname + " has been reassigned from " +
previousId + " to " + id);
Set<HostName> previousHostnames = hostnamesById.get(previousId);
if (previousHostnames != null) {
previousHostnames.remove(hostname);
}
}
}
});
removedHosts.forEach(idsByHostname::remove);
listeners.forEach(listener -> listener.applicationActivated(applicationInfo));
} | class DuperModel {
private static Logger logger = Logger.getLogger(DuperModel.class.getName());
private final Map<ApplicationId, ApplicationInfo> applicationsById = new HashMap<>();
private final Map<HostName, ApplicationInfo> applicationsByHostname = new HashMap<>();
private final Map<ApplicationId, Set<HostName>> hostnamesById = new HashMap<>();
private final List<DuperModelListener> listeners = new ArrayList<>();
private boolean isComplete = false;
/** Registers a listener, first replaying an activation event for every application already known */
public void registerListener(DuperModelListener listener) {
    applicationsById.values().forEach(listener::applicationActivated);
    listeners.add(listener);
}

/** Marks bootstrap as done and notifies listeners; idempotent */
void setComplete() {
    if (!isComplete) {
        logger.log(LogLevel.INFO, "Bootstrap done - duper model is complete");
        isComplete = true;
        listeners.forEach(DuperModelListener::bootstrapComplete);
    }
}

/** Returns whether bootstrap has completed */
public boolean isComplete() { return isComplete; }

/** Returns the number of applications in the model */
public int numberOfApplications() {
    return applicationsById.size();
}

/** Returns the number of hosts known across all applications */
public int numberOfHosts() {
    return idsByHostname.size();
}

/** Returns whether the given application is present in the model */
public boolean contains(ApplicationId applicationId) {
    return applicationsById.containsKey(applicationId);
}

/** Returns the info for the given application, if present */
public Optional<ApplicationInfo> getApplicationInfo(ApplicationId applicationId) {
    return Optional.ofNullable(applicationsById.get(applicationId));
}

/** Returns the info for the application running on the given host, if any (resolved via the host's application ID) */
public Optional<ApplicationInfo> getApplicationInfo(HostName hostName) {
    return Optional.ofNullable(idsByHostname.get(hostName)).map(applicationsById::get);
}

/** Returns an immutable snapshot of all application infos */
public List<ApplicationInfo> getApplicationInfos() {
    return List.copyOf(applicationsById.values());
}

/**
 * Removes the given application and its host mappings.
 * Listeners are notified only if the application was actually present.
 */
public void remove(ApplicationId applicationId) {
    Set<HostName> hostnames = hostnamesById.remove(applicationId);
    if (hostnames != null) {
        hostnames.forEach(idsByHostname::remove);
    }
    ApplicationInfo application = applicationsById.remove(applicationId);
    if (application != null) {
        logger.log(LogLevel.INFO, "Removed application " + applicationId);
        listeners.forEach(listener -> listener.applicationRemoved(applicationId));
    }
}
} | class DuperModel {
private static Logger logger = Logger.getLogger(DuperModel.class.getName());
private final Map<ApplicationId, ApplicationInfo> applicationsById = new HashMap<>();
private final Map<HostName, ApplicationId> idsByHostname = new HashMap<>();
private final Map<ApplicationId, Set<HostName>> hostnamesById = new HashMap<>();
private final List<DuperModelListener> listeners = new ArrayList<>();
private boolean isComplete = false;
public void registerListener(DuperModelListener listener) {
applicationsById.values().forEach(listener::applicationActivated);
listeners.add(listener);
}
void setComplete() {
if (!isComplete) {
logger.log(LogLevel.INFO, "Bootstrap done - duper model is complete");
isComplete = true;
listeners.forEach(DuperModelListener::bootstrapComplete);
}
}
public boolean isComplete() { return isComplete; }
public int numberOfApplications() {
return applicationsById.size();
}
public int numberOfHosts() {
return idsByHostname.size();
}
public boolean contains(ApplicationId applicationId) {
return applicationsById.containsKey(applicationId);
}
public Optional<ApplicationInfo> getApplicationInfo(ApplicationId applicationId) {
return Optional.ofNullable(applicationsById.get(applicationId));
}
public Optional<ApplicationInfo> getApplicationInfo(HostName hostName) {
return Optional.ofNullable(idsByHostname.get(hostName)).map(applicationsById::get);
}
public List<ApplicationInfo> getApplicationInfos() {
return List.copyOf(applicationsById.values());
}
public void remove(ApplicationId applicationId) {
Set<HostName> hostnames = hostnamesById.remove(applicationId);
if (hostnames != null) {
hostnames.forEach(idsByHostname::remove);
}
ApplicationInfo application = applicationsById.remove(applicationId);
if (application != null) {
logger.log(LogLevel.INFO, "Removed application " + applicationId);
listeners.forEach(listener -> listener.applicationRemoved(applicationId));
}
}
} |
No, I was thinking something like this: ``` Set<HostName> oldHostnames = hostnamesById.get(applicationInfo.getApplicationId()); Set<HostName> hostnames = applicationInfo.getModel().getHosts().stream() .map(HostInfo::getHostname) .map(HostName::from) .collect(Collectors.toSet()); if (!Objects.equals(oldHostnames, hostnames)) { if (oldHostnames != null) { oldHostnames.forEach(applicationsByHostname::remove); } hostnamesById.put(applicationInfo.getApplicationId(), hostnames); hostnames.forEach(hostname -> applicationsByHostname.put(hostname, applicationInfo)); } ``` | public void add(ApplicationInfo applicationInfo) {
ApplicationInfo oldApplicationInfo = applicationsById.put(applicationInfo.getApplicationId(), applicationInfo);
final String logPrefix;
if (oldApplicationInfo == null) {
logPrefix = isComplete ? "New application " : "Bootstrapped application ";
} else {
logPrefix = isComplete ? "Reactivated application " : "Rebootstrapped application ";
}
logger.log(LogLevel.INFO, logPrefix + applicationInfo.getApplicationId());
Set<HostName> oldHostnames = hostnamesById.remove(applicationInfo.getApplicationId());
if (oldHostnames != null) {
oldHostnames.forEach(applicationsByHostname::remove);
}
Set<HostName> hostnames = applicationInfo.getModel().getHosts().stream()
.map(HostInfo::getHostname)
.map(HostName::from)
.collect(Collectors.toSet());
hostnamesById.put(applicationInfo.getApplicationId(), hostnames);
hostnames.forEach(hostname -> applicationsByHostname.put(hostname, applicationInfo));
listeners.forEach(listener -> listener.applicationActivated(applicationInfo));
} | Set<HostName> oldHostnames = hostnamesById.remove(applicationInfo.getApplicationId()); | public void add(ApplicationInfo applicationInfo) {
ApplicationId id = applicationInfo.getApplicationId();
ApplicationInfo oldApplicationInfo = applicationsById.put(id, applicationInfo);
final String logPrefix;
if (oldApplicationInfo == null) {
logPrefix = isComplete ? "New application " : "Bootstrapped application ";
} else {
logPrefix = isComplete ? "Reactivated application " : "Rebootstrapped application ";
}
logger.log(LogLevel.INFO, logPrefix + id);
Set<HostName> hostnames = hostnamesById.computeIfAbsent(id, k -> new HashSet<>());
Set<HostName> removedHosts = new HashSet<>(hostnames);
applicationInfo.getModel().getHosts().stream()
.map(HostInfo::getHostname)
.map(HostName::from)
.forEach(hostname -> {
if (!removedHosts.remove(hostname)) {
hostnames.add(hostname);
ApplicationId previousId = idsByHostname.put(hostname, id);
if (previousId != null && !previousId.equals(id)) {
logger.log(LogLevel.WARNING, hostname + " has been reassigned from " +
previousId + " to " + id);
Set<HostName> previousHostnames = hostnamesById.get(previousId);
if (previousHostnames != null) {
previousHostnames.remove(hostname);
}
}
}
});
removedHosts.forEach(idsByHostname::remove);
listeners.forEach(listener -> listener.applicationActivated(applicationInfo));
} | class DuperModel {
private static Logger logger = Logger.getLogger(DuperModel.class.getName());
private final Map<ApplicationId, ApplicationInfo> applicationsById = new HashMap<>();
private final Map<HostName, ApplicationInfo> applicationsByHostname = new HashMap<>();
private final Map<ApplicationId, Set<HostName>> hostnamesById = new HashMap<>();
private final List<DuperModelListener> listeners = new ArrayList<>();
private boolean isComplete = false;
public void registerListener(DuperModelListener listener) {
applicationsById.values().forEach(listener::applicationActivated);
listeners.add(listener);
}
void setComplete() {
if (!isComplete) {
logger.log(LogLevel.INFO, "Bootstrap done - duper model is complete");
isComplete = true;
listeners.forEach(DuperModelListener::bootstrapComplete);
}
}
public boolean isComplete() { return isComplete; }
public int numberOfApplications() {
return applicationsById.size();
}
public int numberOfHosts() {
return applicationsByHostname.size();
}
public boolean contains(ApplicationId applicationId) {
return applicationsById.containsKey(applicationId);
}
public Optional<ApplicationInfo> getApplicationInfo(ApplicationId applicationId) {
return Optional.ofNullable(applicationsById.get(applicationId));
}
public Optional<ApplicationInfo> getApplicationInfo(HostName hostName) {
return Optional.ofNullable(applicationsByHostname.get(hostName));
}
public List<ApplicationInfo> getApplicationInfos() {
return List.copyOf(applicationsById.values());
}
public void remove(ApplicationId applicationId) {
Set<HostName> hostnames = hostnamesById.remove(applicationId);
if (hostnames != null) {
hostnames.forEach(applicationsByHostname::remove);
}
ApplicationInfo application = applicationsById.remove(applicationId);
if (application != null) {
logger.log(LogLevel.INFO, "Removed application " + applicationId);
listeners.forEach(listener -> listener.applicationRemoved(applicationId));
}
}
} | class DuperModel {
private static Logger logger = Logger.getLogger(DuperModel.class.getName());
private final Map<ApplicationId, ApplicationInfo> applicationsById = new HashMap<>();
private final Map<HostName, ApplicationId> idsByHostname = new HashMap<>();
private final Map<ApplicationId, Set<HostName>> hostnamesById = new HashMap<>();
private final List<DuperModelListener> listeners = new ArrayList<>();
private boolean isComplete = false;
public void registerListener(DuperModelListener listener) {
applicationsById.values().forEach(listener::applicationActivated);
listeners.add(listener);
}
void setComplete() {
if (!isComplete) {
logger.log(LogLevel.INFO, "Bootstrap done - duper model is complete");
isComplete = true;
listeners.forEach(DuperModelListener::bootstrapComplete);
}
}
public boolean isComplete() { return isComplete; }
public int numberOfApplications() {
return applicationsById.size();
}
public int numberOfHosts() {
return idsByHostname.size();
}
public boolean contains(ApplicationId applicationId) {
return applicationsById.containsKey(applicationId);
}
public Optional<ApplicationInfo> getApplicationInfo(ApplicationId applicationId) {
return Optional.ofNullable(applicationsById.get(applicationId));
}
public Optional<ApplicationInfo> getApplicationInfo(HostName hostName) {
return Optional.ofNullable(idsByHostname.get(hostName)).map(applicationsById::get);
}
public List<ApplicationInfo> getApplicationInfos() {
return List.copyOf(applicationsById.values());
}
public void remove(ApplicationId applicationId) {
Set<HostName> hostnames = hostnamesById.remove(applicationId);
if (hostnames != null) {
hostnames.forEach(idsByHostname::remove);
}
ApplicationInfo application = applicationsById.remove(applicationId);
if (application != null) {
logger.log(LogLevel.INFO, "Removed application " + applicationId);
listeners.forEach(listener -> listener.applicationRemoved(applicationId));
}
}
} |
if replica version >= commit info version, the replica is healthy and is reasonable to be removed from `errReplicas`. | public boolean canTxnFinished(TransactionState txn, Set<Long> errReplicas, Set<Long> unfinishedBackends) {
Database db = catalog.getDb(txn.getDbId());
if (db == null) {
return true;
}
db.readLock();
try {
for (TableCommitInfo tableCommitInfo : txn.getIdToTableCommitInfos().values()) {
long tableId = tableCommitInfo.getTableId();
OlapTable table = (OlapTable) db.getTable(tableId);
if (table == null) {
continue;
}
PartitionInfo partitionInfo = table.getPartitionInfo();
for (PartitionCommitInfo partitionCommitInfo : tableCommitInfo.getIdToPartitionCommitInfo().values()) {
long partitionId = partitionCommitInfo.getPartitionId();
Partition partition = table.getPartition(partitionId);
if (partition == null) {
continue;
}
if (partition.getVisibleVersion() != partitionCommitInfo.getVersion() - 1) {
return false;
}
if (partition.isUseStarOS()) {
continue;
}
List<MaterializedIndex> allIndices = txn.getPartitionLoadedTblIndexes(tableId, partition);
int quorumNum = partitionInfo.getQuorumNum(partitionId);
for (MaterializedIndex index : allIndices) {
for (Tablet tablet : index.getTablets()) {
int successHealthyReplicaNum = 0;
for (Replica replica : ((LocalTablet) tablet).getReplicas()) {
if (!errReplicas.contains(replica.getId())) {
if (replica.checkVersionCatchUp(partition.getVisibleVersion(), true)
&& replica.getLastFailedVersion() < 0
&& (unfinishedBackends == null
|| !unfinishedBackends.contains(replica.getBackendId()))) {
++successHealthyReplicaNum;
} else if (replica.getVersion() >= partitionCommitInfo.getVersion()) {
++successHealthyReplicaNum;
} else if (unfinishedBackends != null
&& unfinishedBackends.contains(replica.getBackendId())) {
errReplicas.add(replica.getId());
}
} else if (replica.getVersion() >= partitionCommitInfo.getVersion()) {
errReplicas.remove(replica.getId());
++successHealthyReplicaNum;
}
}
if (successHealthyReplicaNum < quorumNum) {
return false;
}
}
}
}
}
} finally {
db.readUnlock();
}
return true;
} | } else if (replica.getVersion() >= partitionCommitInfo.getVersion()) { | public boolean canTxnFinished(TransactionState txn, Set<Long> errReplicas, Set<Long> unfinishedBackends) {
Database db = catalog.getDb(txn.getDbId());
if (db == null) {
return true;
}
db.readLock();
try {
for (TableCommitInfo tableCommitInfo : txn.getIdToTableCommitInfos().values()) {
long tableId = tableCommitInfo.getTableId();
OlapTable table = (OlapTable) db.getTable(tableId);
if (table == null) {
continue;
}
PartitionInfo partitionInfo = table.getPartitionInfo();
for (PartitionCommitInfo partitionCommitInfo : tableCommitInfo.getIdToPartitionCommitInfo().values()) {
long partitionId = partitionCommitInfo.getPartitionId();
Partition partition = table.getPartition(partitionId);
if (partition == null) {
continue;
}
if (partition.getVisibleVersion() != partitionCommitInfo.getVersion() - 1) {
return false;
}
if (partition.isUseStarOS()) {
continue;
}
List<MaterializedIndex> allIndices = txn.getPartitionLoadedTblIndexes(tableId, partition);
int quorumNum = partitionInfo.getQuorumNum(partitionId);
for (MaterializedIndex index : allIndices) {
for (Tablet tablet : index.getTablets()) {
int successHealthyReplicaNum = 0;
for (Replica replica : ((LocalTablet) tablet).getReplicas()) {
if (!errReplicas.contains(replica.getId())) {
if (replica.checkVersionCatchUp(partition.getVisibleVersion(), true)
&& replica.getLastFailedVersion() < 0
&& (unfinishedBackends == null
|| !unfinishedBackends.contains(replica.getBackendId()))) {
++successHealthyReplicaNum;
} else if (replica.getVersion() >= partitionCommitInfo.getVersion()) {
++successHealthyReplicaNum;
} else if (unfinishedBackends != null
&& unfinishedBackends.contains(replica.getBackendId())) {
errReplicas.add(replica.getId());
}
} else if (replica.getVersion() >= partitionCommitInfo.getVersion()) {
errReplicas.remove(replica.getId());
++successHealthyReplicaNum;
}
}
if (successHealthyReplicaNum < quorumNum) {
return false;
}
}
}
}
}
} finally {
db.readUnlock();
}
return true;
} | class DatabaseTransactionMgr {
private static final Logger LOG = LogManager.getLogger(DatabaseTransactionMgr.class);
private long dbId;
private ReentrantReadWriteLock transactionLock = new ReentrantReadWriteLock(true);
private Map<Long, TransactionState> idToRunningTransactionState = Maps.newHashMap();
private Map<Long, TransactionState> idToFinalStatusTransactionState = Maps.newHashMap();
private ArrayDeque<TransactionState> finalStatusTransactionStateDeque = new ArrayDeque<>();
private Map<String, Set<Long>> labelToTxnIds = Maps.newHashMap();
private int runningTxnNums = 0;
private int runningRoutineLoadTxnNums = 0;
private Catalog catalog;
private EditLog editLog;
private TransactionIdGenerator idGenerator;
private List<ClearTransactionTask> clearTransactionTasks = Lists.newArrayList();
private volatile long usedQuotaDataBytes = -1;
protected void readLock() {
this.transactionLock.readLock().lock();
}
protected void readUnlock() {
this.transactionLock.readLock().unlock();
}
protected void writeLock() {
this.transactionLock.writeLock().lock();
}
protected void writeUnlock() {
this.transactionLock.writeLock().unlock();
}
public DatabaseTransactionMgr(long dbId, Catalog catalog, TransactionIdGenerator idGenerator) {
this.dbId = dbId;
this.catalog = catalog;
this.idGenerator = idGenerator;
this.editLog = catalog.getEditLog();
}
public long getDbId() {
return dbId;
}
public TransactionState getTransactionState(Long transactionId) {
readLock();
try {
TransactionState transactionState = idToRunningTransactionState.get(transactionId);
if (transactionState != null) {
return transactionState;
} else {
return idToFinalStatusTransactionState.get(transactionId);
}
} finally {
readUnlock();
}
}
private TransactionState unprotectedGetTransactionState(Long transactionId) {
TransactionState transactionState = idToRunningTransactionState.get(transactionId);
if (transactionState != null) {
return transactionState;
} else {
return idToFinalStatusTransactionState.get(transactionId);
}
}
@VisibleForTesting
protected Set<Long> unprotectedGetTxnIdsByLabel(String label) {
return labelToTxnIds.get(label);
}
@VisibleForTesting
protected int getRunningTxnNums() {
return runningTxnNums;
}
@VisibleForTesting
protected int getRunningRoutineLoadTxnNums() {
return runningRoutineLoadTxnNums;
}
@VisibleForTesting
protected int getFinishedTxnNums() {
return finalStatusTransactionStateDeque.size();
}
public List<List<String>> getTxnStateInfoList(boolean running, int limit) {
List<List<String>> infos = Lists.newArrayList();
Collection<TransactionState> transactionStateCollection = null;
readLock();
try {
if (running) {
transactionStateCollection = idToRunningTransactionState.values();
} else {
transactionStateCollection = finalStatusTransactionStateDeque;
}
transactionStateCollection.stream()
.sorted(TransactionState.TXN_ID_COMPARATOR)
.limit(limit)
.forEach(t -> {
List<String> info = Lists.newArrayList();
getTxnStateInfo(t, info);
infos.add(info);
});
} finally {
readUnlock();
}
return infos;
}
private void getTxnStateInfo(TransactionState txnState, List<String> info) {
info.add(String.valueOf(txnState.getTransactionId()));
info.add(txnState.getLabel());
info.add(txnState.getCoordinator().toString());
info.add(txnState.getTransactionStatus().name());
info.add(txnState.getSourceType().name());
info.add(TimeUtils.longToTimeString(txnState.getPrepareTime()));
info.add(TimeUtils.longToTimeString(txnState.getCommitTime()));
info.add(TimeUtils.longToTimeString(txnState.getPublishVersionTime()));
info.add(TimeUtils.longToTimeString(txnState.getFinishTime()));
info.add(txnState.getReason());
info.add(String.valueOf(txnState.getErrorReplicas().size()));
info.add(String.valueOf(txnState.getCallbackId()));
info.add(String.valueOf(txnState.getTimeoutMs()));
info.add(txnState.getErrMsg());
}
public long beginTransaction(List<Long> tableIdList, String label, TUniqueId requestId,
TransactionState.TxnCoordinator coordinator,
TransactionState.LoadJobSourceType sourceType, long listenerId, long timeoutSecond)
throws DuplicatedRequestException, LabelAlreadyUsedException, BeginTransactionException, AnalysisException {
checkDatabaseDataQuota();
writeLock();
try {
Preconditions.checkNotNull(coordinator);
Preconditions.checkNotNull(label);
FeNameFormat.checkLabel(label);
/*
* Check if label already used, by following steps
* 1. get all existing transactions
* 2. if there is a PREPARE transaction, check if this is a retry request. If yes, return the
* existing txn id.
* 3. if there is a non-aborted transaction, throw label already used exception.
*/
Set<Long> existingTxnIds = unprotectedGetTxnIdsByLabel(label);
if (existingTxnIds != null && !existingTxnIds.isEmpty()) {
List<TransactionState> notAbortedTxns = Lists.newArrayList();
for (long txnId : existingTxnIds) {
TransactionState txn = unprotectedGetTransactionState(txnId);
Preconditions.checkNotNull(txn);
if (txn.getTransactionStatus() != TransactionStatus.ABORTED) {
notAbortedTxns.add(txn);
}
}
Preconditions.checkState(notAbortedTxns.size() <= 1, notAbortedTxns);
if (!notAbortedTxns.isEmpty()) {
TransactionState notAbortedTxn = notAbortedTxns.get(0);
if (requestId != null && notAbortedTxn.getTransactionStatus() == TransactionStatus.PREPARE
&& notAbortedTxn.getRequestId() != null && notAbortedTxn.getRequestId().equals(requestId)) {
throw new DuplicatedRequestException(DebugUtil.printId(requestId),
notAbortedTxn.getTransactionId(), "");
}
throw new LabelAlreadyUsedException(label, notAbortedTxn.getTransactionStatus());
}
}
checkRunningTxnExceedLimit(sourceType);
long tid = idGenerator.getNextTransactionId();
LOG.info("begin transaction: txn id {} with label {} from coordinator {}, listner id: {}",
tid, label, coordinator, listenerId);
TransactionState transactionState =
new TransactionState(dbId, tableIdList, tid, label, requestId, sourceType,
coordinator, listenerId, timeoutSecond * 1000);
transactionState.setPrepareTime(System.currentTimeMillis());
unprotectUpsertTransactionState(transactionState, false);
if (MetricRepo.isInit) {
MetricRepo.COUNTER_TXN_BEGIN.increase(1L);
}
return tid;
} catch (DuplicatedRequestException e) {
throw e;
} catch (Exception e) {
if (MetricRepo.isInit) {
MetricRepo.COUNTER_TXN_REJECT.increase(1L);
}
throw e;
} finally {
writeUnlock();
}
}
private void checkDatabaseDataQuota() throws AnalysisException {
Database db = catalog.getDb(dbId);
if (db == null) {
throw new AnalysisException("Database[" + dbId + "] does not exist");
}
if (usedQuotaDataBytes == -1) {
usedQuotaDataBytes = db.getUsedDataQuotaWithLock();
}
long dataQuotaBytes = db.getDataQuota();
if (usedQuotaDataBytes >= dataQuotaBytes) {
Pair<Double, String> quotaUnitPair = DebugUtil.getByteUint(dataQuotaBytes);
String readableQuota = DebugUtil.DECIMAL_FORMAT_SCALE_3.format(quotaUnitPair.first) + " "
+ quotaUnitPair.second;
throw new AnalysisException("Database[" + db.getFullName()
+ "] data size exceeds quota[" + readableQuota + "]");
}
}
public void updateDatabaseUsedQuotaData(long usedQuotaDataBytes) {
this.usedQuotaDataBytes = usedQuotaDataBytes;
}
/**
* commit transaction process as follows:
* 1. validate whether `Load` is cancelled
* 2. validate whether `Table` is deleted
* 3. validate replicas consistency
* 4. update transaction state version
* 5. persistent transactionState
* 6. update nextVersion because of the failure of persistent transaction resulting in error version
*/
public void commitTransaction(long transactionId, List<TabletCommitInfo> tabletCommitInfos,
TxnCommitAttachment txnCommitAttachment)
throws UserException {
Database db = catalog.getDb(dbId);
if (null == db) {
throw new MetaNotFoundException("could not find db [" + dbId + "]");
}
TransactionState transactionState = null;
readLock();
try {
transactionState = unprotectedGetTransactionState(transactionId);
} finally {
readUnlock();
}
if (transactionState == null
|| transactionState.getTransactionStatus() == TransactionStatus.ABORTED) {
throw new TransactionCommitFailedException(
transactionState == null ? "transaction not found" : transactionState.getReason());
}
if (transactionState.getTransactionStatus() == TransactionStatus.VISIBLE) {
LOG.debug("transaction is already visible: {}", transactionId);
return;
}
if (transactionState.getTransactionStatus() == TransactionStatus.COMMITTED) {
LOG.debug("transaction is already committed: {}", transactionId);
return;
}
if (tabletCommitInfos == null || tabletCommitInfos.isEmpty()) {
throw new TransactionCommitFailedException(TransactionCommitFailedException.NO_DATA_TO_LOAD_MSG);
}
if (txnCommitAttachment != null) {
transactionState.setTxnCommitAttachment(txnCommitAttachment);
}
TabletInvertedIndex tabletInvertedIndex = catalog.getTabletInvertedIndex();
Map<Long, Set<Long>> tabletToBackends = new HashMap<>();
Map<Long, Set<Long>> tableToPartition = new HashMap<>();
Map<Long, Set<String>> tableToInvalidDictCacheColumns = new HashMap<>();
Map<Long, Set<String>> tableToValidDictCacheColumns = new HashMap<>();
List<Long> tabletIds = tabletCommitInfos.stream().map(
TabletCommitInfo::getTabletId).collect(Collectors.toList());
List<TabletMeta> tabletMetaList = tabletInvertedIndex.getTabletMetaList(tabletIds);
for (int i = 0; i < tabletMetaList.size(); i++) {
TabletMeta tabletMeta = tabletMetaList.get(i);
if (tabletMeta == TabletInvertedIndex.NOT_EXIST_TABLET_META) {
continue;
}
long tabletId = tabletIds.get(i);
long tableId = tabletMeta.getTableId();
OlapTable tbl = (OlapTable) db.getTable(tableId);
if (tbl == null) {
continue;
}
if (tbl.getState() == OlapTable.OlapTableState.RESTORE) {
throw new LoadException("Table " + tbl.getName() + " is in restore process. "
+ "Can not load into it");
}
long partitionId = tabletMeta.getPartitionId();
if (tbl.getPartition(partitionId) == null) {
continue;
}
if (!tableToPartition.containsKey(tableId)) {
tableToPartition.put(tableId, new HashSet<>());
}
tableToPartition.get(tableId).add(partitionId);
if (!tabletToBackends.containsKey(tabletId)) {
tabletToBackends.put(tabletId, new HashSet<>());
}
tabletToBackends.get(tabletId).add(tabletCommitInfos.get(i).getBackendId());
if (!tableToInvalidDictCacheColumns.containsKey(tableId)) {
tableToInvalidDictCacheColumns.put(tableId, new HashSet<>());
}
tableToInvalidDictCacheColumns.get(tableId).addAll(tabletCommitInfos.get(i).getInvalidDictCacheColumns());
if (!tableToValidDictCacheColumns.containsKey(tableId)) {
tableToValidDictCacheColumns.put(tableId, new HashSet<>());
}
if (tableToValidDictCacheColumns.get(tableId).isEmpty() &&
!tabletCommitInfos.get(i).getValidDictCacheColumns().isEmpty()) {
tableToValidDictCacheColumns.get(tableId).addAll(tabletCommitInfos.get(i).getValidDictCacheColumns());
}
if (i == tabletMetaList.size() - 1) {
tableToValidDictCacheColumns.get(tableId).removeAll(tableToInvalidDictCacheColumns.get(tableId));
}
}
if (tableToPartition.isEmpty()) {
throw new TransactionCommitFailedException(TransactionCommitFailedException.NO_DATA_TO_LOAD_MSG);
}
Set<Long> errorReplicaIds = Sets.newHashSet();
Set<Long> totalInvolvedBackends = Sets.newHashSet();
for (long tableId : tableToPartition.keySet()) {
OlapTable table = (OlapTable) db.getTable(tableId);
if (table == null) {
throw new MetaNotFoundException("Table does not exist: " + tableId);
}
for (Partition partition : table.getAllPartitions()) {
if (!tableToPartition.get(tableId).contains(partition.getId())) {
continue;
}
boolean useStarOS = partition.isUseStarOS();
List<MaterializedIndex> allIndices = transactionState.getPartitionLoadedTblIndexes(tableId, partition);
int quorumReplicaNum = table.getPartitionInfo().getQuorumNum(partition.getId());
for (MaterializedIndex index : allIndices) {
for (Tablet tablet : index.getTablets()) {
long tabletId = tablet.getId();
Set<Long> commitBackends = tabletToBackends.get(tabletId);
if (useStarOS) {
long backendId = ((StarOSTablet) tablet).getPrimaryBackendId();
totalInvolvedBackends.add(backendId);
if (!commitBackends.contains(backendId)) {
throw new TransactionCommitFailedException(
"Primary backend: " + backendId + " does not in commit backends: " +
Joiner.on(",").join(commitBackends));
}
} else {
Set<Long> tabletBackends = tablet.getBackendIds();
totalInvolvedBackends.addAll(tabletBackends);
Set<Long> errorBackendIdsForTablet = Sets.newHashSet();
int successReplicaNum = 0;
for (long tabletBackend : tabletBackends) {
Replica replica = tabletInvertedIndex.getReplica(tabletId, tabletBackend);
if (replica == null) {
Backend backend = Catalog.getCurrentSystemInfo().getBackend(tabletBackend);
throw new TransactionCommitFailedException("Not found replicas of tablet. "
+ "tablet_id: " + tabletId + ", backend_id: " + backend.getHost());
}
if (commitBackends != null && commitBackends.contains(tabletBackend)) {
if (replica.getLastFailedVersion() < 0) {
++successReplicaNum;
}
} else {
errorBackendIdsForTablet.add(tabletBackend);
errorReplicaIds.add(replica.getId());
}
}
if (successReplicaNum < quorumReplicaNum) {
List<String> errorBackends = new ArrayList<String>();
for (long backendId : errorBackendIdsForTablet) {
Backend backend = Catalog.getCurrentSystemInfo().getBackend(backendId);
errorBackends.add(backend.getId() + ":" + backend.getHost());
}
LOG.warn("Fail to load files. tablet_id: {}, txn_id: {}, backends: {}",
tablet.getId(), transactionId,
Joiner.on(",").join(errorBackends));
throw new TabletQuorumFailedException(tablet.getId(), transactionId, errorBackends);
}
}
}
}
}
}
transactionState.beforeStateTransform(TransactionStatus.COMMITTED);
boolean txnOperated = false;
writeLock();
try {
unprotectedCommitTransaction(transactionState, errorReplicaIds, tableToPartition,
tableToInvalidDictCacheColumns, tableToValidDictCacheColumns,
totalInvolvedBackends, db);
txnOperated = true;
} finally {
writeUnlock();
transactionState.afterStateTransform(TransactionStatus.COMMITTED, txnOperated);
}
updateCatalogAfterCommitted(transactionState, db);
LOG.info("transaction:[{}] successfully committed", transactionState);
}
public boolean publishTransaction(Database db, long transactionId, long timeoutMillis)
throws TransactionCommitFailedException {
TransactionState transactionState = null;
readLock();
try {
transactionState = unprotectedGetTransactionState(transactionId);
} finally {
readUnlock();
}
switch (transactionState.getTransactionStatus()) {
case COMMITTED:
case VISIBLE:
break;
default:
LOG.warn("transaction commit failed, db={}, txn={}", db.getFullName(), transactionId);
throw new TransactionCommitFailedException("transaction commit failed");
}
long currentTimeMillis = System.currentTimeMillis();
long timeoutTimeMillis = currentTimeMillis + timeoutMillis;
while (currentTimeMillis < timeoutTimeMillis &&
transactionState.getTransactionStatus() == TransactionStatus.COMMITTED) {
try {
transactionState.waitTransactionVisible(timeoutMillis);
} catch (InterruptedException e) {
LOG.info("timed out while waiting for transaction {} to be visible", transactionId);
}
currentTimeMillis = System.currentTimeMillis();
}
return transactionState.getTransactionStatus() == TransactionStatus.VISIBLE;
}
public void deleteTransaction(TransactionState transactionState) {
writeLock();
try {
if (!finalStatusTransactionStateDeque.isEmpty() &&
transactionState.getTransactionId() ==
finalStatusTransactionStateDeque.getFirst().getTransactionId()) {
finalStatusTransactionStateDeque.pop();
clearTransactionState(transactionState);
}
} finally {
writeUnlock();
}
}
public TransactionStatus getLabelState(String label) {
readLock();
try {
Set<Long> existingTxnIds = unprotectedGetTxnIdsByLabel(label);
if (existingTxnIds == null || existingTxnIds.isEmpty()) {
return TransactionStatus.UNKNOWN;
}
long maxTxnId = existingTxnIds.stream().max(Comparator.comparingLong(Long::valueOf)).get();
return unprotectedGetTransactionState(maxTxnId).getTransactionStatus();
} finally {
readUnlock();
}
}
public List<TransactionState> getCommittedTxnList() {
readLock();
try {
return idToRunningTransactionState.values().stream()
.filter(transactionState -> (transactionState.getTransactionStatus() ==
TransactionStatus.COMMITTED))
.sorted(Comparator.comparing(TransactionState::getCommitTime))
.collect(Collectors.toList());
} finally {
readUnlock();
}
}
public void finishTransaction(long transactionId, Set<Long> errorReplicaIds) throws UserException {
TransactionState transactionState = null;
readLock();
try {
transactionState = unprotectedGetTransactionState(transactionId);
} finally {
readUnlock();
}
if (errorReplicaIds == null) {
errorReplicaIds = Sets.newHashSet();
}
Set<Long> originalErrorReplicas = transactionState.getErrorReplicas();
if (originalErrorReplicas != null) {
errorReplicaIds.addAll(originalErrorReplicas);
}
Database db = catalog.getDb(transactionState.getDbId());
if (db == null) {
writeLock();
try {
transactionState.setTransactionStatus(TransactionStatus.ABORTED);
transactionState.setReason("db is dropped");
LOG.warn("db is dropped during transaction, abort transaction {}", transactionState);
unprotectUpsertTransactionState(transactionState, false);
return;
} finally {
writeUnlock();
}
}
db.writeLock();
try {
boolean hasError = false;
for (TableCommitInfo tableCommitInfo : transactionState.getIdToTableCommitInfos().values()) {
long tableId = tableCommitInfo.getTableId();
OlapTable table = (OlapTable) db.getTable(tableId);
if (table == null) {
transactionState.removeTable(tableId);
LOG.warn("table {} is dropped, skip version check and remove it from transaction state {}",
tableId,
transactionState);
continue;
}
PartitionInfo partitionInfo = table.getPartitionInfo();
for (PartitionCommitInfo partitionCommitInfo : tableCommitInfo.getIdToPartitionCommitInfo().values()) {
long partitionId = partitionCommitInfo.getPartitionId();
Partition partition = table.getPartition(partitionId);
if (partition == null) {
tableCommitInfo.removePartition(partitionId);
LOG.warn("partition {} is dropped, skip version check and remove it from transaction state {}",
partitionId,
transactionState);
continue;
}
if (partition.getVisibleVersion() != partitionCommitInfo.getVersion() - 1) {
if (transactionState.getLastErrTimeMs() + 3000 < System.nanoTime() / 1000000) {
LOG.debug("transactionId {} partition commitInfo version {} is not equal with " +
"partition visible version {} plus one, need wait",
transactionId,
partitionCommitInfo.getVersion(),
partition.getVisibleVersion());
}
String errMsg =
String.format("wait for publishing partition %d version %d. self version: %d. table %d",
partitionId, partition.getVisibleVersion() + 1,
partitionCommitInfo.getVersion(), tableId);
transactionState.setErrorMsg(errMsg);
return;
}
if (partition.isUseStarOS()) {
continue;
}
int quorumReplicaNum = partitionInfo.getQuorumNum(partitionId);
List<MaterializedIndex> allIndices =
transactionState.getPartitionLoadedTblIndexes(tableId, partition);
for (MaterializedIndex index : allIndices) {
for (Tablet tablet : index.getTablets()) {
int healthReplicaNum = 0;
for (Replica replica : ((LocalTablet) tablet).getReplicas()) {
if (!errorReplicaIds.contains(replica.getId())
&& replica.getLastFailedVersion() < 0) {
if (replica.checkVersionCatchUp(partition.getVisibleVersion(), true)) {
replica.updateRowCount(partitionCommitInfo.getVersion(),
replica.getDataSize(), replica.getRowCount());
++healthReplicaNum;
} else {
replica.updateVersionInfo(replica.getVersion(),
partition.getVisibleVersion(),
partitionCommitInfo.getVersion());
LOG.warn("transaction state {} has error, the replica [{}] not appeared " +
"in error replica list and its version not equal to partition " +
"commit version or commit version - 1 if its not a upgrate " +
"stage, its a fatal error. ",
transactionState, replica);
}
} else if (replica.getVersion() >= partitionCommitInfo.getVersion()) {
errorReplicaIds.remove(replica.getId());
++healthReplicaNum;
}
}
if (healthReplicaNum < quorumReplicaNum) {
if (transactionState.getLastErrTimeMs() + 3000 < System.nanoTime() / 1000000) {
LOG.info("publish version failed for transaction {} on tablet {}, with only {} " +
"replicas less than quorum {}", transactionState, tablet, healthReplicaNum,
quorumReplicaNum);
}
String errMsg = String.format(
"publish on tablet %d failed. succeed replica num %d less than quorum %d."
+ " table: %d, partition: %d, publish version: %d",
tablet.getId(), healthReplicaNum, quorumReplicaNum, tableId, partitionId,
partition.getVisibleVersion() + 1);
transactionState.setErrorMsg(errMsg);
hasError = true;
}
}
}
}
}
if (hasError) {
return;
}
boolean txnOperated = false;
writeLock();
try {
transactionState.setErrorReplicas(errorReplicaIds);
transactionState.setFinishTime(System.currentTimeMillis());
transactionState.clearErrorMsg();
transactionState.setTransactionStatus(TransactionStatus.VISIBLE);
unprotectUpsertTransactionState(transactionState, false);
txnOperated = true;
LOG.debug("after set transaction {} to visible", transactionState);
} finally {
writeUnlock();
transactionState.afterStateTransform(TransactionStatus.VISIBLE, txnOperated);
}
updateCatalogAfterVisible(transactionState, db);
} finally {
db.writeUnlock();
}
LOG.info("finish transaction {} successfully", transactionState);
}
/**
 * Turns a PREPARE txn into COMMITTED under the caller-held write lock:
 * stamps the commit time, records error replicas, and snapshots each loaded
 * partition's next version into the txn's per-table commit infos. Finally
 * persists the state and registers a publish-version slot (null placeholder
 * task) for every involved backend.
 */
protected void unprotectedCommitTransaction(TransactionState transactionState, Set<Long> errorReplicaIds,
                                            Map<Long, Set<Long>> tableToPartition,
                                            Map<Long, Set<String>> tableToInvalidDictColumns,
                                            Map<Long, Set<String>> tableToValidDictColumns,
                                            Set<Long> totalInvolvedBackends,
                                            Database db) {
    // Only a PREPARE txn may be committed; ignore repeated or out-of-order calls.
    if (transactionState.getTransactionStatus() != TransactionStatus.PREPARE) {
        return;
    }
    transactionState.setCommitTime(System.currentTimeMillis());
    transactionState.setTransactionStatus(TransactionStatus.COMMITTED);
    transactionState.setErrorReplicas(errorReplicaIds);
    for (Map.Entry<Long, Set<Long>> entry : tableToPartition.entrySet()) {
        long tableId = entry.getKey();
        // The table lookup is loop-invariant: resolve it once per table,
        // not once per partition as before.
        OlapTable table = (OlapTable) db.getTable(tableId);
        TableCommitInfo tableCommitInfo = new TableCommitInfo(tableId);
        boolean isFirstPartition = true;
        for (long partitionId : entry.getValue()) {
            Partition partition = table.getPartition(partitionId);
            PartitionCommitInfo partitionCommitInfo;
            if (isFirstPartition) {
                // Dict-cache column lists are keyed by table; attach them to the
                // first partition's commit info only (matches original behavior).
                partitionCommitInfo = new PartitionCommitInfo(partitionId,
                        partition.getNextVersion(),
                        System.currentTimeMillis(),
                        Lists.newArrayList(tableToInvalidDictColumns.get(tableId)),
                        Lists.newArrayList(tableToValidDictColumns.get(tableId)));
            } else {
                partitionCommitInfo = new PartitionCommitInfo(partitionId,
                        partition.getNextVersion(),
                        System.currentTimeMillis() /* use as partition visible time */);
            }
            tableCommitInfo.addPartitionCommitInfo(partitionCommitInfo);
            isFirstPartition = false;
        }
        transactionState.putIdToTableCommitInfo(tableId, tableCommitInfo);
    }
    // Persist the COMMITTED state (writes the edit log), then reserve a
    // publish-version task entry per backend; the task payload is filled in later.
    unprotectUpsertTransactionState(transactionState, false);
    for (long backendId : totalInvolvedBackends) {
        transactionState.addPublishVersionTask(backendId, null);
    }
}
// Inserts or updates the in-memory bookkeeping for a txn and, when not replaying,
// persists the state change to the edit log. Per this class's naming convention,
// "unprotect*" methods expect the caller to already hold the write lock
// (see e.g. the writeLock()/writeUnlock() pairs around the call sites).
protected void unprotectUpsertTransactionState(TransactionState transactionState, boolean isReplay) {
if (!isReplay) {
// Log every state except the initial PREPARE — unless the txn is
// FRONTEND-sourced, which is logged in every state.
if (transactionState.getTransactionStatus() != TransactionStatus.PREPARE
|| transactionState.getSourceType() == TransactionState.LoadJobSourceType.FRONTEND) {
editLog.logInsertTransactionState(transactionState);
}
}
if (!transactionState.getTransactionStatus().isFinalStatus()) {
// Still running: track it, bumping the matching counter only on first insert.
if (idToRunningTransactionState.put(transactionState.getTransactionId(), transactionState) == null) {
if (transactionState.getSourceType() == TransactionState.LoadJobSourceType.ROUTINE_LOAD_TASK) {
runningRoutineLoadTxnNums++;
} else {
runningTxnNums++;
}
}
} else {
// Final status (e.g. VISIBLE/ABORTED): move it from the running map to the
// final-status map and the eviction deque, adjusting counters symmetrically.
if (idToRunningTransactionState.remove(transactionState.getTransactionId()) != null) {
if (transactionState.getSourceType() == TransactionState.LoadJobSourceType.ROUTINE_LOAD_TASK) {
runningRoutineLoadTxnNums--;
} else {
runningTxnNums--;
}
}
idToFinalStatusTransactionState.put(transactionState.getTransactionId(), transactionState);
finalStatusTransactionStateDeque.add(transactionState);
}
// Keep the label -> txn-ids index in sync in all cases.
updateTxnLabels(transactionState);
}
// Registers the txn id under its label. A label can map to several txn ids
// (e.g. a new attempt after an aborted one reuses the label), hence a Set.
private void updateTxnLabels(TransactionState transactionState) {
    labelToTxnIds.computeIfAbsent(transactionState.getLabel(), k -> Sets.newHashSet())
            .add(transactionState.getTransactionId());
}
/**
 * Aborts the currently running (PREPARE) transaction registered under the given label.
 * Resolves the label to its txn ids, finds the one still in PREPARE state, and
 * delegates to abortTransaction(long, String, TxnCommitAttachment).
 *
 * @throws TransactionNotFoundException if the label is unknown or none of its txns is running
 */
public void abortTransaction(String label, String reason) throws UserException {
    Preconditions.checkNotNull(label);
    TransactionState runningTxn = null;
    readLock();
    try {
        Set<Long> txnIdsWithLabel = unprotectedGetTxnIdsByLabel(label);
        if (txnIdsWithLabel == null || txnIdsWithLabel.isEmpty()) {
            throw new TransactionNotFoundException("transaction not found, label=" + label);
        }
        for (Long candidateId : txnIdsWithLabel) {
            TransactionState candidate = unprotectedGetTransactionState(candidateId);
            if (candidate.getTransactionStatus() == TransactionStatus.PREPARE) {
                runningTxn = candidate;
                break;
            }
        }
        if (runningTxn == null) {
            throw new TransactionNotFoundException("running transaction not found, label=" + label);
        }
    } finally {
        readUnlock();
    }
    // Perform the actual abort outside the read lock; the overload re-validates.
    abortTransaction(runningTxn.getTransactionId(), reason, null);
}
// Aborts a running transaction by id. An optional commit attachment (extra load
// metadata) is attached before the state change so it is persisted with ABORTED.
// Negative ids are silently ignored for backward compatibility with old load jobs.
public void abortTransaction(long transactionId, String reason, TxnCommitAttachment txnCommitAttachment)
throws UserException {
if (transactionId < 0) {
LOG.info("transaction id is {}, less than 0, maybe this is an old type load job, ignore abort operation",
transactionId);
return;
}
TransactionState transactionState = null;
readLock();
try {
// Only running txns can be aborted; finished ones are not looked up here.
transactionState = idToRunningTransactionState.get(transactionId);
} finally {
readUnlock();
}
if (transactionState == null) {
throw new TransactionNotFoundException("transaction not found", transactionId);
}
if (txnCommitAttachment != null) {
transactionState.setTxnCommitAttachment(txnCommitAttachment);
}
// Pre-transition callback runs before the write lock is taken; the matching
// after-callback always runs in the finally below with the actual outcome.
// NOTE(review): this before/locked/after ordering looks deliberate — keep it.
transactionState.beforeStateTransform(TransactionStatus.ABORTED);
boolean txnOperated = false;
writeLock();
try {
txnOperated = unprotectAbortTransaction(transactionId, reason);
} finally {
writeUnlock();
transactionState.afterStateTransform(TransactionStatus.ABORTED, txnOperated, reason);
}
// Ask backends to clean up only when this call actually performed the abort.
if (txnOperated && transactionState.getTransactionStatus() == TransactionStatus.ABORTED) {
clearBackendTransactions(transactionState);
}
}
// Transitions a txn to ABORTED; caller must hold the write lock ("unprotect" convention).
// Returns false if the txn is already ABORTED (idempotent no-op); throws if the txn
// has already advanced to COMMITTED/VISIBLE, which cannot be rolled back.
private boolean unprotectAbortTransaction(long transactionId, String reason)
throws UserException {
TransactionState transactionState = unprotectedGetTransactionState(transactionId);
if (transactionState == null) {
throw new TransactionNotFoundException("transaction not found", transactionId);
}
if (transactionState.getTransactionStatus() == TransactionStatus.ABORTED) {
return false;
}
if (transactionState.getTransactionStatus() == TransactionStatus.COMMITTED
|| transactionState.getTransactionStatus() == TransactionStatus.VISIBLE) {
throw new UserException("transaction's state is already "
+ transactionState.getTransactionStatus() + ", could not abort");
}
transactionState.setFinishTime(System.currentTimeMillis());
transactionState.setReason(reason);
transactionState.setTransactionStatus(TransactionStatus.ABORTED);
// Persist the new state (edit log) and update the in-memory maps/counters.
unprotectUpsertTransactionState(transactionState, false);
// Cancel any publish-version tasks still queued for this txn on the backends.
for (PublishVersionTask task : transactionState.getPublishVersionTasks().values()) {
AgentTaskQueue.removeTask(task.getBackendId(), TTaskType.PUBLISH_VERSION, task.getSignature());
}
return true;
}
// Queues one clear-transaction task per backend for an ABORTED txn. Tasks are
// buffered in clearTransactionTasks and flushed as a single batch once more than
// twice the backend count has accumulated; submission happens outside the monitor.
private void clearBackendTransactions(TransactionState transactionState) {
    Preconditions.checkState(transactionState.getTransactionStatus() == TransactionStatus.ABORTED);
    List<Long> allBeIds = Catalog.getCurrentSystemInfo().getBackendIds(false);
    AgentBatchTask batchToSubmit = null;
    synchronized (clearTransactionTasks) {
        for (Long backendId : allBeIds) {
            clearTransactionTasks.add(new ClearTransactionTask(
                    backendId, transactionState.getTransactionId(), Lists.newArrayList()));
        }
        if (clearTransactionTasks.size() > allBeIds.size() * 2) {
            batchToSubmit = new AgentBatchTask();
            for (ClearTransactionTask queued : clearTransactionTasks) {
                batchToSubmit.addTask(queued);
            }
            clearTransactionTasks.clear();
        }
    }
    if (batchToSubmit != null) {
        AgentTaskExecutor.submit(batchToSubmit);
    }
}
// Builds one display row per table touched by the txn:
// [tableId, "p1, p2, ..."] (comma-joined partition ids).
protected List<List<Comparable>> getTableTransInfo(long txnId) throws AnalysisException {
    List<List<Comparable>> result = new ArrayList<>();
    readLock();
    try {
        TransactionState txn = unprotectedGetTransactionState(txnId);
        if (txn == null) {
            throw new AnalysisException("Transaction[" + txnId + "] does not exist.");
        }
        for (Map.Entry<Long, TableCommitInfo> entry : txn.getIdToTableCommitInfos().entrySet()) {
            List<Long> partitionIds = entry.getValue().getIdToPartitionCommitInfo().values().stream()
                    .map(PartitionCommitInfo::getPartitionId)
                    .collect(Collectors.toList());
            List<Comparable> row = new ArrayList<>();
            row.add(entry.getKey());
            row.add(Joiner.on(", ").join(partitionIds));
            result.add(row);
        }
    } finally {
        readUnlock();
    }
    return result;
}
// Builds one display row per partition of `tableId` committed by the txn:
// [partitionId, version].
protected List<List<Comparable>> getPartitionTransInfo(long txnId, long tableId) throws AnalysisException {
    List<List<Comparable>> partitionInfos = new ArrayList<>();
    readLock();
    try {
        TransactionState transactionState = unprotectedGetTransactionState(txnId);
        if (null == transactionState) {
            throw new AnalysisException("Transaction[" + txnId + "] does not exist.");
        }
        TableCommitInfo tableCommitInfo = transactionState.getIdToTableCommitInfos().get(tableId);
        // Fix: the txn may not involve this table at all; the original dereferenced
        // a possibly-null commit info and threw a raw NPE.
        if (tableCommitInfo == null) {
            throw new AnalysisException(
                    "Table[" + tableId + "] does not exist in transaction[" + txnId + "].");
        }
        Map<Long, PartitionCommitInfo> idToPartitionCommitInfo = tableCommitInfo.getIdToPartitionCommitInfo();
        for (Map.Entry<Long, PartitionCommitInfo> entry : idToPartitionCommitInfo.entrySet()) {
            List<Comparable> partitionInfo = new ArrayList<>();
            partitionInfo.add(entry.getKey());
            partitionInfo.add(entry.getValue().getVersion());
            partitionInfos.add(partitionInfo);
        }
    } finally {
        readUnlock();
    }
    return partitionInfos;
}
// Evicts finished txns from the front of the deque (oldest first). A txn is removed
// either because it expired, or because the total retained count exceeds
// Config.label_keep_max_num. Stops at the first txn that matches neither condition.
public void removeExpiredTxns(long currentMillis) {
    writeLock();
    try {
        int numJobsToRemove = getTransactionNum() - Config.label_keep_max_num;
        while (!finalStatusTransactionStateDeque.isEmpty()) {
            TransactionState transactionState = finalStatusTransactionStateDeque.getFirst();
            if (transactionState.isExpired(currentMillis) || numJobsToRemove > 0) {
                finalStatusTransactionStateDeque.pop();
                clearTransactionState(transactionState);
                --numJobsToRemove;
                // Fix: the old message claimed "expired" even when the removal was
                // actually due to the label_keep_max_num cap.
                LOG.info("transaction [" + transactionState.getTransactionId() +
                        "] is expired or exceeds label_keep_max_num, remove it from transaction manager");
            } else {
                break;
            }
        }
    } finally {
        writeUnlock();
    }
}
// Removes a finished txn from the final-status map and from the label index,
// dropping the label entry entirely once its last txn id is gone.
private void clearTransactionState(TransactionState transactionState) {
    idToFinalStatusTransactionState.remove(transactionState.getTransactionId());
    Set<Long> txnIds = unprotectedGetTxnIdsByLabel(transactionState.getLabel());
    // Fix: guard against a missing label entry (the original would NPE if the
    // label had already been cleaned up).
    if (txnIds != null) {
        txnIds.remove(transactionState.getTransactionId());
        if (txnIds.isEmpty()) {
            labelToTxnIds.remove(transactionState.getLabel());
        }
    }
}
// Total number of txns tracked by this manager: running plus finished.
public int getTransactionNum() {
    int runningCount = idToRunningTransactionState.size();
    int finishedCount = finalStatusTransactionStateDeque.size();
    return runningCount + finishedCount;
}
// Returns the first txn whose callback id matches AND whose status is in the given
// set, searching running txns first, then finished ones; null when none matches.
public TransactionState getTransactionStateByCallbackIdAndStatus(long callbackId, Set<TransactionStatus> status) {
    readLock();
    try {
        for (TransactionState state : idToRunningTransactionState.values()) {
            if (state.getCallbackId() == callbackId && status.contains(state.getTransactionStatus())) {
                return state;
            }
        }
        for (TransactionState state : finalStatusTransactionStateDeque) {
            if (state.getCallbackId() == callbackId && status.contains(state.getTransactionStatus())) {
                return state;
            }
        }
        return null;
    } finally {
        readUnlock();
    }
}
// Returns the first txn with the given callback id — running txns take precedence
// over finished ones — or null when no txn carries that callback id.
public TransactionState getTransactionStateByCallbackId(long callbackId) {
    TransactionState found = null;
    readLock();
    try {
        for (TransactionState state : idToRunningTransactionState.values()) {
            if (state.getCallbackId() == callbackId) {
                found = state;
                break;
            }
        }
        if (found == null) {
            for (TransactionState state : finalStatusTransactionStateDeque) {
                if (state.getCallbackId() == callbackId) {
                    found = state;
                    break;
                }
            }
        }
    } finally {
        readUnlock();
    }
    return found;
}
// Collects up to `limit` (dbId, txnId) pairs for running txns coordinated by the
// given backend host.
public List<Pair<Long, Long>> getTransactionIdByCoordinateBe(String coordinateHost, int limit) {
    ArrayList<Pair<Long, Long>> txnInfos = new ArrayList<>();
    readLock();
    try {
        for (TransactionState txn : idToRunningTransactionState.values()) {
            if (txnInfos.size() >= limit) {
                break;
            }
            if (txn.getCoordinator().sourceType == TransactionState.TxnSourceType.BE
                    && txn.getCoordinator().ip.equals(coordinateHost)) {
                txnInfos.add(new Pair<>(txn.getDbId(), txn.getTransactionId()));
            }
        }
    } finally {
        readUnlock();
    }
    return txnInfos;
}
// Builds the display row for a single txn (used by SHOW TRANSACTION — see the
// "SHOW TRANSACTION" literal in the auth error below). When invoked from a user
// connection, verifies SHOW privilege on every table the txn touched first.
public List<List<String>> getSingleTranInfo(long dbId, long txnId) throws AnalysisException {
List<List<String>> infos = new ArrayList<List<String>>();
readLock();
try {
Database db = Catalog.getCurrentCatalog().getDb(dbId);
if (db == null) {
throw new AnalysisException("Database[" + dbId + "] does not exist");
}
TransactionState txnState = unprotectedGetTransactionState(txnId);
if (txnState == null) {
throw new AnalysisException("transaction with id " + txnId + " does not exist");
}
// ConnectContext is null for internal callers; only user sessions need auth checks.
if (ConnectContext.get() != null) {
Set<Long> tblIds = txnState.getIdToTableCommitInfos().keySet();
for (Long tblId : tblIds) {
Table tbl = db.getTable(tblId);
// A dropped table is simply skipped — no privilege left to check.
if (tbl != null) {
if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(ConnectContext.get(), db.getFullName(),
tbl.getName(), PrivPredicate.SHOW)) {
ErrorReport.reportAnalysisException(ErrorCode.ERR_TABLEACCESS_DENIED_ERROR,
"SHOW TRANSACTION",
ConnectContext.get().getQualifiedUser(),
ConnectContext.get().getRemoteIP(),
tbl.getName());
}
}
}
}
List<String> info = Lists.newArrayList();
getTxnStateInfo(txnState, info);
infos.add(info);
} finally {
readUnlock();
}
return infos;
}
// Rejects a new txn when the per-db running-txn cap is reached.
// Routine-load tasks are exempt (they are counted separately).
protected void checkRunningTxnExceedLimit(TransactionState.LoadJobSourceType sourceType)
        throws BeginTransactionException {
    if (sourceType == TransactionState.LoadJobSourceType.ROUTINE_LOAD_TASK) {
        return;
    }
    if (runningTxnNums >= Config.max_running_txn_num_per_db) {
        throw new BeginTransactionException("current running txns on db " + dbId + " is "
                + runningTxnNums + ", larger than limit " + Config.max_running_txn_num_per_db);
    }
}
// Applies a COMMITTED txn to the catalog: records the commit version as the
// last-failed version on every replica that errored during this txn, and advances
// each touched partition's nextVersion by one.
// NOTE(review): callers appear to hold the db lock (see replay/commit paths) — confirm.
private void updateCatalogAfterCommitted(TransactionState transactionState, Database db) {
Set<Long> errorReplicaIds = transactionState.getErrorReplicas();
for (TableCommitInfo tableCommitInfo : transactionState.getIdToTableCommitInfos().values()) {
long tableId = tableCommitInfo.getTableId();
OlapTable table = (OlapTable) db.getTable(tableId);
for (PartitionCommitInfo partitionCommitInfo : tableCommitInfo.getIdToPartitionCommitInfo().values()) {
long partitionId = partitionCommitInfo.getPartitionId();
Partition partition = table.getPartition(partitionId);
// StarOS (shared-storage) partitions keep no per-replica version state here.
if (!partition.isUseStarOS()) {
List<MaterializedIndex> allIndices =
partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL);
for (MaterializedIndex index : allIndices) {
for (Tablet tablet : index.getTablets()) {
for (Replica replica : ((LocalTablet) tablet).getReplicas()) {
// Mark failed replicas so they can be caught up later.
if (errorReplicaIds.contains(replica.getId())) {
replica.updateLastFailedVersion(partitionCommitInfo.getVersion());
}
}
}
}
}
// Reserve the next version slot for subsequent txns on this partition.
partition.setNextVersion(partition.getNextVersion() + 1);
}
}
}
// Applies a VISIBLE txn to the catalog: finalizes every replica's version
// bookkeeping, publishes each partition's new visible version, and refreshes the
// global dict-cache metadata for the affected columns. Always returns true.
private boolean updateCatalogAfterVisible(TransactionState transactionState, Database db) {
Set<Long> errorReplicaIds = transactionState.getErrorReplicas();
for (TableCommitInfo tableCommitInfo : transactionState.getIdToTableCommitInfos().values()) {
long tableId = tableCommitInfo.getTableId();
OlapTable table = (OlapTable) db.getTable(tableId);
List<String> validDictCacheColumns = Lists.newArrayList();
long maxPartitionVersionTime = -1;
for (PartitionCommitInfo partitionCommitInfo : tableCommitInfo.getIdToPartitionCommitInfo().values()) {
long partitionId = partitionCommitInfo.getPartitionId();
long newCommitVersion = partitionCommitInfo.getVersion();
Partition partition = table.getPartition(partitionId);
// StarOS (shared-storage) partitions keep no per-replica version state here.
if (!partition.isUseStarOS()) {
List<MaterializedIndex> allIndices =
partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL);
for (MaterializedIndex index : allIndices) {
for (Tablet tablet : index.getTablets()) {
for (Replica replica : ((LocalTablet) tablet).getReplicas()) {
long lastFailedVersion = replica.getLastFailedVersion();
long newVersion = newCommitVersion;
long lastSucessVersion = replica.getLastSuccessVersion();
if (!errorReplicaIds.contains(replica.getId())) {
// Replica succeeded in this txn...
if (replica.getLastFailedVersion() > 0) {
// ...but an earlier failure is still pending: keep its
// current version (repair presumably catches it up — confirm).
newVersion = replica.getVersion();
} else if (!replica.checkVersionCatchUp(partition.getVisibleVersion(), true)) {
// Replica lags the currently visible version: record that
// version as failed and keep the replica's own version.
lastFailedVersion = partition.getVisibleVersion();
newVersion = replica.getVersion();
}
// This txn succeeded on the replica, so advance its last success version.
lastSucessVersion = newCommitVersion;
} else {
// Error replica: keep its version; remember the newest failed version.
newVersion = replica.getVersion();
if (newCommitVersion > lastFailedVersion) {
lastFailedVersion = newCommitVersion;
}
}
replica.updateVersionInfo(newVersion, lastFailedVersion, lastSucessVersion);
}
}
}
}
long version = partitionCommitInfo.getVersion();
long versionTime = partitionCommitInfo.getVersionTime();
// Make the new version visible to queries on this partition.
partition.updateVisibleVersion(version, versionTime);
if (LOG.isDebugEnabled()) {
LOG.debug("transaction state {} set partition {}'s version to [{}]",
transactionState, partition.getId(), version);
}
// Drop dict-cache entries invalidated by this load...
if (!partitionCommitInfo.getInvalidDictCacheColumns().isEmpty()) {
for (String column : partitionCommitInfo.getInvalidDictCacheColumns()) {
IDictManager.getInstance().removeGlobalDict(tableId, column);
}
}
// ...and keep the still-valid column list (the last non-empty list wins).
if (!partitionCommitInfo.getValidDictCacheColumns().isEmpty()) {
validDictCacheColumns = partitionCommitInfo.getValidDictCacheColumns();
}
maxPartitionVersionTime = Math.max(maxPartitionVersionTime, versionTime);
}
// Stamp valid dict columns with the newest partition version time for this table.
for (String column : validDictCacheColumns) {
IDictManager.getInstance().updateGlobalDict(tableId, column, maxPartitionVersionTime);
}
}
return true;
}
// Returns true when no running txn with id <= endTransactionId on this db touches
// any of the given tables — i.e. every "previous" relevant txn has finished.
public boolean isPreviousTransactionsFinished(long endTransactionId, List<Long> tableIdList) {
    readLock();
    try {
        for (Map.Entry<Long, TransactionState> entry : idToRunningTransactionState.entrySet()) {
            TransactionState txn = entry.getValue();
            boolean relevant = txn.getDbId() == dbId
                    && txn.isRunning()
                    && isIntersectionNotEmpty(txn.getTableIdList(), tableIdList);
            if (relevant && entry.getKey() <= endTransactionId) {
                LOG.debug("find a running txn with txn_id={} on db: {}, less than watermark txn_id {}",
                        entry.getKey(), dbId, endTransactionId);
                return false;
            }
        }
        return true;
    } finally {
        readUnlock();
    }
}
/**
 * Check whether there is an intersection between the source and target table id lists.
 * If either list is null or empty, we do not know which tables are involved, so we
 * conservatively assume the two lists may intersect, keeping txns correctly ordered.
 */
// Returns true when the two table-id lists share at least one element, or when
// either list is null/empty (unknown involvement is treated as a possible overlap).
public boolean isIntersectionNotEmpty(List<Long> sourceTableIdList, List<Long> targetTableIdList) {
    if (CollectionUtils.isEmpty(sourceTableIdList) || CollectionUtils.isEmpty(targetTableIdList)) {
        return true;
    }
    for (Long srcValue : sourceTableIdList) {
        if (targetTableIdList.contains(srcValue)) {
            return true;
        }
    }
    return false;
}
// Returns the ids of all running txns that have timed out as of currentMillis.
public List<Long> getTimeoutTxns(long currentMillis) {
    readLock();
    try {
        return idToRunningTransactionState.values().stream()
                .filter(txn -> txn.isTimeout(currentMillis))
                .map(TransactionState::getTransactionId)
                .collect(Collectors.toList());
    } finally {
        readUnlock();
    }
}
// Aborts every running txn that has timed out as of currentMillis.
// Failures to abort are logged and do not stop the remaining aborts.
public void abortTimeoutTxns(long currentMillis) {
    for (Long txnId : getTimeoutTxns(currentMillis)) {
        try {
            abortTransaction(txnId, "timeout by txn manager", null);
            LOG.info("transaction [" + txnId + "] is timeout, abort it by transaction manager");
        } catch (UserException e) {
            LOG.warn("abort timeout txn {} failed. msg: {}", txnId, e.getMessage());
        }
    }
}
// Replays a txn state change during metadata replay (restart / follower catch-up):
// applies the same catalog side effects as the original operation, then updates the
// in-memory maps. isReplay=true below prevents re-writing the edit log.
public void replayUpsertTransactionState(TransactionState transactionState) {
writeLock();
try {
transactionState.replaySetTransactionStatus();
Database db = catalog.getDb(transactionState.getDbId());
if (transactionState.getTransactionStatus() == TransactionStatus.COMMITTED) {
LOG.info("replay a committed transaction {}", transactionState);
updateCatalogAfterCommitted(transactionState, db);
} else if (transactionState.getTransactionStatus() == TransactionStatus.VISIBLE) {
LOG.info("replay a visible transaction {}", transactionState);
updateCatalogAfterVisible(transactionState, db);
}
unprotectUpsertTransactionState(transactionState, true);
} finally {
writeUnlock();
}
}
// Summarizes this db's txn counts as two [name, count] rows: "running" and "finished".
public List<List<String>> getDbTransStateInfo() {
    List<List<String>> infos = Lists.newArrayList();
    readLock();
    try {
        int runningNum = runningTxnNums + runningRoutineLoadTxnNums;
        infos.add(Lists.newArrayList("running", String.valueOf(runningNum)));
        infos.add(Lists.newArrayList("finished", String.valueOf(getFinishedTxnNums())));
    } finally {
        readUnlock();
    }
    return infos;
}
// Serializes all tracked txns to the image stream: running txns first, then the
// finished ones in deque (oldest-first) order. Caller must hold the lock.
public void unprotectWriteAllTransactionStates(DataOutput out) throws IOException {
    for (TransactionState state : idToRunningTransactionState.values()) {
        state.write(out);
    }
    for (TransactionState state : finalStatusTransactionStateDeque) {
        state.write(out);
    }
}
} | class DatabaseTransactionMgr {
private static final Logger LOG = LogManager.getLogger(DatabaseTransactionMgr.class);
// The database this manager is scoped to.
private long dbId;
// Fair read/write lock guarding all maps/counters below.
private ReentrantReadWriteLock transactionLock = new ReentrantReadWriteLock(true);
// Txns that have not yet reached a final status.
private Map<Long, TransactionState> idToRunningTransactionState = Maps.newHashMap();
// Txns in a final status (VISIBLE/ABORTED), kept until expired/evicted.
private Map<Long, TransactionState> idToFinalStatusTransactionState = Maps.newHashMap();
// Same finished txns in arrival order; eviction pops from the front (oldest first).
private ArrayDeque<TransactionState> finalStatusTransactionStateDeque = new ArrayDeque<>();
// Label -> all txn ids ever registered under that label (retries reuse the label).
private Map<String, Set<Long>> labelToTxnIds = Maps.newHashMap();
// Running txns excluding routine-load tasks (checked against max_running_txn_num_per_db).
private int runningTxnNums = 0;
// Running routine-load txns, counted separately (exempt from the per-db cap).
private int runningRoutineLoadTxnNums = 0;
private Catalog catalog;
private EditLog editLog;
private TransactionIdGenerator idGenerator;
// Pending per-backend clear tasks, batched and flushed in clearBackendTransactions().
private List<ClearTransactionTask> clearTransactionTasks = Lists.newArrayList();
// Cached used-data size for quota checks; -1 means "not fetched yet" (lazy init).
private volatile long usedQuotaDataBytes = -1;
// Lock helpers around the fair ReentrantReadWriteLock; every public entry point
// pairs one of these with the matching unlock in a finally block.
protected void readLock() {
this.transactionLock.readLock().lock();
}
protected void readUnlock() {
this.transactionLock.readLock().unlock();
}
protected void writeLock() {
this.transactionLock.writeLock().lock();
}
protected void writeUnlock() {
this.transactionLock.writeLock().unlock();
}
// Creates the per-database txn manager; the edit log is taken from the catalog
// so state changes can be persisted for replay.
public DatabaseTransactionMgr(long dbId, Catalog catalog, TransactionIdGenerator idGenerator) {
this.dbId = dbId;
this.catalog = catalog;
this.idGenerator = idGenerator;
this.editLog = catalog.getEditLog();
}
// Returns the id of the database this manager is responsible for.
public long getDbId() {
return dbId;
}
// Thread-safe lookup by txn id: running txns take precedence, then finished ones;
// returns null when the id is unknown.
public TransactionState getTransactionState(Long transactionId) {
    readLock();
    try {
        TransactionState state = idToRunningTransactionState.get(transactionId);
        if (state == null) {
            state = idToFinalStatusTransactionState.get(transactionId);
        }
        return state;
    } finally {
        readUnlock();
    }
}
// Lock-free lookup (caller must hold a lock): running map first, then the
// final-status map; null when the id is unknown.
private TransactionState unprotectedGetTransactionState(Long transactionId) {
    TransactionState running = idToRunningTransactionState.get(transactionId);
    return running != null ? running : idToFinalStatusTransactionState.get(transactionId);
}
// Returns all txn ids registered under `label`, or null if the label is unknown.
// Caller must hold a lock ("unprotected" convention).
@VisibleForTesting
protected Set<Long> unprotectedGetTxnIdsByLabel(String label) {
return labelToTxnIds.get(label);
}
// Running txns excluding routine-load tasks.
@VisibleForTesting
protected int getRunningTxnNums() {
return runningTxnNums;
}
// Running routine-load txns (counted separately from the per-db cap).
@VisibleForTesting
protected int getRunningRoutineLoadTxnNums() {
return runningRoutineLoadTxnNums;
}
// Number of txns retained in a final status.
@VisibleForTesting
protected int getFinishedTxnNums() {
return finalStatusTransactionStateDeque.size();
}
// Builds up to `limit` display rows, sorted by txn id, from either the running
// txns (running=true) or the finished ones.
public List<List<String>> getTxnStateInfoList(boolean running, int limit) {
    List<List<String>> infos = Lists.newArrayList();
    readLock();
    try {
        Collection<TransactionState> states =
                running ? idToRunningTransactionState.values() : finalStatusTransactionStateDeque;
        states.stream()
                .sorted(TransactionState.TXN_ID_COMPARATOR)
                .limit(limit)
                .forEach(state -> {
                    List<String> row = Lists.newArrayList();
                    getTxnStateInfo(state, row);
                    infos.add(row);
                });
    } finally {
        readUnlock();
    }
    return infos;
}
// Appends the display columns for one txn to `info`, in fixed order: id, label,
// coordinator, status, source type, prepare/commit/publish/finish times, reason,
// error-replica count, callback id, timeout, error message.
// NOTE(review): consumers (e.g. getSingleTranInfo / getTxnStateInfoList) rely on
// this exact column order — confirm schema before reordering.
private void getTxnStateInfo(TransactionState txnState, List<String> info) {
info.add(String.valueOf(txnState.getTransactionId()));
info.add(txnState.getLabel());
info.add(txnState.getCoordinator().toString());
info.add(txnState.getTransactionStatus().name());
info.add(txnState.getSourceType().name());
info.add(TimeUtils.longToTimeString(txnState.getPrepareTime()));
info.add(TimeUtils.longToTimeString(txnState.getCommitTime()));
info.add(TimeUtils.longToTimeString(txnState.getPublishVersionTime()));
info.add(TimeUtils.longToTimeString(txnState.getFinishTime()));
info.add(txnState.getReason());
info.add(String.valueOf(txnState.getErrorReplicas().size()));
info.add(String.valueOf(txnState.getCallbackId()));
info.add(String.valueOf(txnState.getTimeoutMs()));
info.add(txnState.getErrMsg());
}
// Begins a new transaction in this db and returns its txn id.
// Enforces, in order: the db data quota, label validity/uniqueness (with retry
// detection via requestId), and the per-db running-txn limit.
// Throws DuplicatedRequestException when the same request already began a txn,
// carrying the existing txn id for the caller to reuse.
public long beginTransaction(List<Long> tableIdList, String label, TUniqueId requestId,
TransactionState.TxnCoordinator coordinator,
TransactionState.LoadJobSourceType sourceType, long listenerId, long timeoutSecond)
throws DuplicatedRequestException, LabelAlreadyUsedException, BeginTransactionException, AnalysisException {
// Quota check happens before taking the write lock (it may lock the db itself).
checkDatabaseDataQuota();
writeLock();
try {
Preconditions.checkNotNull(coordinator);
Preconditions.checkNotNull(label);
FeNameFormat.checkLabel(label);
/*
* Check if label already used, by following steps
* 1. get all existing transactions
* 2. if there is a PREPARE transaction, check if this is a retry request. If yes, return the
* existing txn id.
* 3. if there is a non-aborted transaction, throw label already used exception.
*/
Set<Long> existingTxnIds = unprotectedGetTxnIdsByLabel(label);
if (existingTxnIds != null && !existingTxnIds.isEmpty()) {
List<TransactionState> notAbortedTxns = Lists.newArrayList();
for (long txnId : existingTxnIds) {
TransactionState txn = unprotectedGetTransactionState(txnId);
Preconditions.checkNotNull(txn);
if (txn.getTransactionStatus() != TransactionStatus.ABORTED) {
notAbortedTxns.add(txn);
}
}
// A label may have at most one non-aborted txn at any time.
Preconditions.checkState(notAbortedTxns.size() <= 1, notAbortedTxns);
if (!notAbortedTxns.isEmpty()) {
TransactionState notAbortedTxn = notAbortedTxns.get(0);
// Same request retried: surface the existing txn id via the exception.
if (requestId != null && notAbortedTxn.getTransactionStatus() == TransactionStatus.PREPARE
&& notAbortedTxn.getRequestId() != null && notAbortedTxn.getRequestId().equals(requestId)) {
throw new DuplicatedRequestException(DebugUtil.printId(requestId),
notAbortedTxn.getTransactionId(), "");
}
throw new LabelAlreadyUsedException(label, notAbortedTxn.getTransactionStatus());
}
}
checkRunningTxnExceedLimit(sourceType);
long tid = idGenerator.getNextTransactionId();
LOG.info("begin transaction: txn id {} with label {} from coordinator {}, listner id: {}",
tid, label, coordinator, listenerId);
TransactionState transactionState =
new TransactionState(dbId, tableIdList, tid, label, requestId, sourceType,
coordinator, listenerId, timeoutSecond * 1000);
transactionState.setPrepareTime(System.currentTimeMillis());
// Registers the txn in memory and writes the edit log (FRONTEND source only for PREPARE).
unprotectUpsertTransactionState(transactionState, false);
if (MetricRepo.isInit) {
MetricRepo.COUNTER_TXN_BEGIN.increase(1L);
}
return tid;
} catch (DuplicatedRequestException e) {
// A duplicated request is not a rejection; keep it out of the reject metric.
throw e;
} catch (Exception e) {
if (MetricRepo.isInit) {
MetricRepo.COUNTER_TXN_REJECT.increase(1L);
}
throw e;
} finally {
writeUnlock();
}
}
// Rejects new txns when the db's used data size has reached its quota.
// The used size is fetched lazily on first use and refreshed externally via
// updateDatabaseUsedQuotaData().
private void checkDatabaseDataQuota() throws AnalysisException {
    Database db = catalog.getDb(dbId);
    if (db == null) {
        throw new AnalysisException("Database[" + dbId + "] does not exist");
    }
    if (usedQuotaDataBytes == -1) {
        usedQuotaDataBytes = db.getUsedDataQuotaWithLock();
    }
    long dataQuotaBytes = db.getDataQuota();
    if (usedQuotaDataBytes < dataQuotaBytes) {
        return;
    }
    Pair<Double, String> quotaUnitPair = DebugUtil.getByteUint(dataQuotaBytes);
    String readableQuota = DebugUtil.DECIMAL_FORMAT_SCALE_3.format(quotaUnitPair.first) + " "
            + quotaUnitPair.second;
    throw new AnalysisException("Database[" + db.getFullName()
            + "] data size exceeds quota[" + readableQuota + "]");
}
// Refreshes the cached used-data size consulted by checkDatabaseDataQuota().
// The field is volatile, so the new value is visible without taking the txn lock.
public void updateDatabaseUsedQuotaData(long usedQuotaDataBytes) {
this.usedQuotaDataBytes = usedQuotaDataBytes;
}
/**
 * The commit transaction process is as follows:
 * 1. validate whether the `Load` has been cancelled
 * 2. validate whether the `Table` has been deleted
 * 3. validate replica consistency
 * 4. update the transaction state's version
 * 5. persist the transactionState
 * 6. update nextVersion, because a failure to persist the transaction would otherwise leave an incorrect version
 */
public void commitTransaction(long transactionId, List<TabletCommitInfo> tabletCommitInfos,
TxnCommitAttachment txnCommitAttachment)
throws UserException {
Database db = catalog.getDb(dbId);
if (null == db) {
throw new MetaNotFoundException("could not find db [" + dbId + "]");
}
TransactionState transactionState = null;
readLock();
try {
transactionState = unprotectedGetTransactionState(transactionId);
} finally {
readUnlock();
}
if (transactionState == null
|| transactionState.getTransactionStatus() == TransactionStatus.ABORTED) {
throw new TransactionCommitFailedException(
transactionState == null ? "transaction not found" : transactionState.getReason());
}
if (transactionState.getTransactionStatus() == TransactionStatus.VISIBLE) {
LOG.debug("transaction is already visible: {}", transactionId);
return;
}
if (transactionState.getTransactionStatus() == TransactionStatus.COMMITTED) {
LOG.debug("transaction is already committed: {}", transactionId);
return;
}
if (tabletCommitInfos == null || tabletCommitInfos.isEmpty()) {
throw new TransactionCommitFailedException(TransactionCommitFailedException.NO_DATA_TO_LOAD_MSG);
}
if (txnCommitAttachment != null) {
transactionState.setTxnCommitAttachment(txnCommitAttachment);
}
TabletInvertedIndex tabletInvertedIndex = catalog.getTabletInvertedIndex();
Map<Long, Set<Long>> tabletToBackends = new HashMap<>();
Map<Long, Set<Long>> tableToPartition = new HashMap<>();
Map<Long, Set<String>> tableToInvalidDictCacheColumns = new HashMap<>();
Map<Long, Set<String>> tableToValidDictCacheColumns = new HashMap<>();
List<Long> tabletIds = tabletCommitInfos.stream().map(
TabletCommitInfo::getTabletId).collect(Collectors.toList());
List<TabletMeta> tabletMetaList = tabletInvertedIndex.getTabletMetaList(tabletIds);
for (int i = 0; i < tabletMetaList.size(); i++) {
TabletMeta tabletMeta = tabletMetaList.get(i);
if (tabletMeta == TabletInvertedIndex.NOT_EXIST_TABLET_META) {
continue;
}
long tabletId = tabletIds.get(i);
long tableId = tabletMeta.getTableId();
OlapTable tbl = (OlapTable) db.getTable(tableId);
if (tbl == null) {
continue;
}
if (tbl.getState() == OlapTable.OlapTableState.RESTORE) {
throw new LoadException("Table " + tbl.getName() + " is in restore process. "
+ "Can not load into it");
}
long partitionId = tabletMeta.getPartitionId();
if (tbl.getPartition(partitionId) == null) {
continue;
}
if (!tableToPartition.containsKey(tableId)) {
tableToPartition.put(tableId, new HashSet<>());
}
tableToPartition.get(tableId).add(partitionId);
if (!tabletToBackends.containsKey(tabletId)) {
tabletToBackends.put(tabletId, new HashSet<>());
}
tabletToBackends.get(tabletId).add(tabletCommitInfos.get(i).getBackendId());
if (!tableToInvalidDictCacheColumns.containsKey(tableId)) {
tableToInvalidDictCacheColumns.put(tableId, new HashSet<>());
}
tableToInvalidDictCacheColumns.get(tableId).addAll(tabletCommitInfos.get(i).getInvalidDictCacheColumns());
if (!tableToValidDictCacheColumns.containsKey(tableId)) {
tableToValidDictCacheColumns.put(tableId, new HashSet<>());
}
if (tableToValidDictCacheColumns.get(tableId).isEmpty() &&
!tabletCommitInfos.get(i).getValidDictCacheColumns().isEmpty()) {
tableToValidDictCacheColumns.get(tableId).addAll(tabletCommitInfos.get(i).getValidDictCacheColumns());
}
if (i == tabletMetaList.size() - 1) {
tableToValidDictCacheColumns.get(tableId).removeAll(tableToInvalidDictCacheColumns.get(tableId));
}
}
if (tableToPartition.isEmpty()) {
throw new TransactionCommitFailedException(TransactionCommitFailedException.NO_DATA_TO_LOAD_MSG);
}
Set<Long> errorReplicaIds = Sets.newHashSet();
Set<Long> totalInvolvedBackends = Sets.newHashSet();
for (long tableId : tableToPartition.keySet()) {
OlapTable table = (OlapTable) db.getTable(tableId);
if (table == null) {
throw new MetaNotFoundException("Table does not exist: " + tableId);
}
for (Partition partition : table.getAllPartitions()) {
if (!tableToPartition.get(tableId).contains(partition.getId())) {
continue;
}
boolean useStarOS = partition.isUseStarOS();
List<MaterializedIndex> allIndices = transactionState.getPartitionLoadedTblIndexes(tableId, partition);
int quorumReplicaNum = table.getPartitionInfo().getQuorumNum(partition.getId());
for (MaterializedIndex index : allIndices) {
for (Tablet tablet : index.getTablets()) {
long tabletId = tablet.getId();
Set<Long> commitBackends = tabletToBackends.get(tabletId);
if (useStarOS) {
long backendId = ((StarOSTablet) tablet).getPrimaryBackendId();
totalInvolvedBackends.add(backendId);
if (!commitBackends.contains(backendId)) {
throw new TransactionCommitFailedException(
"Primary backend: " + backendId + " does not in commit backends: " +
Joiner.on(",").join(commitBackends));
}
} else {
Set<Long> tabletBackends = tablet.getBackendIds();
totalInvolvedBackends.addAll(tabletBackends);
Set<Long> errorBackendIdsForTablet = Sets.newHashSet();
int successReplicaNum = 0;
for (long tabletBackend : tabletBackends) {
Replica replica = tabletInvertedIndex.getReplica(tabletId, tabletBackend);
if (replica == null) {
Backend backend = Catalog.getCurrentSystemInfo().getBackend(tabletBackend);
throw new TransactionCommitFailedException("Not found replicas of tablet. "
+ "tablet_id: " + tabletId + ", backend_id: " + backend.getHost());
}
if (commitBackends != null && commitBackends.contains(tabletBackend)) {
if (replica.getLastFailedVersion() < 0) {
++successReplicaNum;
}
} else {
errorBackendIdsForTablet.add(tabletBackend);
errorReplicaIds.add(replica.getId());
}
}
if (successReplicaNum < quorumReplicaNum) {
List<String> errorBackends = new ArrayList<String>();
for (long backendId : errorBackendIdsForTablet) {
Backend backend = Catalog.getCurrentSystemInfo().getBackend(backendId);
errorBackends.add(backend.getId() + ":" + backend.getHost());
}
LOG.warn("Fail to load files. tablet_id: {}, txn_id: {}, backends: {}",
tablet.getId(), transactionId,
Joiner.on(",").join(errorBackends));
throw new TabletQuorumFailedException(tablet.getId(), transactionId, errorBackends);
}
}
}
}
}
}
transactionState.beforeStateTransform(TransactionStatus.COMMITTED);
boolean txnOperated = false;
writeLock();
try {
unprotectedCommitTransaction(transactionState, errorReplicaIds, tableToPartition,
tableToInvalidDictCacheColumns, tableToValidDictCacheColumns,
totalInvolvedBackends, db);
txnOperated = true;
} finally {
writeUnlock();
transactionState.afterStateTransform(TransactionStatus.COMMITTED, txnOperated);
}
updateCatalogAfterCommitted(transactionState, db);
LOG.info("transaction:[{}] successfully committed", transactionState);
}
/**
 * Blocks until the given transaction becomes VISIBLE or {@code timeoutMillis} elapses.
 *
 * @param db            database the transaction belongs to (used for log context only)
 * @param transactionId id of the transaction to wait on
 * @param timeoutMillis overall waiting budget in milliseconds
 * @return true iff the transaction reached VISIBLE within the timeout
 * @throws TransactionCommitFailedException if the transaction is not in COMMITTED/VISIBLE state
 */
public boolean publishTransaction(Database db, long transactionId, long timeoutMillis)
        throws TransactionCommitFailedException {
    TransactionState transactionState = null;
    readLock();
    try {
        transactionState = unprotectedGetTransactionState(transactionId);
    } finally {
        readUnlock();
    }
    // Only a committed (or already visible) transaction can be waited on.
    switch (transactionState.getTransactionStatus()) {
        case COMMITTED:
        case VISIBLE:
            break;
        default:
            LOG.warn("transaction commit failed, db={}, txn={}", db.getFullName(), transactionId);
            throw new TransactionCommitFailedException("transaction commit failed");
    }
    long currentTimeMillis = System.currentTimeMillis();
    long timeoutTimeMillis = currentTimeMillis + timeoutMillis;
    while (currentTimeMillis < timeoutTimeMillis &&
            transactionState.getTransactionStatus() == TransactionStatus.COMMITTED) {
        try {
            // Wait only for the remaining budget, not the full timeout on every
            // iteration, so the loop honors the caller's overall deadline.
            transactionState.waitTransactionVisible(timeoutTimeMillis - currentTimeMillis);
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers up the stack can observe it;
            // previously the interrupt was silently swallowed and mis-logged as a timeout.
            Thread.currentThread().interrupt();
            LOG.info("interrupted while waiting for transaction {} to be visible", transactionId);
        }
        currentTimeMillis = System.currentTimeMillis();
    }
    return transactionState.getTransactionStatus() == TransactionStatus.VISIBLE;
}
/**
 * Removes the given transaction from final-status bookkeeping, but only when it is
 * currently at the head of the final-status deque; otherwise this is a no-op.
 */
public void deleteTransaction(TransactionState transactionState) {
    writeLock();
    try {
        if (finalStatusTransactionStateDeque.isEmpty()) {
            return;
        }
        TransactionState head = finalStatusTransactionStateDeque.getFirst();
        if (head.getTransactionId() == transactionState.getTransactionId()) {
            finalStatusTransactionStateDeque.pop();
            clearTransactionState(transactionState);
        }
    } finally {
        writeUnlock();
    }
}
/**
 * Returns the status of the newest (highest id) transaction carrying the given label,
 * or UNKNOWN when no transaction ever used that label.
 */
public TransactionStatus getLabelState(String label) {
    readLock();
    try {
        Set<Long> txnIds = unprotectedGetTxnIdsByLabel(label);
        if (txnIds == null || txnIds.isEmpty()) {
            return TransactionStatus.UNKNOWN;
        }
        // The highest txn id is the most recent use of the label.
        long newestTxnId = Long.MIN_VALUE;
        for (long txnId : txnIds) {
            newestTxnId = Math.max(newestTxnId, txnId);
        }
        return unprotectedGetTransactionState(newestTxnId).getTransactionStatus();
    } finally {
        readUnlock();
    }
}
/**
 * Returns all currently-running transactions that are in COMMITTED state,
 * ordered by commit time (oldest first).
 */
public List<TransactionState> getCommittedTxnList() {
    readLock();
    try {
        List<TransactionState> committed = new ArrayList<>();
        for (TransactionState state : idToRunningTransactionState.values()) {
            if (state.getTransactionStatus() == TransactionStatus.COMMITTED) {
                committed.add(state);
            }
        }
        committed.sort(Comparator.comparing(TransactionState::getCommitTime));
        return committed;
    } finally {
        readUnlock();
    }
}
/**
 * Attempts to move a COMMITTED transaction to VISIBLE after its publish-version tasks
 * have run. Verifies, per partition and per tablet, that enough replicas caught up to
 * the new version; returns early (leaving the txn COMMITTED, with an error message set)
 * if publishing must be retried, or aborts the txn if the database was dropped.
 *
 * @param transactionId   id of the transaction to finish
 * @param errorReplicaIds replicas already known to have failed publish; may be null
 * @throws UserException propagated from catalog updates
 */
public void finishTransaction(long transactionId, Set<Long> errorReplicaIds) throws UserException {
TransactionState transactionState = null;
readLock();
try {
transactionState = unprotectedGetTransactionState(transactionId);
} finally {
readUnlock();
}
// Merge caller-supplied error replicas with those recorded at commit time.
if (errorReplicaIds == null) {
errorReplicaIds = Sets.newHashSet();
}
Set<Long> originalErrorReplicas = transactionState.getErrorReplicas();
if (originalErrorReplicas != null) {
errorReplicaIds.addAll(originalErrorReplicas);
}
Database db = catalog.getDb(transactionState.getDbId());
if (db == null) {
// The whole database is gone: abort instead of finishing.
writeLock();
try {
transactionState.setTransactionStatus(TransactionStatus.ABORTED);
transactionState.setReason("db is dropped");
LOG.warn("db is dropped during transaction, abort transaction {}", transactionState);
unprotectUpsertTransactionState(transactionState, false);
return;
} finally {
writeUnlock();
}
}
db.writeLock();
try {
boolean hasError = false;
for (TableCommitInfo tableCommitInfo : transactionState.getIdToTableCommitInfos().values()) {
long tableId = tableCommitInfo.getTableId();
OlapTable table = (OlapTable) db.getTable(tableId);
// Dropped tables/partitions are pruned from the txn state rather than failing it.
if (table == null) {
transactionState.removeTable(tableId);
LOG.warn("table {} is dropped, skip version check and remove it from transaction state {}",
tableId,
transactionState);
continue;
}
PartitionInfo partitionInfo = table.getPartitionInfo();
for (PartitionCommitInfo partitionCommitInfo : tableCommitInfo.getIdToPartitionCommitInfo().values()) {
long partitionId = partitionCommitInfo.getPartitionId();
Partition partition = table.getPartition(partitionId);
if (partition == null) {
tableCommitInfo.removePartition(partitionId);
LOG.warn("partition {} is dropped, skip version check and remove it from transaction state {}",
partitionId,
transactionState);
continue;
}
// Versions must become visible strictly in order: this txn's version must be
// exactly visibleVersion + 1, otherwise an earlier txn has not published yet.
if (partition.getVisibleVersion() != partitionCommitInfo.getVersion() - 1) {
// Rate-limit the debug log to once per ~3s of repeated failures.
// NOTE(review): getLastErrTimeMs is compared against a nanoTime-derived
// millisecond value — confirm both sides use the same clock source.
if (transactionState.getLastErrTimeMs() + 3000 < System.nanoTime() / 1000000) {
LOG.debug("transactionId {} partition commitInfo version {} is not equal with " +
"partition visible version {} plus one, need wait",
transactionId,
partitionCommitInfo.getVersion(),
partition.getVisibleVersion());
}
String errMsg =
String.format("wait for publishing partition %d version %d. self version: %d. table %d",
partitionId, partition.getVisibleVersion() + 1,
partitionCommitInfo.getVersion(), tableId);
transactionState.setErrorMsg(errMsg);
// Leave the txn COMMITTED; a later call will retry once predecessors publish.
return;
}
// StarOS partitions have no local replicas to quorum-check.
if (partition.isUseStarOS()) {
continue;
}
int quorumReplicaNum = partitionInfo.getQuorumNum(partitionId);
List<MaterializedIndex> allIndices =
transactionState.getPartitionLoadedTblIndexes(tableId, partition);
for (MaterializedIndex index : allIndices) {
for (Tablet tablet : index.getTablets()) {
int healthReplicaNum = 0;
for (Replica replica : ((LocalTablet) tablet).getReplicas()) {
if (!errorReplicaIds.contains(replica.getId())
&& replica.getLastFailedVersion() < 0) {
// Replica has no failure history: healthy only if it caught up
// to the currently visible version.
if (replica.checkVersionCatchUp(partition.getVisibleVersion(), true)) {
replica.updateRowCount(partitionCommitInfo.getVersion(),
replica.getDataSize(), replica.getRowCount());
++healthReplicaNum;
} else {
replica.updateVersionInfo(replica.getVersion(),
partition.getVisibleVersion(),
partitionCommitInfo.getVersion());
LOG.warn("transaction state {} has error, the replica [{}] not appeared " +
"in error replica list and its version not equal to partition " +
"commit version or commit version - 1 if its not a upgrate " +
"stage, its a fatal error. ",
transactionState, replica);
}
} else if (replica.getVersion() >= partitionCommitInfo.getVersion()) {
// A previously-failed replica that nevertheless reached the commit
// version is rehabilitated and counted as healthy.
errorReplicaIds.remove(replica.getId());
++healthReplicaNum;
}
}
if (healthReplicaNum < quorumReplicaNum) {
if (transactionState.getLastErrTimeMs() + 3000 < System.nanoTime() / 1000000) {
LOG.info("publish version failed for transaction {} on tablet {}, with only {} " +
"replicas less than quorum {}", transactionState, tablet, healthReplicaNum,
quorumReplicaNum);
}
String errMsg = String.format(
"publish on tablet %d failed. succeed replica num %d less than quorum %d."
+ " table: %d, partition: %d, publish version: %d",
tablet.getId(), healthReplicaNum, quorumReplicaNum, tableId, partitionId,
partition.getVisibleVersion() + 1);
transactionState.setErrorMsg(errMsg);
// Keep scanning remaining tablets (to update row counts etc.) but
// remember that this txn cannot become visible yet.
hasError = true;
}
}
}
}
}
if (hasError) {
return;
}
// All quorums satisfied: flip the txn to VISIBLE under the manager's write lock.
boolean txnOperated = false;
writeLock();
try {
transactionState.setErrorReplicas(errorReplicaIds);
transactionState.setFinishTime(System.currentTimeMillis());
transactionState.clearErrorMsg();
transactionState.setTransactionStatus(TransactionStatus.VISIBLE);
unprotectUpsertTransactionState(transactionState, false);
txnOperated = true;
LOG.debug("after set transaction {} to visible", transactionState);
} finally {
writeUnlock();
// Callbacks run outside the lock, and are told whether the transform happened.
transactionState.afterStateTransform(TransactionStatus.VISIBLE, txnOperated);
}
// Apply the new visible versions to the catalog while still holding db.writeLock.
updateCatalogAfterVisible(transactionState, db);
} finally {
db.writeUnlock();
}
LOG.info("finish transaction {} successfully", transactionState);
}
/**
 * Transitions a PREPARE transaction to COMMITTED and records, per table/partition,
 * the version this txn will publish. Caller must hold the manager's write lock.
 *
 * @param transactionState    txn being committed (ignored unless currently PREPARE)
 * @param errorReplicaIds     replicas that failed during load
 * @param tableToPartition    partitions touched per table
 * @param tableToInvalidDictColumns dict-cache columns to invalidate per table
 * @param tableToValidDictColumns   dict-cache columns still valid per table
 * @param totalInvolvedBackends     backends that must receive publish-version tasks
 * @param db                  database containing the tables
 */
protected void unprotectedCommitTransaction(TransactionState transactionState, Set<Long> errorReplicaIds,
Map<Long, Set<Long>> tableToPartition,
Map<Long, Set<String>> tableToInvalidDictColumns,
Map<Long, Set<String>> tableToValidDictColumns,
Set<Long> totalInvolvedBackends,
Database db) {
// Idempotence guard: only a PREPARE txn may be committed.
if (transactionState.getTransactionStatus() != TransactionStatus.PREPARE) {
return;
}
transactionState.setCommitTime(System.currentTimeMillis());
transactionState.setTransactionStatus(TransactionStatus.COMMITTED);
transactionState.setErrorReplicas(errorReplicaIds);
for (long tableId : tableToPartition.keySet()) {
TableCommitInfo tableCommitInfo = new TableCommitInfo(tableId);
boolean isFirstPartition = true;
for (long partitionId : tableToPartition.get(tableId)) {
OlapTable table = (OlapTable) db.getTable(tableId);
Partition partition = table.getPartition(partitionId);
PartitionCommitInfo partitionCommitInfo;
// Dict-cache column lists are attached only to the first partition's commit
// info; the remaining partitions carry just the version and timestamp.
if (isFirstPartition) {
partitionCommitInfo = new PartitionCommitInfo(partitionId,
partition.getNextVersion(),
System.currentTimeMillis(),
Lists.newArrayList(tableToInvalidDictColumns.get(tableId)),
Lists.newArrayList(tableToValidDictColumns.get(tableId)));
} else {
partitionCommitInfo = new PartitionCommitInfo(partitionId,
partition.getNextVersion(),
System.currentTimeMillis() /* use as partition visible time */);
}
tableCommitInfo.addPartitionCommitInfo(partitionCommitInfo);
isFirstPartition = false;
}
transactionState.putIdToTableCommitInfo(tableId, tableCommitInfo);
}
// Persists the COMMITTED state (edit log) and updates in-memory indexes.
unprotectUpsertTransactionState(transactionState, false);
// Register a publish-version slot for every involved backend; the tasks
// themselves are created later.
for (long backendId : totalInvolvedBackends) {
transactionState.addPublishVersionTask(backendId, null);
}
}
/**
 * Inserts or updates a transaction in the in-memory indexes and, when not replaying,
 * writes it to the edit log. Maintains the running-txn counters and moves txns that
 * reached a final status into the final-status structures. Caller holds the write lock.
 *
 * @param transactionState txn to upsert
 * @param isReplay         true when called from edit-log replay (no re-logging)
 */
protected void unprotectUpsertTransactionState(TransactionState transactionState, boolean isReplay) {
if (!isReplay) {
// PREPARE-state txns are normally not logged, except FRONTEND-sourced ones
// which must survive a master restart.
if (transactionState.getTransactionStatus() != TransactionStatus.PREPARE
|| transactionState.getSourceType() == TransactionState.LoadJobSourceType.FRONTEND) {
editLog.logInsertTransactionState(transactionState);
}
}
if (!transactionState.getTransactionStatus().isFinalStatus()) {
// Still running: count it only on first insertion (put returns null).
if (idToRunningTransactionState.put(transactionState.getTransactionId(), transactionState) == null) {
if (transactionState.getSourceType() == TransactionState.LoadJobSourceType.ROUTINE_LOAD_TASK) {
runningRoutineLoadTxnNums++;
} else {
runningTxnNums++;
}
}
} else {
// Final status: decrement the matching counter if it was previously running,
// then move it into the final-status map/deque.
if (idToRunningTransactionState.remove(transactionState.getTransactionId()) != null) {
if (transactionState.getSourceType() == TransactionState.LoadJobSourceType.ROUTINE_LOAD_TASK) {
runningRoutineLoadTxnNums--;
} else {
runningTxnNums--;
}
}
idToFinalStatusTransactionState.put(transactionState.getTransactionId(), transactionState);
finalStatusTransactionStateDeque.add(transactionState);
}
// Keep the label -> txn-ids index in sync in all cases.
updateTxnLabels(transactionState);
}
/**
 * Records the transaction id under its label so later label lookups
 * (e.g. {@link #getLabelState(String)}) can find every txn that used it.
 */
private void updateTxnLabels(TransactionState transactionState) {
    // computeIfAbsent replaces the manual get / null-check / put sequence.
    labelToTxnIds.computeIfAbsent(transactionState.getLabel(), label -> Sets.newHashSet())
            .add(transactionState.getTransactionId());
}
/**
 * Aborts the (single) still-running PREPARE transaction that carries the given label.
 *
 * @throws TransactionNotFoundException if the label is unknown or no PREPARE txn uses it
 */
public void abortTransaction(String label, String reason) throws UserException {
    Preconditions.checkNotNull(label);
    long abortTargetId = -1;
    readLock();
    try {
        Set<Long> txnIdsWithLabel = unprotectedGetTxnIdsByLabel(label);
        if (txnIdsWithLabel == null || txnIdsWithLabel.isEmpty()) {
            throw new TransactionNotFoundException("transaction not found, label=" + label);
        }
        for (Long txnId : txnIdsWithLabel) {
            TransactionState candidate = unprotectedGetTransactionState(txnId);
            if (candidate.getTransactionStatus() == TransactionStatus.PREPARE) {
                abortTargetId = candidate.getTransactionId();
                break;
            }
        }
        if (abortTargetId == -1) {
            throw new TransactionNotFoundException("running transaction not found, label=" + label);
        }
    } finally {
        readUnlock();
    }
    // Perform the actual abort outside the read lock.
    abortTransaction(abortTargetId, reason, null);
}
/**
 * Aborts a running transaction by id, optionally attaching commit metadata first.
 * Negative ids identify legacy load jobs and are silently ignored.
 *
 * @throws TransactionNotFoundException if the txn is not currently running
 */
public void abortTransaction(long transactionId, String reason, TxnCommitAttachment txnCommitAttachment)
        throws UserException {
    if (transactionId < 0) {
        LOG.info("transaction id is {}, less than 0, maybe this is an old type load job, ignore abort operation",
                transactionId);
        return;
    }
    TransactionState targetTxn;
    readLock();
    try {
        targetTxn = idToRunningTransactionState.get(transactionId);
    } finally {
        readUnlock();
    }
    if (targetTxn == null) {
        throw new TransactionNotFoundException("transaction not found", transactionId);
    }
    if (txnCommitAttachment != null) {
        targetTxn.setTxnCommitAttachment(txnCommitAttachment);
    }
    // Pre-transform callback runs outside the write lock.
    targetTxn.beforeStateTransform(TransactionStatus.ABORTED);
    boolean txnOperated = false;
    writeLock();
    try {
        txnOperated = unprotectAbortTransaction(transactionId, reason);
    } finally {
        writeUnlock();
        targetTxn.afterStateTransform(TransactionStatus.ABORTED, txnOperated, reason);
    }
    // Only tell backends to clean up when the abort really happened.
    if (txnOperated && targetTxn.getTransactionStatus() == TransactionStatus.ABORTED) {
        clearBackendTransactions(targetTxn);
    }
}
/**
 * Marks the transaction ABORTED and cancels its pending publish-version tasks.
 * Caller must hold the write lock.
 *
 * @return false if the txn was already aborted (idempotent no-op), true otherwise
 * @throws TransactionNotFoundException if the txn does not exist
 * @throws UserException if the txn already committed or became visible
 */
private boolean unprotectAbortTransaction(long transactionId, String reason)
        throws UserException {
    TransactionState state = unprotectedGetTransactionState(transactionId);
    if (state == null) {
        throw new TransactionNotFoundException("transaction not found", transactionId);
    }
    TransactionStatus status = state.getTransactionStatus();
    if (status == TransactionStatus.ABORTED) {
        return false;
    }
    if (status == TransactionStatus.COMMITTED || status == TransactionStatus.VISIBLE) {
        throw new UserException("transaction's state is already "
                + status + ", could not abort");
    }
    state.setFinishTime(System.currentTimeMillis());
    state.setReason(reason);
    state.setTransactionStatus(TransactionStatus.ABORTED);
    unprotectUpsertTransactionState(state, false);
    // Dequeue any publish tasks that were scheduled for this txn.
    for (PublishVersionTask task : state.getPublishVersionTasks().values()) {
        AgentTaskQueue.removeTask(task.getBackendId(), TTaskType.PUBLISH_VERSION, task.getSignature());
    }
    return true;
}
/**
 * Queues clear-transaction tasks for an aborted txn on every backend. Tasks are
 * batched and only submitted once the backlog exceeds twice the backend count,
 * to avoid flooding the agent task executor.
 */
private void clearBackendTransactions(TransactionState transactionState) {
    Preconditions.checkState(transactionState.getTransactionStatus() == TransactionStatus.ABORTED);
    List<Long> allBeIds = Catalog.getCurrentSystemInfo().getBackendIds(false);
    AgentBatchTask batchTask = null;
    synchronized (clearTransactionTasks) {
        for (Long beId : allBeIds) {
            clearTransactionTasks.add(
                    new ClearTransactionTask(beId, transactionState.getTransactionId(), Lists.newArrayList()));
        }
        boolean backlogFull = clearTransactionTasks.size() > allBeIds.size() * 2;
        if (backlogFull) {
            batchTask = new AgentBatchTask();
            for (ClearTransactionTask pending : clearTransactionTasks) {
                batchTask.addTask(pending);
            }
            clearTransactionTasks.clear();
        }
    }
    // Submit outside the synchronized block to keep the critical section short.
    if (batchTask != null) {
        AgentTaskExecutor.submit(batchTask);
    }
}
/**
 * Builds one [tableId, "p1, p2, ..."] row per table touched by the transaction,
 * for SHOW-style output.
 *
 * @throws AnalysisException if the transaction does not exist
 */
protected List<List<Comparable>> getTableTransInfo(long txnId) throws AnalysisException {
    List<List<Comparable>> result = new ArrayList<>();
    readLock();
    try {
        TransactionState state = unprotectedGetTransactionState(txnId);
        if (null == state) {
            throw new AnalysisException("Transaction[" + txnId + "] does not exist.");
        }
        for (Map.Entry<Long, TableCommitInfo> entry : state.getIdToTableCommitInfos().entrySet()) {
            List<Long> partitionIds = new ArrayList<>();
            for (PartitionCommitInfo commitInfo : entry.getValue().getIdToPartitionCommitInfo().values()) {
                partitionIds.add(commitInfo.getPartitionId());
            }
            List<Comparable> row = new ArrayList<>();
            row.add(entry.getKey());
            row.add(Joiner.on(", ").join(partitionIds));
            result.add(row);
        }
    } finally {
        readUnlock();
    }
    return result;
}
/**
 * Builds one [partitionId, version] row per partition the transaction committed
 * for the given table, for SHOW-style output.
 *
 * @throws AnalysisException if the transaction does not exist or did not touch the table
 */
protected List<List<Comparable>> getPartitionTransInfo(long txnId, long tableId) throws AnalysisException {
    List<List<Comparable>> partitionInfos = new ArrayList<List<Comparable>>();
    readLock();
    try {
        TransactionState transactionState = unprotectedGetTransactionState(txnId);
        if (null == transactionState) {
            throw new AnalysisException("Transaction[" + txnId + "] does not exist.");
        }
        TableCommitInfo tableCommitInfo = transactionState.getIdToTableCommitInfos().get(tableId);
        // Previously a missing table id caused a NullPointerException; report it
        // as an analysis error instead.
        if (tableCommitInfo == null) {
            throw new AnalysisException("Transaction[" + txnId + "] did not commit to table " + tableId + ".");
        }
        Map<Long, PartitionCommitInfo> idToPartitionCommitInfo = tableCommitInfo.getIdToPartitionCommitInfo();
        for (Map.Entry<Long, PartitionCommitInfo> entry : idToPartitionCommitInfo.entrySet()) {
            List<Comparable> partitionInfo = new ArrayList<Comparable>();
            partitionInfo.add(entry.getKey());
            partitionInfo.add(entry.getValue().getVersion());
            partitionInfos.add(partitionInfo);
        }
    } finally {
        readUnlock();
    }
    return partitionInfos;
}
/**
 * Evicts final-status transactions from the head of the deque, either because they
 * expired or to enforce the {@code Config.label_keep_max_num} cap on retained txns.
 *
 * @param currentMillis current wall-clock time used for expiry checks
 */
public void removeExpiredTxns(long currentMillis) {
    writeLock();
    try {
        int numJobsToRemove = getTransactionNum() - Config.label_keep_max_num;
        while (!finalStatusTransactionStateDeque.isEmpty()) {
            TransactionState transactionState = finalStatusTransactionStateDeque.getFirst();
            boolean expired = transactionState.isExpired(currentMillis);
            if (!expired && numJobsToRemove <= 0) {
                break;
            }
            finalStatusTransactionStateDeque.pop();
            clearTransactionState(transactionState);
            --numJobsToRemove;
            // The old message claimed "expired" even for quota-driven eviction;
            // log the actual reason.
            LOG.info("transaction [" + transactionState.getTransactionId() + "] is "
                    + (expired ? "expired" : "evicted to enforce label_keep_max_num")
                    + ", remove it from transaction manager");
        }
    } finally {
        writeUnlock();
    }
}
/**
 * Drops a transaction from the final-status map and from the label index,
 * removing the label entry entirely once its last txn id is gone.
 */
private void clearTransactionState(TransactionState transactionState) {
    idToFinalStatusTransactionState.remove(transactionState.getTransactionId());
    Set<Long> txnIds = unprotectedGetTxnIdsByLabel(transactionState.getLabel());
    // Guard against a label whose index entry was already removed — the lookup
    // can return null (see getLabelState), which previously caused an NPE here.
    if (txnIds == null) {
        return;
    }
    txnIds.remove(transactionState.getTransactionId());
    if (txnIds.isEmpty()) {
        labelToTxnIds.remove(transactionState.getLabel());
    }
}
// Total number of transactions tracked by this manager: still-running ones plus
// final-status ones retained for label lookups until expiry/eviction.
public int getTransactionNum() {
return idToRunningTransactionState.size() + finalStatusTransactionStateDeque.size();
}
/**
 * Finds the first transaction whose callback id matches and whose status is in
 * the given set, searching running txns before final-status ones.
 *
 * @return the matching transaction, or null if none matches
 */
public TransactionState getTransactionStateByCallbackIdAndStatus(long callbackId, Set<TransactionStatus> status) {
    readLock();
    try {
        List<TransactionState> searchOrder = new ArrayList<>(idToRunningTransactionState.values());
        searchOrder.addAll(finalStatusTransactionStateDeque);
        for (TransactionState txn : searchOrder) {
            if (txn.getCallbackId() == callbackId && status.contains(txn.getTransactionStatus())) {
                return txn;
            }
        }
        return null;
    } finally {
        readUnlock();
    }
}
/**
 * Finds the first transaction with the given callback id, searching running
 * txns before final-status ones.
 *
 * @return the matching transaction, or null if none matches
 */
public TransactionState getTransactionStateByCallbackId(long callbackId) {
    readLock();
    try {
        for (TransactionState running : idToRunningTransactionState.values()) {
            if (running.getCallbackId() == callbackId) {
                return running;
            }
        }
        for (TransactionState finished : finalStatusTransactionStateDeque) {
            if (finished.getCallbackId() == callbackId) {
                return finished;
            }
        }
        return null;
    } finally {
        readUnlock();
    }
}
/**
 * Lists up to {@code limit} (dbId, txnId) pairs for running transactions that were
 * coordinated by the given backend host.
 */
public List<Pair<Long, Long>> getTransactionIdByCoordinateBe(String coordinateHost, int limit) {
    ArrayList<Pair<Long, Long>> txnInfos = new ArrayList<>();
    readLock();
    try {
        for (TransactionState txn : idToRunningTransactionState.values()) {
            if (txnInfos.size() >= limit) {
                break;
            }
            boolean coordinatedByBe = txn.getCoordinator().sourceType == TransactionState.TxnSourceType.BE;
            if (coordinatedByBe && txn.getCoordinator().ip.equals(coordinateHost)) {
                txnInfos.add(new Pair<>(txn.getDbId(), txn.getTransactionId()));
            }
        }
    } finally {
        readUnlock();
    }
    return txnInfos;
}
/**
 * Returns SHOW TRANSACTION rows for a single transaction, enforcing SHOW privilege
 * on every table the transaction touched when invoked from a user session.
 *
 * @throws AnalysisException if the database or transaction does not exist,
 *         or the current user lacks SHOW privilege on an involved table
 */
public List<List<String>> getSingleTranInfo(long dbId, long txnId) throws AnalysisException {
List<List<String>> infos = new ArrayList<List<String>>();
readLock();
try {
Database db = Catalog.getCurrentCatalog().getDb(dbId);
if (db == null) {
throw new AnalysisException("Database[" + dbId + "] does not exist");
}
TransactionState txnState = unprotectedGetTransactionState(txnId);
if (txnState == null) {
throw new AnalysisException("transaction with id " + txnId + " does not exist");
}
// ConnectContext is null for internal callers; privilege checks only apply
// to real user sessions.
if (ConnectContext.get() != null) {
Set<Long> tblIds = txnState.getIdToTableCommitInfos().keySet();
for (Long tblId : tblIds) {
Table tbl = db.getTable(tblId);
// Dropped tables are skipped: nothing left to protect.
if (tbl != null) {
if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(ConnectContext.get(), db.getFullName(),
tbl.getName(), PrivPredicate.SHOW)) {
ErrorReport.reportAnalysisException(ErrorCode.ERR_TABLEACCESS_DENIED_ERROR,
"SHOW TRANSACTION",
ConnectContext.get().getQualifiedUser(),
ConnectContext.get().getRemoteIP(),
tbl.getName());
}
}
}
}
List<String> info = Lists.newArrayList();
getTxnStateInfo(txnState, info);
infos.add(info);
} finally {
readUnlock();
}
return infos;
}
/**
 * Rejects new transactions once the per-database running-txn cap is reached.
 * Routine-load tasks are exempt (they are throttled by their own counter).
 *
 * @throws BeginTransactionException if the cap would be exceeded
 */
protected void checkRunningTxnExceedLimit(TransactionState.LoadJobSourceType sourceType)
        throws BeginTransactionException {
    if (sourceType == TransactionState.LoadJobSourceType.ROUTINE_LOAD_TASK) {
        return;
    }
    if (runningTxnNums >= Config.max_running_txn_num_per_db) {
        throw new BeginTransactionException("current running txns on db " + dbId + " is "
                + runningTxnNums + ", larger than limit " + Config.max_running_txn_num_per_db);
    }
}
/**
 * Applies the side effects of a COMMITTED transaction to the catalog: marks error
 * replicas failed at the committed version and bumps each partition's next version.
 * Caller is expected to hold the database write lock.
 */
private void updateCatalogAfterCommitted(TransactionState transactionState, Database db) {
Set<Long> errorReplicaIds = transactionState.getErrorReplicas();
for (TableCommitInfo tableCommitInfo : transactionState.getIdToTableCommitInfos().values()) {
long tableId = tableCommitInfo.getTableId();
OlapTable table = (OlapTable) db.getTable(tableId);
for (PartitionCommitInfo partitionCommitInfo : tableCommitInfo.getIdToPartitionCommitInfo().values()) {
long partitionId = partitionCommitInfo.getPartitionId();
Partition partition = table.getPartition(partitionId);
// StarOS partitions have no local replicas to mark.
if (!partition.isUseStarOS()) {
List<MaterializedIndex> allIndices =
partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL);
for (MaterializedIndex index : allIndices) {
for (Tablet tablet : index.getTablets()) {
for (Replica replica : ((LocalTablet) tablet).getReplicas()) {
// Record the commit version as the first failed version on
// replicas that missed this load.
if (errorReplicaIds.contains(replica.getId())) {
replica.updateLastFailedVersion(partitionCommitInfo.getVersion());
}
}
}
}
}
// Reserve the next version for the following transaction on this partition.
partition.setNextVersion(partition.getNextVersion() + 1);
}
}
}
/**
 * Applies the side effects of a VISIBLE transaction: advances each replica's
 * version bookkeeping, publishes the new visible version on every partition,
 * and refreshes the global dictionary cache for affected columns.
 * Caller is expected to hold the database write lock.
 *
 * @return always true (kept for caller symmetry)
 */
private boolean updateCatalogAfterVisible(TransactionState transactionState, Database db) {
Set<Long> errorReplicaIds = transactionState.getErrorReplicas();
for (TableCommitInfo tableCommitInfo : transactionState.getIdToTableCommitInfos().values()) {
long tableId = tableCommitInfo.getTableId();
OlapTable table = (OlapTable) db.getTable(tableId);
List<String> validDictCacheColumns = Lists.newArrayList();
long maxPartitionVersionTime = -1;
for (PartitionCommitInfo partitionCommitInfo : tableCommitInfo.getIdToPartitionCommitInfo().values()) {
long partitionId = partitionCommitInfo.getPartitionId();
long newCommitVersion = partitionCommitInfo.getVersion();
Partition partition = table.getPartition(partitionId);
// StarOS partitions skip per-replica version bookkeeping.
if (!partition.isUseStarOS()) {
List<MaterializedIndex> allIndices =
partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL);
for (MaterializedIndex index : allIndices) {
for (Tablet tablet : index.getTablets()) {
for (Replica replica : ((LocalTablet) tablet).getReplicas()) {
long lastFailedVersion = replica.getLastFailedVersion();
long newVersion = newCommitVersion;
long lastSucessVersion = replica.getLastSuccessVersion();
if (!errorReplicaIds.contains(replica.getId())) {
// Healthy replica: keep its own version if it has failure
// history or lags the visible version, but always record
// this commit as the last success.
if (replica.getLastFailedVersion() > 0) {
newVersion = replica.getVersion();
} else if (!replica.checkVersionCatchUp(partition.getVisibleVersion(), true)) {
lastFailedVersion = partition.getVisibleVersion();
newVersion = replica.getVersion();
}
lastSucessVersion = newCommitVersion;
} else {
// Error replica: its version is untouched and the commit
// version becomes its newest failed version.
newVersion = replica.getVersion();
if (newCommitVersion > lastFailedVersion) {
lastFailedVersion = newCommitVersion;
}
}
replica.updateVersionInfo(newVersion, lastFailedVersion, lastSucessVersion);
}
}
}
}
long version = partitionCommitInfo.getVersion();
long versionTime = partitionCommitInfo.getVersionTime();
// Make the new version visible to queries on this partition.
partition.updateVisibleVersion(version, versionTime);
if (LOG.isDebugEnabled()) {
LOG.debug("transaction state {} set partition {}'s version to [{}]",
transactionState, partition.getId(), version);
}
// Dict-cache maintenance: drop invalidated columns immediately, and
// remember the valid ones to refresh after all partitions are processed.
if (!partitionCommitInfo.getInvalidDictCacheColumns().isEmpty()) {
for (String column : partitionCommitInfo.getInvalidDictCacheColumns()) {
IDictManager.getInstance().removeGlobalDict(tableId, column);
}
}
if (!partitionCommitInfo.getValidDictCacheColumns().isEmpty()) {
validDictCacheColumns = partitionCommitInfo.getValidDictCacheColumns();
}
maxPartitionVersionTime = Math.max(maxPartitionVersionTime, versionTime);
}
for (String column : validDictCacheColumns) {
IDictManager.getInstance().updateGlobalDict(tableId, column, maxPartitionVersionTime);
}
}
return true;
}
/**
 * Returns true iff no running transaction with id &lt;= {@code endTransactionId}
 * on this database still touches any table in {@code tableIdList}.
 */
public boolean isPreviousTransactionsFinished(long endTransactionId, List<Long> tableIdList) {
    readLock();
    try {
        for (Map.Entry<Long, TransactionState> entry : idToRunningTransactionState.entrySet()) {
            TransactionState txn = entry.getValue();
            boolean relevant = txn.getDbId() == dbId
                    && isIntersectionNotEmpty(txn.getTableIdList(), tableIdList)
                    && txn.isRunning();
            if (!relevant) {
                continue;
            }
            if (entry.getKey() <= endTransactionId) {
                LOG.debug("find a running txn with txn_id={} on db: {}, less than watermark txn_id {}",
                        entry.getKey(), dbId, endTransactionId);
                return false;
            }
        }
        return true;
    } finally {
        readUnlock();
    }
}
/**
 * Returns true iff the two table id lists share at least one id.
 * If either list is null or empty we cannot tell which tables are involved, so we
 * conservatively report a possible intersection to keep txn ordering correct.
 */
public boolean isIntersectionNotEmpty(List<Long> sourceTableIdList, List<Long> targetTableIdList) {
    if (CollectionUtils.isEmpty(sourceTableIdList) || CollectionUtils.isEmpty(targetTableIdList)) {
        return true;
    }
    // Probe the smaller list against a hash set built from the larger one:
    // O(n + m) instead of the previous O(n * m) nested scan.
    List<Long> smaller = sourceTableIdList.size() <= targetTableIdList.size()
            ? sourceTableIdList : targetTableIdList;
    List<Long> larger = (smaller == sourceTableIdList) ? targetTableIdList : sourceTableIdList;
    Set<Long> largerSet = new HashSet<>(larger);
    for (Long tableId : smaller) {
        if (largerSet.contains(tableId)) {
            return true;
        }
    }
    return false;
}
/**
 * Collects the ids of all running transactions that have exceeded their timeout
 * as of {@code currentMillis}.
 */
public List<Long> getTimeoutTxns(long currentMillis) {
    List<Long> timedOut = Lists.newArrayList();
    readLock();
    try {
        idToRunningTransactionState.values().forEach(txn -> {
            if (txn.isTimeout(currentMillis)) {
                timedOut.add(txn.getTransactionId());
            }
        });
    } finally {
        readUnlock();
    }
    return timedOut;
}
/**
 * Aborts every running transaction that timed out as of {@code currentMillis}.
 * Individual abort failures are logged and do not stop the sweep.
 */
public void abortTimeoutTxns(long currentMillis) {
    for (Long timedOutTxnId : getTimeoutTxns(currentMillis)) {
        try {
            abortTransaction(timedOutTxnId, "timeout by txn manager", null);
            LOG.info("transaction [" + timedOutTxnId + "] is timeout, abort it by transaction manager");
        } catch (UserException e) {
            // Best-effort: keep aborting the remaining timed-out txns.
            LOG.warn("abort timeout txn {} failed. msg: {}", timedOutTxnId, e.getMessage());
        }
    }
}
/**
 * Replays a transaction-state edit-log entry on a non-master node: re-applies the
 * catalog side effects for COMMITTED/VISIBLE states and refreshes the in-memory
 * indexes without writing the edit log again.
 */
public void replayUpsertTransactionState(TransactionState transactionState) {
writeLock();
try {
transactionState.replaySetTransactionStatus();
Database db = catalog.getDb(transactionState.getDbId());
if (transactionState.getTransactionStatus() == TransactionStatus.COMMITTED) {
LOG.info("replay a committed transaction {}", transactionState);
updateCatalogAfterCommitted(transactionState, db);
} else if (transactionState.getTransactionStatus() == TransactionStatus.VISIBLE) {
LOG.info("replay a visible transaction {}", transactionState);
updateCatalogAfterVisible(transactionState, db);
}
// isReplay=true: update indexes/counters only, never re-log.
unprotectUpsertTransactionState(transactionState, true);
} finally {
writeUnlock();
}
}
/**
 * Returns two summary rows for this database: the number of running transactions
 * (regular plus routine-load) and the number of finished ones.
 */
public List<List<String>> getDbTransStateInfo() {
    List<List<String>> infos = Lists.newArrayList();
    readLock();
    try {
        int runningTotal = runningTxnNums + runningRoutineLoadTxnNums;
        infos.add(Lists.newArrayList("running", String.valueOf(runningTotal)));
        infos.add(Lists.newArrayList("finished", String.valueOf(getFinishedTxnNums())));
    } finally {
        readUnlock();
    }
    return infos;
}
/**
 * Serializes every tracked transaction (running first, then final-status) to the
 * given output. Caller is responsible for holding the appropriate lock.
 */
public void unprotectWriteAllTransactionStates(DataOutput out) throws IOException {
    for (TransactionState running : idToRunningTransactionState.values()) {
        running.write(out);
    }
    for (TransactionState finished : finalStatusTransactionStateDeque) {
        finished.write(out);
    }
}
} |
This version should be just as efficient when an application's host set is unchanged, while also performing well when hosts are added or removed. It additionally logs a warning when a host is allocated to more than one application. | public void add(ApplicationInfo applicationInfo) {
ApplicationInfo oldApplicationInfo = applicationsById.put(applicationInfo.getApplicationId(), applicationInfo);
final String logPrefix;
if (oldApplicationInfo == null) {
logPrefix = isComplete ? "New application " : "Bootstrapped application ";
} else {
logPrefix = isComplete ? "Reactivated application " : "Rebootstrapped application ";
}
logger.log(LogLevel.INFO, logPrefix + applicationInfo.getApplicationId());
Set<HostName> oldHostnames = hostnamesById.remove(applicationInfo.getApplicationId());
if (oldHostnames != null) {
oldHostnames.forEach(applicationsByHostname::remove);
}
Set<HostName> hostnames = applicationInfo.getModel().getHosts().stream()
.map(HostInfo::getHostname)
.map(HostName::from)
.collect(Collectors.toSet());
hostnamesById.put(applicationInfo.getApplicationId(), hostnames);
hostnames.forEach(hostname -> applicationsByHostname.put(hostname, applicationInfo));
listeners.forEach(listener -> listener.applicationActivated(applicationInfo));
} | Set<HostName> oldHostnames = hostnamesById.remove(applicationInfo.getApplicationId()); | public void add(ApplicationInfo applicationInfo) {
ApplicationId id = applicationInfo.getApplicationId();
ApplicationInfo oldApplicationInfo = applicationsById.put(id, applicationInfo);
final String logPrefix;
if (oldApplicationInfo == null) {
logPrefix = isComplete ? "New application " : "Bootstrapped application ";
} else {
logPrefix = isComplete ? "Reactivated application " : "Rebootstrapped application ";
}
logger.log(LogLevel.INFO, logPrefix + id);
Set<HostName> hostnames = hostnamesById.computeIfAbsent(id, k -> new HashSet<>());
Set<HostName> removedHosts = new HashSet<>(hostnames);
applicationInfo.getModel().getHosts().stream()
.map(HostInfo::getHostname)
.map(HostName::from)
.forEach(hostname -> {
if (!removedHosts.remove(hostname)) {
hostnames.add(hostname);
ApplicationId previousId = idsByHostname.put(hostname, id);
if (previousId != null && !previousId.equals(id)) {
logger.log(LogLevel.WARNING, hostname + " has been reassigned from " +
previousId + " to " + id);
Set<HostName> previousHostnames = hostnamesById.get(previousId);
if (previousHostnames != null) {
previousHostnames.remove(hostname);
}
}
}
});
removedHosts.forEach(idsByHostname::remove);
listeners.forEach(listener -> listener.applicationActivated(applicationInfo));
} | class DuperModel {
// Logger shared by all DuperModel instances.
private static Logger logger = Logger.getLogger(DuperModel.class.getName());
// Primary index: every known application keyed by its id.
private final Map<ApplicationId, ApplicationInfo> applicationsById = new HashMap<>();
// Reverse index: which application each host currently belongs to.
private final Map<HostName, ApplicationInfo> applicationsByHostname = new HashMap<>();
// Per-application host sets, kept in sync with applicationsByHostname.
private final Map<ApplicationId, Set<HostName>> hostnamesById = new HashMap<>();
// Subscribers notified on activation/removal/bootstrap-complete events.
private final List<DuperModelListener> listeners = new ArrayList<>();
// False until bootstrap finishes; flipped once by setComplete().
private boolean isComplete = false;
public void registerListener(DuperModelListener listener) {
applicationsById.values().forEach(listener::applicationActivated);
listeners.add(listener);
}
void setComplete() {
if (!isComplete) {
logger.log(LogLevel.INFO, "Bootstrap done - duper model is complete");
isComplete = true;
listeners.forEach(DuperModelListener::bootstrapComplete);
}
}
public boolean isComplete() { return isComplete; }
public int numberOfApplications() {
return applicationsById.size();
}
public int numberOfHosts() {
return applicationsByHostname.size();
}
public boolean contains(ApplicationId applicationId) {
return applicationsById.containsKey(applicationId);
}
public Optional<ApplicationInfo> getApplicationInfo(ApplicationId applicationId) {
return Optional.ofNullable(applicationsById.get(applicationId));
}
public Optional<ApplicationInfo> getApplicationInfo(HostName hostName) {
return Optional.ofNullable(applicationsByHostname.get(hostName));
}
public List<ApplicationInfo> getApplicationInfos() {
return List.copyOf(applicationsById.values());
}
public void remove(ApplicationId applicationId) {
Set<HostName> hostnames = hostnamesById.remove(applicationId);
if (hostnames != null) {
hostnames.forEach(applicationsByHostname::remove);
}
ApplicationInfo application = applicationsById.remove(applicationId);
if (application != null) {
logger.log(LogLevel.INFO, "Removed application " + applicationId);
listeners.forEach(listener -> listener.applicationRemoved(applicationId));
}
}
} | class DuperModel {
private static Logger logger = Logger.getLogger(DuperModel.class.getName());
private final Map<ApplicationId, ApplicationInfo> applicationsById = new HashMap<>();
private final Map<HostName, ApplicationId> idsByHostname = new HashMap<>();
private final Map<ApplicationId, Set<HostName>> hostnamesById = new HashMap<>();
private final List<DuperModelListener> listeners = new ArrayList<>();
private boolean isComplete = false;
public void registerListener(DuperModelListener listener) {
applicationsById.values().forEach(listener::applicationActivated);
listeners.add(listener);
}
void setComplete() {
if (!isComplete) {
logger.log(LogLevel.INFO, "Bootstrap done - duper model is complete");
isComplete = true;
listeners.forEach(DuperModelListener::bootstrapComplete);
}
}
public boolean isComplete() { return isComplete; }
public int numberOfApplications() {
return applicationsById.size();
}
public int numberOfHosts() {
return idsByHostname.size();
}
public boolean contains(ApplicationId applicationId) {
return applicationsById.containsKey(applicationId);
}
public Optional<ApplicationInfo> getApplicationInfo(ApplicationId applicationId) {
return Optional.ofNullable(applicationsById.get(applicationId));
}
public Optional<ApplicationInfo> getApplicationInfo(HostName hostName) {
return Optional.ofNullable(idsByHostname.get(hostName)).map(applicationsById::get);
}
public List<ApplicationInfo> getApplicationInfos() {
return List.copyOf(applicationsById.values());
}
public void remove(ApplicationId applicationId) {
Set<HostName> hostnames = hostnamesById.remove(applicationId);
if (hostnames != null) {
hostnames.forEach(idsByHostname::remove);
}
ApplicationInfo application = applicationsById.remove(applicationId);
if (application != null) {
logger.log(LogLevel.INFO, "Removed application " + applicationId);
listeners.forEach(listener -> listener.applicationRemoved(applicationId));
}
}
} |
During upgrade this will write a value with combined id that other instances that are not upgraded yet will not be able to read. Shouldn't we handle strings with combined id first and in a later version start writing it? | protected String toStringValue() {
return cluster.type().name() +
"/" + cluster.id().value() +
(cluster.group().isPresent() ? "/" + cluster.group().get().index() : "") +
"/" + index +
( cluster.isExclusive() ? "/exclusive" : "") +
( retired ? "/retired" : "") +
( cluster.combinedId().isPresent() ? "/" + cluster.combinedId().get().value() : "");
} | ( cluster.combinedId().isPresent() ? "/" + cluster.combinedId().get().value() : ""); | protected String toStringValue() {
return cluster.type().name() +
"/" + cluster.id().value() +
(cluster.group().isPresent() ? "/" + cluster.group().get().index() : "") +
"/" + index +
( cluster.isExclusive() ? "/exclusive" : "") +
( retired ? "/retired" : "") +
( cluster.combinedId().isPresent() ? "/" + cluster.combinedId().get().value() : "");
} | class ClusterMembership {
private ClusterSpec cluster;
private int index;
private boolean retired;
private String stringValue;
protected ClusterMembership() {}
private ClusterMembership(String stringValue, Version vespaVersion) {
String[] components = stringValue.split("/");
if (components.length < 4)
throw new RuntimeException("Could not parse '" + stringValue + "' to a cluster membership. " +
"Expected 'clusterType/clusterId/groupId/index[/retired][/exclusive][/combinedId]'");
boolean exclusive = false;
var combinedId = Optional.<String>empty();
if (components.length > 4) {
for (int i = 4; i < components.length; i++) {
String component = components[i];
switch (component) {
case "exclusive": exclusive = true; break;
case "retired": retired = true; break;
default: combinedId = Optional.of(component); break;
}
}
}
this.cluster = ClusterSpec.from(ClusterSpec.Type.valueOf(components[0]), ClusterSpec.Id.from(components[1]),
ClusterSpec.Group.from(Integer.parseInt(components[2])), vespaVersion,
exclusive, combinedId.map(ClusterSpec.Id::from));
this.index = Integer.parseInt(components[3]);
this.stringValue = toStringValue();
}
private ClusterMembership(ClusterSpec cluster, int index, boolean retired) {
this.cluster = cluster;
this.index = index;
this.retired = retired;
this.stringValue = toStringValue();
}
/** Returns the cluster this node is a member of */
public ClusterSpec cluster() { return cluster; }
/** Returns the index of this node within the cluster */
public int index() { return index; }
/** Returns whether the cluster should prepare for this node to be removed */
public boolean retired() { return retired; }
/** Returns a copy of this which is retired */
public ClusterMembership retire() {
return new ClusterMembership(cluster, index, true);
}
/** Returns a copy of this node which is not retired */
public ClusterMembership unretire() {
return new ClusterMembership(cluster, index, false);
}
public ClusterMembership with(ClusterSpec newCluster) {
return new ClusterMembership(newCluster, index, retired);
}
/**
* Returns all the information in this as a string which can be used to construct the same ClusterMembership
* instance using {@link
*/
public String stringValue() { return stringValue; }
@Override
public int hashCode() { return stringValue().hashCode(); }
@Override
public boolean equals(Object other) {
if (other == this) return true;
if ( ! (other instanceof ClusterMembership)) return false;
return ((ClusterMembership)other).stringValue().equals(stringValue());
}
@Override
public String toString() { return stringValue(); }
public static ClusterMembership from(String stringValue, Version vespaVersion) {
return new ClusterMembership(stringValue, vespaVersion);
}
public static ClusterMembership from(ClusterSpec cluster, int index) {
return new ClusterMembership(cluster, index, false);
}
public static ClusterMembership retiredFrom(ClusterSpec cluster, int index) {
return new ClusterMembership(cluster, index, true);
}
} | class ClusterMembership {
private ClusterSpec cluster;
private int index;
private boolean retired;
private String stringValue;
protected ClusterMembership() {}
private ClusterMembership(String stringValue, Version vespaVersion) {
String[] components = stringValue.split("/");
if (components.length < 4)
throw new RuntimeException("Could not parse '" + stringValue + "' to a cluster membership. " +
"Expected 'clusterType/clusterId/groupId/index[/retired][/exclusive][/combinedId]'");
boolean exclusive = false;
var combinedId = Optional.<String>empty();
if (components.length > 4) {
for (int i = 4; i < components.length; i++) {
String component = components[i];
switch (component) {
case "exclusive": exclusive = true; break;
case "retired": retired = true; break;
default: combinedId = Optional.of(component); break;
}
}
}
this.cluster = ClusterSpec.from(ClusterSpec.Type.valueOf(components[0]), ClusterSpec.Id.from(components[1]),
ClusterSpec.Group.from(Integer.parseInt(components[2])), vespaVersion,
exclusive, combinedId.map(ClusterSpec.Id::from));
this.index = Integer.parseInt(components[3]);
this.stringValue = toStringValue();
}
private ClusterMembership(ClusterSpec cluster, int index, boolean retired) {
this.cluster = cluster;
this.index = index;
this.retired = retired;
this.stringValue = toStringValue();
}
/** Returns the cluster this node is a member of */
public ClusterSpec cluster() { return cluster; }
/** Returns the index of this node within the cluster */
public int index() { return index; }
/** Returns whether the cluster should prepare for this node to be removed */
public boolean retired() { return retired; }
/** Returns a copy of this which is retired */
public ClusterMembership retire() {
return new ClusterMembership(cluster, index, true);
}
/** Returns a copy of this node which is not retired */
public ClusterMembership unretire() {
return new ClusterMembership(cluster, index, false);
}
public ClusterMembership with(ClusterSpec newCluster) {
return new ClusterMembership(newCluster, index, retired);
}
/**
* Returns all the information in this as a string which can be used to construct the same ClusterMembership
* instance using {@link
*/
public String stringValue() { return stringValue; }
@Override
public int hashCode() { return stringValue().hashCode(); }
@Override
public boolean equals(Object other) {
if (other == this) return true;
if ( ! (other instanceof ClusterMembership)) return false;
return ((ClusterMembership)other).stringValue().equals(stringValue());
}
@Override
public String toString() { return stringValue(); }
public static ClusterMembership from(String stringValue, Version vespaVersion) {
return new ClusterMembership(stringValue, vespaVersion);
}
public static ClusterMembership from(ClusterSpec cluster, int index) {
return new ClusterMembership(cluster, index, false);
}
public static ClusterMembership retiredFrom(ClusterSpec cluster, int index) {
return new ClusterMembership(cluster, index, true);
}
} |
Of course, thanks! | protected String toStringValue() {
return cluster.type().name() +
"/" + cluster.id().value() +
(cluster.group().isPresent() ? "/" + cluster.group().get().index() : "") +
"/" + index +
( cluster.isExclusive() ? "/exclusive" : "") +
( retired ? "/retired" : "") +
( cluster.combinedId().isPresent() ? "/" + cluster.combinedId().get().value() : "");
} | ( cluster.combinedId().isPresent() ? "/" + cluster.combinedId().get().value() : ""); | protected String toStringValue() {
return cluster.type().name() +
"/" + cluster.id().value() +
(cluster.group().isPresent() ? "/" + cluster.group().get().index() : "") +
"/" + index +
( cluster.isExclusive() ? "/exclusive" : "") +
( retired ? "/retired" : "") +
( cluster.combinedId().isPresent() ? "/" + cluster.combinedId().get().value() : "");
} | class ClusterMembership {
private ClusterSpec cluster;
private int index;
private boolean retired;
private String stringValue;
protected ClusterMembership() {}
private ClusterMembership(String stringValue, Version vespaVersion) {
String[] components = stringValue.split("/");
if (components.length < 4)
throw new RuntimeException("Could not parse '" + stringValue + "' to a cluster membership. " +
"Expected 'clusterType/clusterId/groupId/index[/retired][/exclusive][/combinedId]'");
boolean exclusive = false;
var combinedId = Optional.<String>empty();
if (components.length > 4) {
for (int i = 4; i < components.length; i++) {
String component = components[i];
switch (component) {
case "exclusive": exclusive = true; break;
case "retired": retired = true; break;
default: combinedId = Optional.of(component); break;
}
}
}
this.cluster = ClusterSpec.from(ClusterSpec.Type.valueOf(components[0]), ClusterSpec.Id.from(components[1]),
ClusterSpec.Group.from(Integer.parseInt(components[2])), vespaVersion,
exclusive, combinedId.map(ClusterSpec.Id::from));
this.index = Integer.parseInt(components[3]);
this.stringValue = toStringValue();
}
private ClusterMembership(ClusterSpec cluster, int index, boolean retired) {
this.cluster = cluster;
this.index = index;
this.retired = retired;
this.stringValue = toStringValue();
}
/** Returns the cluster this node is a member of */
public ClusterSpec cluster() { return cluster; }
/** Returns the index of this node within the cluster */
public int index() { return index; }
/** Returns whether the cluster should prepare for this node to be removed */
public boolean retired() { return retired; }
/** Returns a copy of this which is retired */
public ClusterMembership retire() {
return new ClusterMembership(cluster, index, true);
}
/** Returns a copy of this node which is not retired */
public ClusterMembership unretire() {
return new ClusterMembership(cluster, index, false);
}
public ClusterMembership with(ClusterSpec newCluster) {
return new ClusterMembership(newCluster, index, retired);
}
/**
* Returns all the information in this as a string which can be used to construct the same ClusterMembership
* instance using {@link
*/
public String stringValue() { return stringValue; }
@Override
public int hashCode() { return stringValue().hashCode(); }
@Override
public boolean equals(Object other) {
if (other == this) return true;
if ( ! (other instanceof ClusterMembership)) return false;
return ((ClusterMembership)other).stringValue().equals(stringValue());
}
@Override
public String toString() { return stringValue(); }
public static ClusterMembership from(String stringValue, Version vespaVersion) {
return new ClusterMembership(stringValue, vespaVersion);
}
public static ClusterMembership from(ClusterSpec cluster, int index) {
return new ClusterMembership(cluster, index, false);
}
public static ClusterMembership retiredFrom(ClusterSpec cluster, int index) {
return new ClusterMembership(cluster, index, true);
}
} | class ClusterMembership {
private ClusterSpec cluster;
private int index;
private boolean retired;
private String stringValue;
protected ClusterMembership() {}
private ClusterMembership(String stringValue, Version vespaVersion) {
String[] components = stringValue.split("/");
if (components.length < 4)
throw new RuntimeException("Could not parse '" + stringValue + "' to a cluster membership. " +
"Expected 'clusterType/clusterId/groupId/index[/retired][/exclusive][/combinedId]'");
boolean exclusive = false;
var combinedId = Optional.<String>empty();
if (components.length > 4) {
for (int i = 4; i < components.length; i++) {
String component = components[i];
switch (component) {
case "exclusive": exclusive = true; break;
case "retired": retired = true; break;
default: combinedId = Optional.of(component); break;
}
}
}
this.cluster = ClusterSpec.from(ClusterSpec.Type.valueOf(components[0]), ClusterSpec.Id.from(components[1]),
ClusterSpec.Group.from(Integer.parseInt(components[2])), vespaVersion,
exclusive, combinedId.map(ClusterSpec.Id::from));
this.index = Integer.parseInt(components[3]);
this.stringValue = toStringValue();
}
private ClusterMembership(ClusterSpec cluster, int index, boolean retired) {
this.cluster = cluster;
this.index = index;
this.retired = retired;
this.stringValue = toStringValue();
}
/** Returns the cluster this node is a member of */
public ClusterSpec cluster() { return cluster; }
/** Returns the index of this node within the cluster */
public int index() { return index; }
/** Returns whether the cluster should prepare for this node to be removed */
public boolean retired() { return retired; }
/** Returns a copy of this which is retired */
public ClusterMembership retire() {
return new ClusterMembership(cluster, index, true);
}
/** Returns a copy of this node which is not retired */
public ClusterMembership unretire() {
return new ClusterMembership(cluster, index, false);
}
public ClusterMembership with(ClusterSpec newCluster) {
return new ClusterMembership(newCluster, index, retired);
}
/**
* Returns all the information in this as a string which can be used to construct the same ClusterMembership
* instance using {@link
*/
public String stringValue() { return stringValue; }
@Override
public int hashCode() { return stringValue().hashCode(); }
@Override
public boolean equals(Object other) {
if (other == this) return true;
if ( ! (other instanceof ClusterMembership)) return false;
return ((ClusterMembership)other).stringValue().equals(stringValue());
}
@Override
public String toString() { return stringValue(); }
public static ClusterMembership from(String stringValue, Version vespaVersion) {
return new ClusterMembership(stringValue, vespaVersion);
}
public static ClusterMembership from(ClusterSpec cluster, int index) {
return new ClusterMembership(cluster, index, false);
}
public static ClusterMembership retiredFrom(ClusterSpec cluster, int index) {
return new ClusterMembership(cluster, index, true);
}
} |
Actually, the existing code appears to handle this. It ignores trailing components of the serialized string that it does not explicitly handle. | protected String toStringValue() {
return cluster.type().name() +
"/" + cluster.id().value() +
(cluster.group().isPresent() ? "/" + cluster.group().get().index() : "") +
"/" + index +
( cluster.isExclusive() ? "/exclusive" : "") +
( retired ? "/retired" : "") +
( cluster.combinedId().isPresent() ? "/" + cluster.combinedId().get().value() : "");
} | ( cluster.combinedId().isPresent() ? "/" + cluster.combinedId().get().value() : ""); | protected String toStringValue() {
return cluster.type().name() +
"/" + cluster.id().value() +
(cluster.group().isPresent() ? "/" + cluster.group().get().index() : "") +
"/" + index +
( cluster.isExclusive() ? "/exclusive" : "") +
( retired ? "/retired" : "") +
( cluster.combinedId().isPresent() ? "/" + cluster.combinedId().get().value() : "");
} | class ClusterMembership {
private ClusterSpec cluster;
private int index;
private boolean retired;
private String stringValue;
protected ClusterMembership() {}
private ClusterMembership(String stringValue, Version vespaVersion) {
String[] components = stringValue.split("/");
if (components.length < 4)
throw new RuntimeException("Could not parse '" + stringValue + "' to a cluster membership. " +
"Expected 'clusterType/clusterId/groupId/index[/retired][/exclusive][/combinedId]'");
boolean exclusive = false;
var combinedId = Optional.<String>empty();
if (components.length > 4) {
for (int i = 4; i < components.length; i++) {
String component = components[i];
switch (component) {
case "exclusive": exclusive = true; break;
case "retired": retired = true; break;
default: combinedId = Optional.of(component); break;
}
}
}
this.cluster = ClusterSpec.from(ClusterSpec.Type.valueOf(components[0]), ClusterSpec.Id.from(components[1]),
ClusterSpec.Group.from(Integer.parseInt(components[2])), vespaVersion,
exclusive, combinedId.map(ClusterSpec.Id::from));
this.index = Integer.parseInt(components[3]);
this.stringValue = toStringValue();
}
private ClusterMembership(ClusterSpec cluster, int index, boolean retired) {
this.cluster = cluster;
this.index = index;
this.retired = retired;
this.stringValue = toStringValue();
}
/** Returns the cluster this node is a member of */
public ClusterSpec cluster() { return cluster; }
/** Returns the index of this node within the cluster */
public int index() { return index; }
/** Returns whether the cluster should prepare for this node to be removed */
public boolean retired() { return retired; }
/** Returns a copy of this which is retired */
public ClusterMembership retire() {
return new ClusterMembership(cluster, index, true);
}
/** Returns a copy of this node which is not retired */
public ClusterMembership unretire() {
return new ClusterMembership(cluster, index, false);
}
public ClusterMembership with(ClusterSpec newCluster) {
return new ClusterMembership(newCluster, index, retired);
}
/**
* Returns all the information in this as a string which can be used to construct the same ClusterMembership
* instance using {@link
*/
public String stringValue() { return stringValue; }
@Override
public int hashCode() { return stringValue().hashCode(); }
@Override
public boolean equals(Object other) {
if (other == this) return true;
if ( ! (other instanceof ClusterMembership)) return false;
return ((ClusterMembership)other).stringValue().equals(stringValue());
}
@Override
public String toString() { return stringValue(); }
public static ClusterMembership from(String stringValue, Version vespaVersion) {
return new ClusterMembership(stringValue, vespaVersion);
}
public static ClusterMembership from(ClusterSpec cluster, int index) {
return new ClusterMembership(cluster, index, false);
}
public static ClusterMembership retiredFrom(ClusterSpec cluster, int index) {
return new ClusterMembership(cluster, index, true);
}
} | class ClusterMembership {
private ClusterSpec cluster;
private int index;
private boolean retired;
private String stringValue;
protected ClusterMembership() {}
private ClusterMembership(String stringValue, Version vespaVersion) {
String[] components = stringValue.split("/");
if (components.length < 4)
throw new RuntimeException("Could not parse '" + stringValue + "' to a cluster membership. " +
"Expected 'clusterType/clusterId/groupId/index[/retired][/exclusive][/combinedId]'");
boolean exclusive = false;
var combinedId = Optional.<String>empty();
if (components.length > 4) {
for (int i = 4; i < components.length; i++) {
String component = components[i];
switch (component) {
case "exclusive": exclusive = true; break;
case "retired": retired = true; break;
default: combinedId = Optional.of(component); break;
}
}
}
this.cluster = ClusterSpec.from(ClusterSpec.Type.valueOf(components[0]), ClusterSpec.Id.from(components[1]),
ClusterSpec.Group.from(Integer.parseInt(components[2])), vespaVersion,
exclusive, combinedId.map(ClusterSpec.Id::from));
this.index = Integer.parseInt(components[3]);
this.stringValue = toStringValue();
}
private ClusterMembership(ClusterSpec cluster, int index, boolean retired) {
this.cluster = cluster;
this.index = index;
this.retired = retired;
this.stringValue = toStringValue();
}
/** Returns the cluster this node is a member of */
public ClusterSpec cluster() { return cluster; }
/** Returns the index of this node within the cluster */
public int index() { return index; }
/** Returns whether the cluster should prepare for this node to be removed */
public boolean retired() { return retired; }
/** Returns a copy of this which is retired */
public ClusterMembership retire() {
return new ClusterMembership(cluster, index, true);
}
/** Returns a copy of this node which is not retired */
public ClusterMembership unretire() {
return new ClusterMembership(cluster, index, false);
}
public ClusterMembership with(ClusterSpec newCluster) {
return new ClusterMembership(newCluster, index, retired);
}
/**
* Returns all the information in this as a string which can be used to construct the same ClusterMembership
* instance using {@link
*/
public String stringValue() { return stringValue; }
@Override
public int hashCode() { return stringValue().hashCode(); }
@Override
public boolean equals(Object other) {
if (other == this) return true;
if ( ! (other instanceof ClusterMembership)) return false;
return ((ClusterMembership)other).stringValue().equals(stringValue());
}
@Override
public String toString() { return stringValue(); }
public static ClusterMembership from(String stringValue, Version vespaVersion) {
return new ClusterMembership(stringValue, vespaVersion);
}
public static ClusterMembership from(ClusterSpec cluster, int index) {
return new ClusterMembership(cluster, index, false);
}
public static ClusterMembership retiredFrom(ClusterSpec cluster, int index) {
return new ClusterMembership(cluster, index, true);
}
} |
Yes, it might not be an issue here. As long as you don't need the combined id I guess that will work, but if you later rely upon this (e.g. in a later PR that goes in before this is released) something might break. | protected String toStringValue() {
return cluster.type().name() +
"/" + cluster.id().value() +
(cluster.group().isPresent() ? "/" + cluster.group().get().index() : "") +
"/" + index +
( cluster.isExclusive() ? "/exclusive" : "") +
( retired ? "/retired" : "") +
( cluster.combinedId().isPresent() ? "/" + cluster.combinedId().get().value() : "");
} | ( cluster.combinedId().isPresent() ? "/" + cluster.combinedId().get().value() : ""); | protected String toStringValue() {
return cluster.type().name() +
"/" + cluster.id().value() +
(cluster.group().isPresent() ? "/" + cluster.group().get().index() : "") +
"/" + index +
( cluster.isExclusive() ? "/exclusive" : "") +
( retired ? "/retired" : "") +
( cluster.combinedId().isPresent() ? "/" + cluster.combinedId().get().value() : "");
} | class ClusterMembership {
private ClusterSpec cluster;
private int index;
private boolean retired;
private String stringValue;
protected ClusterMembership() {}
private ClusterMembership(String stringValue, Version vespaVersion) {
String[] components = stringValue.split("/");
if (components.length < 4)
throw new RuntimeException("Could not parse '" + stringValue + "' to a cluster membership. " +
"Expected 'clusterType/clusterId/groupId/index[/retired][/exclusive][/combinedId]'");
boolean exclusive = false;
var combinedId = Optional.<String>empty();
if (components.length > 4) {
for (int i = 4; i < components.length; i++) {
String component = components[i];
switch (component) {
case "exclusive": exclusive = true; break;
case "retired": retired = true; break;
default: combinedId = Optional.of(component); break;
}
}
}
this.cluster = ClusterSpec.from(ClusterSpec.Type.valueOf(components[0]), ClusterSpec.Id.from(components[1]),
ClusterSpec.Group.from(Integer.parseInt(components[2])), vespaVersion,
exclusive, combinedId.map(ClusterSpec.Id::from));
this.index = Integer.parseInt(components[3]);
this.stringValue = toStringValue();
}
private ClusterMembership(ClusterSpec cluster, int index, boolean retired) {
this.cluster = cluster;
this.index = index;
this.retired = retired;
this.stringValue = toStringValue();
}
/** Returns the cluster this node is a member of */
public ClusterSpec cluster() { return cluster; }
/** Returns the index of this node within the cluster */
public int index() { return index; }
/** Returns whether the cluster should prepare for this node to be removed */
public boolean retired() { return retired; }
/** Returns a copy of this which is retired */
public ClusterMembership retire() {
return new ClusterMembership(cluster, index, true);
}
/** Returns a copy of this node which is not retired */
public ClusterMembership unretire() {
return new ClusterMembership(cluster, index, false);
}
public ClusterMembership with(ClusterSpec newCluster) {
return new ClusterMembership(newCluster, index, retired);
}
/**
* Returns all the information in this as a string which can be used to construct the same ClusterMembership
* instance using {@link
*/
public String stringValue() { return stringValue; }
@Override
public int hashCode() { return stringValue().hashCode(); }
@Override
public boolean equals(Object other) {
if (other == this) return true;
if ( ! (other instanceof ClusterMembership)) return false;
return ((ClusterMembership)other).stringValue().equals(stringValue());
}
@Override
public String toString() { return stringValue(); }
public static ClusterMembership from(String stringValue, Version vespaVersion) {
return new ClusterMembership(stringValue, vespaVersion);
}
public static ClusterMembership from(ClusterSpec cluster, int index) {
return new ClusterMembership(cluster, index, false);
}
public static ClusterMembership retiredFrom(ClusterSpec cluster, int index) {
return new ClusterMembership(cluster, index, true);
}
} | class ClusterMembership {
private ClusterSpec cluster;
private int index;
private boolean retired;
private String stringValue;
protected ClusterMembership() {}
private ClusterMembership(String stringValue, Version vespaVersion) {
String[] components = stringValue.split("/");
if (components.length < 4)
throw new RuntimeException("Could not parse '" + stringValue + "' to a cluster membership. " +
"Expected 'clusterType/clusterId/groupId/index[/retired][/exclusive][/combinedId]'");
boolean exclusive = false;
var combinedId = Optional.<String>empty();
if (components.length > 4) {
for (int i = 4; i < components.length; i++) {
String component = components[i];
switch (component) {
case "exclusive": exclusive = true; break;
case "retired": retired = true; break;
default: combinedId = Optional.of(component); break;
}
}
}
this.cluster = ClusterSpec.from(ClusterSpec.Type.valueOf(components[0]), ClusterSpec.Id.from(components[1]),
ClusterSpec.Group.from(Integer.parseInt(components[2])), vespaVersion,
exclusive, combinedId.map(ClusterSpec.Id::from));
this.index = Integer.parseInt(components[3]);
this.stringValue = toStringValue();
}
private ClusterMembership(ClusterSpec cluster, int index, boolean retired) {
this.cluster = cluster;
this.index = index;
this.retired = retired;
this.stringValue = toStringValue();
}
/** Returns the cluster this node is a member of */
public ClusterSpec cluster() { return cluster; }
/** Returns the index of this node within the cluster */
public int index() { return index; }
/** Returns whether the cluster should prepare for this node to be removed */
public boolean retired() { return retired; }
/** Returns a copy of this which is retired */
public ClusterMembership retire() {
return new ClusterMembership(cluster, index, true);
}
/** Returns a copy of this node which is not retired */
public ClusterMembership unretire() {
return new ClusterMembership(cluster, index, false);
}
public ClusterMembership with(ClusterSpec newCluster) {
return new ClusterMembership(newCluster, index, retired);
}
/**
* Returns all the information in this as a string which can be used to construct the same ClusterMembership
* instance using {@link
*/
public String stringValue() { return stringValue; }
@Override
public int hashCode() { return stringValue().hashCode(); }
@Override
public boolean equals(Object other) {
if (other == this) return true;
if ( ! (other instanceof ClusterMembership)) return false;
return ((ClusterMembership)other).stringValue().equals(stringValue());
}
@Override
public String toString() { return stringValue(); }
public static ClusterMembership from(String stringValue, Version vespaVersion) {
return new ClusterMembership(stringValue, vespaVersion);
}
public static ClusterMembership from(ClusterSpec cluster, int index) {
return new ClusterMembership(cluster, index, false);
}
public static ClusterMembership retiredFrom(ClusterSpec cluster, int index) {
return new ClusterMembership(cluster, index, true);
}
} |
```suggestion if (best.isEmpty() || best.get().cost() > candidate.cost()) ``` Previously this would compare cost of advertised resources for the best match against cost of real resources for the candidate. | private Optional<AllocatableClusterResources> toAllocatableResources(ClusterResources resources, ClusterSpec cluster) {
if (allowsHostSharing(nodeRepository.zone().cloud())) {
NodeResources nodeResources = nodeResourceLimits.enlargeToLegal(resources.nodeResources(), cluster.type());
for (Flavor flavor : nodeRepository.getAvailableFlavors().getFlavors())
if (flavor.resources().satisfies(nodeResources))
return Optional.of(new AllocatableClusterResources(resources.with(nodeResources),
nodeResources));
return Optional.empty();
}
else {
Optional<AllocatableClusterResources> best = Optional.empty();
for (Flavor flavor : nodeRepository.getAvailableFlavors().getFlavors()) {
if ( ! flavor.resources().satisfies(resources.nodeResources())) continue;
if (flavor.resources().storageType() == NodeResources.StorageType.remote)
flavor = flavor.with(FlavorOverrides.ofDisk(resources.nodeResources().diskGb()));
var candidate = new AllocatableClusterResources(resources.with(flavor.resources()),
hostResourcesCalculator.availableCapacityOf(flavor.name(), flavor.resources()));
if (best.isEmpty() || best.get().cost() > costOf(flavor.resources()))
best = Optional.of(candidate);
}
return best;
}
} | if (best.isEmpty() || best.get().cost() > costOf(flavor.resources())) | private Optional<AllocatableClusterResources> toAllocatableResources(ClusterResources resources, ClusterSpec cluster) {
NodeResources nodeResources = nodeResourceLimits.enlargeToLegal(resources.nodeResources(), cluster.type());
if (allowsHostSharing(nodeRepository.zone().cloud())) {
for (Flavor flavor : nodeRepository.getAvailableFlavors().getFlavors())
if (flavor.resources().satisfies(nodeResources))
return Optional.of(new AllocatableClusterResources(resources.with(nodeResources),
nodeResources));
return Optional.empty();
}
else {
Optional<AllocatableClusterResources> best = Optional.empty();
for (Flavor flavor : nodeRepository.getAvailableFlavors().getFlavors()) {
if ( ! flavor.resources().satisfies(nodeResources)) continue;
if (flavor.resources().storageType() == NodeResources.StorageType.remote)
flavor = flavor.with(FlavorOverrides.ofDisk(nodeResources.diskGb()));
var candidate = new AllocatableClusterResources(resources.with(flavor.resources()),
flavor,
resourcesCalculator);
if (best.isEmpty() || candidate.cost() <= best.get().cost())
best = Optional.of(candidate);
}
return best;
}
} | class Autoscaler {
private Logger log = Logger.getLogger(Autoscaler.class.getName());
/*
TODO:
- Scale group size
- Consider taking spikes/variance into account
- Measure observed regulation lag (startup+redistribution) and take it into account when deciding regulation observation window
- Test AutoscalingMaintainer
- Scale by performance not just load+cost
*/
private static final int minimumMeasurements = 500;
/** What cost difference factor warrants reallocation? */
private static final double costDifferenceRatioWorthReallocation = 0.1;
/** What difference factor from ideal (for any resource) warrants a change? */
private static final double idealDivergenceWorthReallocation = 0.1;
private static final double cpuUnitCost = 12.0;
private static final double memoryUnitCost = 1.2;
private static final double diskUnitCost = 0.045;
private final HostResourcesCalculator hostResourcesCalculator;
private final NodeMetricsDb metricsDb;
private final NodeRepository nodeRepository;
private final NodeResourceLimits nodeResourceLimits;
public Autoscaler(HostResourcesCalculator hostResourcesCalculator,
NodeMetricsDb metricsDb,
NodeRepository nodeRepository) {
this.hostResourcesCalculator = hostResourcesCalculator;
this.metricsDb = metricsDb;
this.nodeRepository = nodeRepository;
this.nodeResourceLimits = new NodeResourceLimits(nodeRepository.zone());
}
public Optional<ClusterResources> autoscale(ApplicationId applicationId, ClusterSpec cluster, List<Node> clusterNodes) {
if (clusterNodes.stream().anyMatch(node -> node.status().wantToRetire() ||
node.allocation().get().membership().retired() ||
node.allocation().get().isRemovable()))
return Optional.empty();
ClusterResources currentAllocation = new ClusterResources(clusterNodes);
Optional<Double> cpuLoad = averageLoad(Resource.cpu, cluster, clusterNodes);
Optional<Double> memoryLoad = averageLoad(Resource.memory, cluster, clusterNodes);
Optional<Double> diskLoad = averageLoad(Resource.disk, cluster, clusterNodes);
if (cpuLoad.isEmpty() || memoryLoad.isEmpty() || diskLoad.isEmpty()) {
log.fine("Autoscaling " + applicationId + " " + cluster + ": Insufficient metrics to decide");
return Optional.empty();
}
Optional<AllocatableClusterResources> bestAllocation = findBestAllocation(cpuLoad.get(),
memoryLoad.get(),
diskLoad.get(),
currentAllocation,
cluster);
if (bestAllocation.isEmpty()) {
log.fine("Autoscaling " + applicationId + " " + cluster + ": Could not find a better allocation");
return Optional.empty();
}
if (closeToIdeal(Resource.cpu, cpuLoad.get()) &&
closeToIdeal(Resource.memory, memoryLoad.get()) &&
closeToIdeal(Resource.disk, diskLoad.get()) &&
similarCost(bestAllocation.get().cost(), currentAllocation.nodes() * costOf(currentAllocation.nodeResources()))) {
log.fine("Autoscaling " + applicationId + " " + cluster + ": Resources are almost ideal and price difference is small");
return Optional.empty();
}
return bestAllocation.map(a -> a.advertisedResources());
}
private Optional<AllocatableClusterResources> findBestAllocation(double cpuLoad, double memoryLoad, double diskLoad,
ClusterResources currentAllocation, ClusterSpec cluster) {
Optional<AllocatableClusterResources> bestAllocation = Optional.empty();
for (ResourceIterator i = new ResourceIterator(cpuLoad, memoryLoad, diskLoad, currentAllocation); i.hasNext(); ) {
ClusterResources allocation = i.next();
Optional<AllocatableClusterResources> allocatableResources = toAllocatableResources(allocation, cluster);
if (allocatableResources.isEmpty()) continue;
if (bestAllocation.isEmpty() || allocatableResources.get().cost() < bestAllocation.get().cost())
bestAllocation = allocatableResources;
}
return bestAllocation;
}
private boolean similarCost(double cost1, double cost2) {
return similar(cost1, cost2, costDifferenceRatioWorthReallocation);
}
private boolean closeToIdeal(Resource resource, double value) {
return similar(resource.idealAverageLoad(), value, idealDivergenceWorthReallocation);
}
private boolean similar(double r1, double r2, double threshold) {
return Math.abs(r1 - r2) / r1 < threshold;
}
/**
* Returns the smallest allocatable node resources larger than the given node resources,
* or empty if none available.
*/
/**
* Returns the average load of this resource in the measurement window,
* or empty if we are not in a position to make decisions from these measurements at this time.
*/
private Optional<Double> averageLoad(Resource resource, ClusterSpec cluster, List<Node> clusterNodes) {
NodeMetricsDb.Window window = metricsDb.getWindow(nodeRepository.clock().instant().minus(scalingWindow(cluster.type())),
resource,
clusterNodes.stream().map(Node::hostname).collect(Collectors.toList()));
if (window.measurementCount() < minimumMeasurements) return Optional.empty();
if (window.hostnames() != clusterNodes.size()) return Optional.empty();
return Optional.of(window.average());
}
/** The duration of the window we need to consider to make a scaling decision */
private Duration scalingWindow(ClusterSpec.Type clusterType) {
if (clusterType.isContent()) return Duration.ofHours(12);
return Duration.ofHours(12);
}
private boolean allowsHostSharing(CloudName cloudName) {
if (cloudName.value().equals("aws")) return false;
return true;
}
static double costOf(NodeResources resources) {
return resources.vcpu() * cpuUnitCost +
resources.memoryGb() * memoryUnitCost +
resources.diskGb() * diskUnitCost;
}
} | class Autoscaler {
private Logger log = Logger.getLogger(Autoscaler.class.getName());
/*
TODO:
- Scale group size
- Consider taking spikes/variance into account
- Measure observed regulation lag (startup+redistribution) and take it into account when deciding regulation observation window
- Test AutoscalingMaintainer
- Scale by performance not just load+cost
*/
private static final int minimumMeasurements = 500;
/** What cost difference factor warrants reallocation? */
private static final double costDifferenceRatioWorthReallocation = 0.1;
/** What difference factor from ideal (for any resource) warrants a change? */
private static final double idealDivergenceWorthReallocation = 0.1;
private static final double cpuUnitCost = 12.0;
private static final double memoryUnitCost = 1.2;
private static final double diskUnitCost = 0.045;
private final HostResourcesCalculator resourcesCalculator;
private final NodeMetricsDb metricsDb;
private final NodeRepository nodeRepository;
private final NodeResourceLimits nodeResourceLimits;
public Autoscaler(HostResourcesCalculator resourcesCalculator,
NodeMetricsDb metricsDb,
NodeRepository nodeRepository) {
this.resourcesCalculator = resourcesCalculator;
this.metricsDb = metricsDb;
this.nodeRepository = nodeRepository;
this.nodeResourceLimits = new NodeResourceLimits(nodeRepository.zone());
}
public Optional<AllocatableClusterResources> autoscale(ApplicationId applicationId, ClusterSpec cluster, List<Node> clusterNodes) {
if (clusterNodes.stream().anyMatch(node -> node.status().wantToRetire() ||
node.allocation().get().membership().retired() ||
node.allocation().get().isRemovable()))
return Optional.empty();
AllocatableClusterResources currentAllocation = new AllocatableClusterResources(clusterNodes, resourcesCalculator);
Optional<Double> cpuLoad = averageLoad(Resource.cpu, cluster, clusterNodes);
Optional<Double> memoryLoad = averageLoad(Resource.memory, cluster, clusterNodes);
Optional<Double> diskLoad = averageLoad(Resource.disk, cluster, clusterNodes);
if (cpuLoad.isEmpty() || memoryLoad.isEmpty() || diskLoad.isEmpty()) {
log.fine("Autoscaling " + applicationId + " " + cluster + ": Insufficient metrics to decide");
return Optional.empty();
}
Optional<AllocatableClusterResources> bestAllocation = findBestAllocation(cpuLoad.get(),
memoryLoad.get(),
diskLoad.get(),
currentAllocation,
cluster);
if (bestAllocation.isEmpty()) {
log.fine("Autoscaling " + applicationId + " " + cluster + ": Could not find a better allocation");
return Optional.empty();
}
if (closeToIdeal(Resource.cpu, cpuLoad.get()) &&
closeToIdeal(Resource.memory, memoryLoad.get()) &&
closeToIdeal(Resource.disk, diskLoad.get()) &&
similarCost(bestAllocation.get().cost(), currentAllocation.cost())) {
log.fine("Autoscaling " + applicationId + " " + cluster + ": Resources are almost ideal and price difference is small");
return Optional.empty();
}
return bestAllocation;
}
private Optional<AllocatableClusterResources> findBestAllocation(double cpuLoad, double memoryLoad, double diskLoad,
AllocatableClusterResources currentAllocation, ClusterSpec cluster) {
Optional<AllocatableClusterResources> bestAllocation = Optional.empty();
for (ResourceIterator i = new ResourceIterator(cpuLoad, memoryLoad, diskLoad, currentAllocation); i.hasNext(); ) {
ClusterResources allocation = i.next();
Optional<AllocatableClusterResources> allocatableResources = toAllocatableResources(allocation, cluster);
if (allocatableResources.isEmpty()) continue;
if (bestAllocation.isEmpty() || allocatableResources.get().cost() < bestAllocation.get().cost())
bestAllocation = allocatableResources;
}
return bestAllocation;
}
private boolean similarCost(double cost1, double cost2) {
return similar(cost1, cost2, costDifferenceRatioWorthReallocation);
}
private boolean closeToIdeal(Resource resource, double value) {
return similar(resource.idealAverageLoad(), value, idealDivergenceWorthReallocation);
}
private boolean similar(double r1, double r2, double threshold) {
return Math.abs(r1 - r2) / r1 < threshold;
}
/**
* Returns the smallest allocatable node resources larger than the given node resources,
* or empty if none available.
*/
/**
* Returns the average load of this resource in the measurement window,
* or empty if we are not in a position to make decisions from these measurements at this time.
*/
private Optional<Double> averageLoad(Resource resource, ClusterSpec cluster, List<Node> clusterNodes) {
NodeMetricsDb.Window window = metricsDb.getWindow(nodeRepository.clock().instant().minus(scalingWindow(cluster.type())),
resource,
clusterNodes.stream().map(Node::hostname).collect(Collectors.toList()));
if (window.measurementCount() < minimumMeasurements) return Optional.empty();
if (window.hostnames() != clusterNodes.size()) return Optional.empty();
return Optional.of(window.average());
}
/** The duration of the window we need to consider to make a scaling decision */
private Duration scalingWindow(ClusterSpec.Type clusterType) {
if (clusterType.isContent()) return Duration.ofHours(12);
return Duration.ofHours(12);
}
private boolean allowsHostSharing(CloudName cloudName) {
if (cloudName.value().equals("aws")) return false;
return true;
}
static double costOf(NodeResources resources) {
return resources.vcpu() * cpuUnitCost +
resources.memoryGb() * memoryUnitCost +
resources.diskGb() * diskUnitCost;
}
} |
Combine with previous condition? Or is it necessary to treat them differently? | private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) {
var endpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zone)));
if ( ! endpoints.containsKey(zone)) {
logger.log("Endpoints not yet ready.");
return false;
}
var policies = controller.routing().policies().get(new DeploymentId(id, zone));
for (var endpoint : endpoints.get(zone)) {
HostName endpointName = HostName.from(endpoint.dnsName());
var ipAddress = controller.jobController().cloud().resolveHostName(endpointName);
if (ipAddress.isEmpty()) {
logger.log(INFO, "DNS lookup yielded no IP address for '" + endpointName + "'.");
return false;
}
if (endpoint.routingMethod() == RoutingMethod.exclusive) {
var policy = policies.get(new RoutingPolicyId(id, ClusterSpec.Id.from(endpoint.name()), zone));
if (policy == null)
throw new IllegalStateException(endpoint + " has no matching policy in " + policies);
var cNameValue = controller.jobController().cloud().resolveCName(endpointName);
if (cNameValue.isEmpty()) {
logger.log(INFO, "CNAME '" + endpointName + "' does not yet point to anything");
return false;
}
if ( ! cNameValue.get().equals(policy.canonicalName())) {
logger.log(INFO, "CNAME '" + endpointName + "' doesn't point to expected host name '" + policy.canonicalName() + "'");
return false;
}
var loadBalancerAddress = controller.jobController().cloud().resolveHostName(policy.canonicalName());
if (loadBalancerAddress.isEmpty()) {
logger.log(INFO, "DNS lookup yielded no IP address for load balancer '" + policy.canonicalName() + "'");
return false;
}
if ( ! loadBalancerAddress.equals(ipAddress)) {
logger.log(INFO, "IP address of CNAME '" + endpointName + "' (" + ipAddress.get() + ") and load balancer '" +
policy.canonicalName() + "' (" + loadBalancerAddress.get() + ") are not equal");
return false;
}
}
}
logEndpoints(endpoints, logger);
return true;
} | if ( ! cNameValue.get().equals(policy.canonicalName())) { | private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) {
var endpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zone)));
if ( ! endpoints.containsKey(zone)) {
logger.log("Endpoints not yet ready.");
return false;
}
var policies = controller.routing().policies().get(new DeploymentId(id, zone));
for (var endpoint : endpoints.get(zone)) {
HostName endpointName = HostName.from(endpoint.dnsName());
var ipAddress = controller.jobController().cloud().resolveHostName(endpointName);
if (ipAddress.isEmpty()) {
logger.log(INFO, "DNS lookup yielded no IP address for '" + endpointName + "'.");
return false;
}
if (endpoint.routingMethod() == RoutingMethod.exclusive) {
var policy = policies.get(new RoutingPolicyId(id, ClusterSpec.Id.from(endpoint.name()), zone));
if (policy == null)
throw new IllegalStateException(endpoint + " has no matching policy in " + policies);
var cNameValue = controller.jobController().cloud().resolveCname(endpointName);
if ( ! cNameValue.map(policy.canonicalName()::equals).orElse(false)) {
logger.log(INFO, "CNAME '" + endpointName + "' points at " +
cNameValue.map(name -> "'" + name + "'").orElse("nothing") +
" but should point at load balancer '" + policy.canonicalName() + "'");
return false;
}
var loadBalancerAddress = controller.jobController().cloud().resolveHostName(policy.canonicalName());
if ( ! loadBalancerAddress.equals(ipAddress)) {
logger.log(INFO, "IP address of CNAME '" + endpointName + "' (" + ipAddress.get() + ") and load balancer '" +
policy.canonicalName() + "' (" + loadBalancerAddress.orElse("empty") + ") are not equal");
return false;
}
}
}
logEndpoints(endpoints, logger);
return true;
} | class InternalStepRunner implements StepRunner {
private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName());
static final NodeResources DEFAULT_TESTER_RESOURCES =
new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any);
static final NodeResources DEFAULT_TESTER_RESOURCES_AWS =
new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any);
static final Duration capacityTimeout = Duration.ofMinutes(5);
static final Duration endpointTimeout = Duration.ofMinutes(15);
static final Duration endpointCertificateTimeout = Duration.ofMinutes(15);
static final Duration testerTimeout = Duration.ofMinutes(30);
static final Duration installationTimeout = Duration.ofMinutes(60);
static final Duration certificateTimeout = Duration.ofMinutes(300);
private final Controller controller;
private final TestConfigSerializer testConfigSerializer;
private final DeploymentFailureMails mails;
public InternalStepRunner(Controller controller) {
this.controller = controller;
this.testConfigSerializer = new TestConfigSerializer(controller.system());
this.mails = new DeploymentFailureMails(controller.zoneRegistry());
}
@Override
public Optional<RunStatus> run(LockedStep step, RunId id) {
DualLogger logger = new DualLogger(id, step.get());
try {
switch (step.get()) {
case deployTester: return deployTester(id, logger);
case deployInitialReal: return deployInitialReal(id, logger);
case installInitialReal: return installInitialReal(id, logger);
case deployReal: return deployReal(id, logger);
case installTester: return installTester(id, logger);
case installReal: return installReal(id, logger);
case startStagingSetup: return startTests(id, true, logger);
case endStagingSetup:
case endTests: return endTests(id, logger);
case startTests: return startTests(id, false, logger);
case copyVespaLogs: return copyVespaLogs(id, logger);
case deactivateReal: return deactivateReal(id, logger);
case deactivateTester: return deactivateTester(id, logger);
case report: return report(id, logger);
default: throw new AssertionError("Unknown step '" + step + "'!");
}
}
catch (UncheckedIOException e) {
logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e);
return Optional.empty();
}
catch (RuntimeException e) {
logger.log(WARNING, "Unexpected exception running " + id, e);
if (step.get().alwaysRun()) {
logger.log("Will keep trying, as this is a cleanup step.");
return Optional.empty();
}
return Optional.of(error);
}
}
private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) {
Versions versions = controller.jobController().run(id).get().versions();
logger.log("Deploying platform version " +
versions.sourcePlatform().orElse(versions.targetPlatform()) +
" and application version " +
versions.sourceApplication().orElse(versions.targetApplication()).id() + " ...");
return deployReal(id, true, logger);
}
private Optional<RunStatus> deployReal(RunId id, DualLogger logger) {
Versions versions = controller.jobController().run(id).get().versions();
logger.log("Deploying platform version " + versions.targetPlatform() +
" and application version " + versions.targetApplication().id() + " ...");
return deployReal(id, false, logger);
}
private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, DualLogger logger) {
return deploy(id.application(),
id.type(),
() -> controller.applications().deploy2(id.job(), setTheStage),
controller.jobController().run(id).get()
.stepInfo(setTheStage ? deployInitialReal : deployReal).get()
.startTime().get(),
logger);
}
private Optional<RunStatus> deployTester(RunId id, DualLogger logger) {
Version platform = controller.systemVersion();
logger.log("Deploying the tester container on platform " + platform + " ...");
return deploy(id.tester().id(),
id.type(),
() -> controller.applications().deployTester(id.tester(),
testerPackage(id),
id.type().zone(controller.system()),
platform),
controller.jobController().run(id).get()
.stepInfo(deployTester).get()
.startTime().get(),
logger);
}
private Optional<RunStatus> deploy(ApplicationId id, JobType type, Supplier<ActivateResult> deployment,
Instant startTime, DualLogger logger) {
try {
PrepareResponse prepareResponse = deployment.get().prepareResponse();
if (prepareResponse.log != null)
logger.logAll(prepareResponse.log.stream()
.map(entry -> new LogEntry(0,
Instant.ofEpochMilli(entry.time),
LogEntry.typeOf(LogLevel.parse(entry.level)),
entry.message))
.collect(toList()));
if ( ! prepareResponse.configChangeActions.refeedActions.stream().allMatch(action -> action.allowed)) {
List<String> messages = new ArrayList<>();
messages.add("Deploy failed due to non-compatible changes that require re-feed.");
messages.add("Your options are:");
messages.add("1. Revert the incompatible changes.");
messages.add("2. If you think it is safe in your case, you can override this validation, see");
messages.add(" http:
messages.add("3. Deploy as a new application under a different name.");
messages.add("Illegal actions:");
prepareResponse.configChangeActions.refeedActions.stream()
.filter(action -> ! action.allowed)
.flatMap(action -> action.messages.stream())
.forEach(messages::add);
logger.log(messages);
return Optional.of(deploymentFailed);
}
if (prepareResponse.configChangeActions.restartActions.isEmpty())
logger.log("No services requiring restart.");
else
prepareResponse.configChangeActions.restartActions.stream()
.flatMap(action -> action.services.stream())
.map(service -> service.hostName)
.sorted().distinct()
.map(Hostname::new)
.forEach(hostname -> {
controller.applications().restart(new DeploymentId(id, type.zone(controller.system())), Optional.of(hostname));
logger.log("Schedule service restart on host " + hostname.id() + ".");
});
logger.log("Deployment successful.");
if (prepareResponse.message != null)
logger.log(prepareResponse.message);
return Optional.of(running);
}
catch (ConfigServerException e) {
Optional<RunStatus> result = startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(deploymentFailed) : Optional.empty();
switch (e.getErrorCode()) {
case CERTIFICATE_NOT_READY:
if (startTime.plus(endpointCertificateTimeout).isBefore(controller.clock().instant())) {
logger.log("Deployment failed to find provisioned endpoint certificate after " + endpointCertificateTimeout);
return Optional.of(RunStatus.endpointCertificateTimeout);
}
return result;
case ACTIVATION_CONFLICT:
case APPLICATION_LOCK_FAILURE:
logger.log("Deployment failed with possibly transient error " + e.getErrorCode() +
", will retry: " + e.getMessage());
return result;
case LOAD_BALANCER_NOT_READY:
case PARENT_HOST_NOT_READY:
logger.log(e.getServerMessage());
return result;
case OUT_OF_CAPACITY:
logger.log(e.getServerMessage());
return controller.system().isCd() && startTime.plus(capacityTimeout).isAfter(controller.clock().instant())
? Optional.empty()
: Optional.of(outOfCapacity);
case INVALID_APPLICATION_PACKAGE:
case BAD_REQUEST:
logger.log(e.getMessage());
return Optional.of(deploymentFailed);
}
throw e;
}
catch (EndpointCertificateException e) {
switch (e.type()) {
case CERT_NOT_AVAILABLE:
if (startTime.plus(endpointCertificateTimeout).isBefore(controller.clock().instant())) {
logger.log("Deployment failed to find provisioned endpoint certificate after " + endpointCertificateTimeout);
return Optional.of(RunStatus.endpointCertificateTimeout);
}
return Optional.empty();
default:
throw e;
}
}
}
private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) {
return installReal(id, true, logger);
}
private Optional<RunStatus> installReal(RunId id, DualLogger logger) {
return installReal(id, false, logger);
}
private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
logger.log(INFO, "Deployment expired before installation was successful.");
return Optional.of(installationFailed);
}
Versions versions = controller.jobController().run(id).get().versions();
Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform();
Run run = controller.jobController().run(id).get();
Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(id.application(), id.type().zone(controller.system())),
Optional.of(platform));
if (services.isEmpty()) {
logger.log("Config status not currently available -- will retry.");
return Optional.empty();
}
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
id.application(),
ImmutableSet.of(active, reserved));
List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
NodeList nodeList = NodeList.of(nodes, parents, services.get());
boolean firstTick = run.convergenceSummary().isEmpty();
if (firstTick) {
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
}
ConvergenceSummary summary = nodeList.summary();
if (summary.converged()) {
controller.jobController().locked(id, lockedRun -> lockedRun.withSummary(null));
if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) {
if (containersAreUp(id.application(), id.type().zone(controller.system()), logger)) {
logger.log("Installation succeeded!");
return Optional.of(running);
}
}
else if (timedOut(id, deployment.get(), endpointTimeout)) {
logger.log(WARNING, "Endpoints failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
return Optional.of(error);
}
}
String failureReason = null;
NodeList suspendedTooLong = nodeList.suspendedSince(controller.clock().instant().minus(installationTimeout));
if ( ! suspendedTooLong.isEmpty()) {
failureReason = "Some nodes have been suspended for more than " + installationTimeout.toMinutes() + " minutes:\n" +
suspendedTooLong.asList().stream().map(node -> node.node().hostname().value()).collect(joining("\n"));
}
if (run.noNodesDownSince()
.map(since -> since.isBefore(controller.clock().instant().minus(installationTimeout)))
.orElse(false)) {
if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0)
failureReason ="No nodes allowed to suspend to progress installation for " + installationTimeout.toMinutes() + " minutes.";
else
failureReason = "Nodes not able to start with new application package.";
}
Duration timeout = JobRunner.jobTimeout.minusHours(1);
if (timedOut(id, deployment.get(), timeout)) {
failureReason = "Installation failed to complete within " + timeout.toHours() + "hours!";
}
if (failureReason != null) {
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
logger.log(INFO, failureReason);
return Optional.of(installationFailed);
}
if ( ! firstTick)
logger.log(nodeList.expectedDown().concat(nodeList.needsNewConfig()).asList().stream()
.distinct()
.flatMap(node -> nodeDetails(node, false))
.collect(toList()));
controller.jobController().locked(id, lockedRun -> {
Instant noNodesDownSince = nodeList.allowedDown().size() == 0 ? lockedRun.noNodesDownSince().orElse(controller.clock().instant()) : null;
return lockedRun.noNodesDownSince(noNodesDownSince).withSummary(summary);
});
return Optional.empty();
}
private Optional<RunStatus> installTester(RunId id, DualLogger logger) {
Run run = controller.jobController().run(id).get();
Version platform = controller.systemVersion();
ZoneId zone = id.type().zone(controller.system());
ApplicationId testerId = id.tester().id();
Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(testerId, zone),
Optional.of(platform));
if (services.isEmpty()) {
logger.log("Config status not currently available -- will retry.");
return run.stepInfo(installTester).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
? Optional.of(error)
: Optional.empty();
}
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone,
testerId,
ImmutableSet.of(active, reserved));
List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(zone,
nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
NodeList nodeList = NodeList.of(nodes, parents, services.get());
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, false))
.collect(toList()));
if (nodeList.summary().converged() && testerContainersAreUp(testerId, zone, logger)) {
logger.log("Tester container successfully installed!");
return Optional.of(running);
}
if (run.stepInfo(installTester).get().startTime().get().plus(testerTimeout).isBefore(controller.clock().instant())) {
logger.log(WARNING, "Installation of tester failed to complete within " + testerTimeout.toMinutes() + " minutes!");
return Optional.of(error);
}
return Optional.empty();
}
/** Returns true iff all containers in the deployment give 100 consecutive 200 OK responses on /status.html. */
private boolean containersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
var endpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zoneId)));
if ( ! endpoints.containsKey(zoneId))
return false;
for (var endpoint : endpoints.get(zoneId)) {
boolean ready = controller.jobController().cloud().ready(endpoint.url());
if ( ! ready) {
logger.log("Failed to get 100 consecutive OKs from " + endpoint);
return false;
}
}
return true;
}
/** Returns true iff all containers in the tester deployment give 100 consecutive 200 OK responses on /status.html. */
private boolean testerContainersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
DeploymentId deploymentId = new DeploymentId(id, zoneId);
if (controller.jobController().cloud().testerReady(deploymentId)) {
return true;
} else {
logger.log("Failed to get 100 consecutive OKs from tester container for " + deploymentId);
return false;
}
}
private void logEndpoints(Map<ZoneId, List<Endpoint>> zoneEndpoints, DualLogger logger) {
List<String> messages = new ArrayList<>();
messages.add("Found endpoints:");
zoneEndpoints.forEach((zone, endpoints) -> {
messages.add("- " + zone);
for (Endpoint endpoint : endpoints)
messages.add(" |-- " + endpoint.url() + " (cluster '" + endpoint.name() + "')");
});
logger.log(messages);
}
private Stream<String> nodeDetails(NodeWithServices node, boolean printAllServices) {
return Stream.concat(Stream.of(node.node().hostname() + ": " + humanize(node.node().serviceState()) + (node.node().suspendedSince().map(since -> " since " + since).orElse("")),
"--- platform " + node.node().wantedVersion() + (node.needsPlatformUpgrade()
? " <-- " + (node.node().currentVersion().isEmpty() ? "not booted" : node.node().currentVersion())
: "") +
(node.needsOsUpgrade() && node.isAllowedDown()
? ", upgrading OS (" + node.node().wantedOsVersion() + " <-- " + node.node().currentOsVersion() + ")"
: "") +
(node.needsFirmwareUpgrade() && node.isAllowedDown()
? ", upgrading firmware"
: "") +
(node.needsRestart()
? ", restart pending (" + node.node().wantedRestartGeneration() + " <-- " + node.node().restartGeneration() + ")"
: "") +
(node.needsReboot()
? ", reboot pending (" + node.node().wantedRebootGeneration() + " <-- " + node.node().rebootGeneration() + ")"
: "")),
node.services().stream()
.filter(service -> printAllServices || node.needsNewConfig())
.map(service -> "--- " + service.type() + " on port " + service.port() + (service.currentGeneration() == -1
? " has not started "
: " has config generation " + service.currentGeneration() + ", wanted is " + node.wantedConfigGeneration())));
}
private String humanize(Node.ServiceState state) {
switch (state) {
case allowedDown: return "allowed to be DOWN";
case expectedUp: return "expected to be UP";
case unorchestrated: return "unorchestrated";
default: return state.name();
}
}
/**
 * Asks the tester container to start the test suite for the given run, once the endpoints of the
 * deployment under test are available. Returns {@code error} if the deployment, its endpoints, or
 * the tester itself has gone away; {@code running} once the tests have been started.
 */
private Optional<RunStatus> startTests(RunId id, boolean isSetup, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
logger.log(INFO, "Deployment expired before tests could start.");
return Optional.of(error);
}
// Tests are given endpoints for the zone under test plus all production deployments of the instance.
var deployments = controller.applications().requireInstance(id.application())
.productionDeployments().keySet().stream()
.map(zone -> new DeploymentId(id.application(), zone))
.collect(Collectors.toSet());
ZoneId zoneId = id.type().zone(controller.system());
deployments.add(new DeploymentId(id.application(), zoneId));
logger.log("Attempting to find endpoints ...");
var endpoints = controller.routing().zoneEndpointsOf(deployments);
if ( ! endpoints.containsKey(zoneId)) {
logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!");
return Optional.of(error);
}
logEndpoints(endpoints, logger);
if (!controller.jobController().cloud().testerReady(getTesterDeploymentId(id))) {
logger.log(WARNING, "Tester container went bad!");
return Optional.of(error);
}
logger.log("Starting tests ...");
// isSetup selects the staging-setup suite rather than the suite for the job type itself.
TesterCloud.Suite suite = TesterCloud.Suite.of(id.type(), isSetup);
byte[] config = testConfigSerializer.configJson(id.application(),
id.type(),
true,
endpoints,
controller.applications().contentClustersByZone(deployments));
controller.jobController().cloud().startTests(getTesterDeploymentId(id), suite, config);
return Optional.of(running);
}
/**
 * Polls the tester for test progress, and translates the reported status into a run status:
 * empty (keep polling) while running, {@code aborted} if the deployment or tester certificate
 * expired, and a terminal status otherwise.
 */
private Optional<RunStatus> endTests(RunId id, DualLogger logger) {
if (deployment(id.application(), id.type()).isEmpty()) {
logger.log(INFO, "Deployment expired before tests could complete.");
return Optional.of(aborted);
}
// An expired tester certificate means we can no longer talk to the tester securely.
Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate();
if (testerCertificate.isPresent()) {
try {
testerCertificate.get().checkValidity(Date.from(controller.clock().instant()));
}
catch (CertificateExpiredException | CertificateNotYetValidException e) {
logger.log(INFO, "Tester certificate expired before tests could complete.");
return Optional.of(aborted);
}
}
// Pull the latest test log before inspecting the status.
controller.jobController().updateTestLog(id);
TesterCloud.Status testStatus = controller.jobController().cloud().getStatus(getTesterDeploymentId(id));
switch (testStatus) {
case NOT_STARTED:
throw new IllegalStateException("Tester reports tests not started, even though they should have!");
case RUNNING:
return Optional.empty();
case FAILURE:
logger.log("Tests failed.");
return Optional.of(testFailure);
case ERROR:
// ERROR means the tester itself malfunctioned, as opposed to the tests failing.
logger.log(INFO, "Tester failed running its tests!");
return Optional.of(error);
case SUCCESS:
logger.log("Tests completed successfully.");
return Optional.of(running);
default:
throw new IllegalStateException("Unknown status '" + testStatus + "'!");
}
}
/** Copies the Vespa log of the deployment, if it still exists, into the job run's log. */
private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) {
    boolean stillDeployed = deployment(id.application(), id.type()).isPresent();
    if ( ! stillDeployed)
        return Optional.of(running);
    try {
        controller.jobController().updateVespaLog(id);
        return Optional.of(running);
    }
    catch (Exception e) {
        // Log collection is best-effort, but a failure here still ends the step with an error.
        logger.log(INFO, "Failure getting vespa logs for " + id, e);
        return Optional.of(error);
    }
}
/** Deactivates the real deployment for the given run; failures are retried for up to one hour. */
private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) {
    try {
        logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
        controller.applications().deactivate(id.application(), id.type().zone(controller.system()));
        return Optional.of(running);
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Failed deleting application " + id.application(), e);
        Instant stepStartedAt = controller.jobController().run(id).get().stepInfo(deactivateReal).get().startTime().get();
        Instant deadline = controller.clock().instant().minus(Duration.ofHours(1));
        // Keep retrying until the step has been running for an hour, then give up with an error.
        if (stepStartedAt.isBefore(deadline))
            return Optional.of(error);
        return Optional.empty();
    }
}
/** Deactivates the tester deployment for the given run; failures are retried for up to one hour. */
private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) {
try {
logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
controller.jobController().deactivateTester(id.tester(), id.type());
return Optional.of(running);
}
catch (RuntimeException e) {
logger.log(WARNING, "Failed deleting tester of " + id.application(), e);
// Retry (empty) until this step has been running for an hour, then fail with an error.
Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateTester).get().startTime().get();
return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(error)
: Optional.empty();
}
}
/** Reports the outcome of the run, sending a failure notification mail if the run has failed. */
private Optional<RunStatus> report(RunId id, DualLogger logger) {
    try {
        Optional<Run> activeRun = controller.jobController().active(id);
        if (activeRun.isPresent() && activeRun.get().hasFailed())
            sendNotification(activeRun.get(), logger);
    }
    catch (IllegalStateException e) {
        logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e);
        return Optional.of(error);
    }
    return Optional.of(running);
}
/** Sends a mail with a notification of a failed run, if one should be sent. */
private void sendNotification(Run run, DualLogger logger) {
Application application = controller.applications().requireApplication(TenantAndApplicationId.from(run.id().application()));
Notifications notifications = application.deploymentSpec().requireInstance(run.id().application().instance()).notifications();
// Whether the failure is for the application version currently being rolled out (a new commit).
boolean newCommit = application.require(run.id().application().instance()).change().application()
.map(run.versions().targetApplication()::equals)
.orElse(false);
When when = newCommit ? failingCommit : failing;
List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when));
if (notifications.emailRolesFor(when).contains(author))
run.versions().targetApplication().authorEmail().ifPresent(recipients::add);
if (recipients.isEmpty())
return;
try {
mailOf(run, recipients).ifPresent(controller.serviceRegistry().mailer()::send);
}
catch (RuntimeException e) {
// Notification is best-effort: a mailer failure must not affect the run itself.
logger.log(INFO, "Exception trying to send mail for " + run.id(), e);
}
}
/** Returns the notification mail matching the status of the given run, if any applies. */
private Optional<Mail> mailOf(Run run, List<String> recipients) {
    RunId id = run.id();
    switch (run.status()) {
        case running:
        case aborted:
        case success:
            return Optional.empty(); // Nothing failed — no mail to send.
        case outOfCapacity:
            // Only worth notifying about when it blocks a production deployment.
            return id.type().isProduction() ? Optional.of(mails.outOfCapacity(id, recipients))
                                            : Optional.empty();
        case deploymentFailed:
            return Optional.of(mails.deploymentFailure(id, recipients));
        case installationFailed:
            return Optional.of(mails.installationFailure(id, recipients));
        case testFailure:
            return Optional.of(mails.testFailure(id, recipients));
        case error:
        case endpointCertificateTimeout:
            return Optional.of(mails.systemError(id, recipients));
        default:
            logger.log(WARNING, "Don't know what mail to send for run status '" + run.status() + "'");
            return Optional.of(mails.systemError(id, recipients));
    }
}
/** Returns the deployment of the real application in the zone of the given job, if it exists. */
private Optional<Deployment> deployment(ApplicationId id, JobType type) {
    ZoneId zone = type.zone(controller.system());
    Deployment deployment = application(id).deployments().get(zone);
    return Optional.ofNullable(deployment);
}
/** Returns the real application with the given id. */
private Instance application(ApplicationId id) {
// Takes and immediately releases the application lock before reading.
// NOTE(review): presumably this ensures the subsequent read sees the latest stored state — confirm.
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { });
return controller.applications().requireInstance(id);
}
/**
 * Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout.
 *
 * We time out the job before the deployment expires, for zones where deployments are not persistent,
 * to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry,
 * and the given default installation timeout, minus one minute, is used as a timeout threshold.
 */
private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) {
// In non-CD systems, a deployment made after the run started resets the clock — never time out then.
Run run = controller.jobController().run(id).get();
if ( ! controller.system().isCd() && run.start().isAfter(deployment.at()))
return false;
// Use the zone's deployment TTL when it is shorter than the default timeout.
Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone())
.filter(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0)
.orElse(defaultTimeout);
// The one-minute margin leaves time to collect logs before the deployment expires.
return deployment.at().isBefore(controller.clock().instant().minus(timeout.minus(Duration.ofMinutes(1))));
}
/** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */
private ApplicationPackage testerPackage(RunId id) {
ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication();
DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec();
ZoneId zone = id.type().zone(controller.system());
// Tester certificates are only used in public systems, and only for test environments.
boolean useTesterCertificate = controller.system().isPublic() && id.type().environment().isTest();
byte[] servicesXml = servicesXml(! controller.system().isPublic(),
useTesterCertificate,
testerResourcesFor(zone, spec.requireInstance(id.application().instance())));
// The fat-jar test bundle previously stored for this application version.
byte[] testPackage = controller.applications().applicationStore().getTester(id.application().tenant(), id.application().application(), version);
byte[] deploymentXml = deploymentXml(id.tester(),
spec.athenzDomain(),
spec.requireInstance(id.application().instance()).athenzService(zone.environment(), zone.region()));
try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) {
zipBuilder.add(testPackage);
zipBuilder.add("services.xml", servicesXml);
zipBuilder.add("deployment.xml", deploymentXml);
if (useTesterCertificate)
appendAndStoreCertificate(zipBuilder, id);
zipBuilder.close();
return new ApplicationPackage(zipBuilder.toByteArray());
}
}
/**
 * Generates a self-signed key pair and certificate for the tester, stores the certificate with
 * the run, and adds both key and certificate (PEM) to the tester package under artifacts/.
 */
private void appendAndStoreCertificate(ZipBuilder zipBuilder, RunId id) {
KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048);
// Subject encodes tester id, job type and run number, making each run's certificate unique.
X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "." + id.number());
X509Certificate certificate = X509CertificateBuilder.fromKeypair(keyPair,
subject,
controller.clock().instant(),
controller.clock().instant().plus(certificateTimeout),
SignatureAlgorithm.SHA512_WITH_RSA,
BigInteger.valueOf(1))
.build();
// Stored so the controller can later verify the certificate the tester presents.
controller.jobController().storeTesterCertificate(id, certificate);
zipBuilder.add("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8));
zipBuilder.add("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8));
}
/** Returns the deployment id of the tester application for the given run. */
private DeploymentId getTesterDeploymentId(RunId runId) {
    return new DeploymentId(runId.tester().id(), runId.type().zone(controller.system()));
}
/**
 * Returns the node resources to use for the tester in the given zone: the tester flavor declared
 * for the first matching deployment step, if any, or otherwise a default — larger for AWS zones.
 */
static NodeResources testerResourcesFor(ZoneId zone, DeploymentInstanceSpec spec) {
return spec.steps().stream()
.filter(step -> step.concerns(zone.environment()))
.findFirst()
.flatMap(step -> step.zones().get(0).testerFlavor())
.map(NodeResources::fromLegacyName)
// NOTE(review): AWS detection by region-name substring — presumably matches the naming scheme; confirm.
.orElse(zone.region().value().contains("aws-") ?
DEFAULT_TESTER_RESOURCES_AWS : DEFAULT_TESTER_RESOURCES);
}
/** Returns the generated services.xml content for the tester application. */
static byte[] servicesXml(boolean systemUsesAthenz, boolean useTesterCertificate, NodeResources resources) {
int jdiscMemoryGb = 2;
int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb());
int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2);
String resourceString = String.format(Locale.ENGLISH,
"<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>",
resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name());
String servicesXml =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<services xmlns:deploy='vespa' version='1.0'>\n" +
" <container version='1.0' id='tester'>\n" +
"\n" +
" <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" +
" <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" +
" <artifactsPath>artifacts</artifactsPath>\n" +
" <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" +
" <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
" <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" +
" </config>\n" +
" </component>\n" +
"\n" +
" <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" +
" <binding>http:
" </handler>\n" +
"\n" +
" <nodes count=\"1\" allocated-memory=\"" + jdiscMemoryPct + "%\">\n" +
" " + resourceString + "\n" +
" </nodes>\n" +
" </container>\n" +
"</services>\n";
return servicesXml.getBytes(UTF_8);
}
/** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */
private static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) {
    StringBuilder xml = new StringBuilder();
    xml.append("<?xml version='1.0' encoding='UTF-8'?>\n");
    xml.append("<deployment version=\"1.0\" ");
    athenzDomain.ifPresent(domain -> xml.append("athenz-domain=\"").append(domain.value()).append("\" "));
    athenzService.ifPresent(service -> xml.append("athenz-service=\"").append(service.value()).append("\" "));
    xml.append(">");
    xml.append(" <instance id=\"").append(id.id().instance().value()).append("\" />");
    xml.append("</deployment>");
    return xml.toString().getBytes(UTF_8);
}
/** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */
private class DualLogger {
// The run and step all messages from this logger are attributed to.
private final RunId id;
private final Step step;
private DualLogger(RunId id, Step step) {
this.id = id;
this.step = step;
}
/** Logs the given messages at INFO level to the job log only. */
private void log(String... messages) {
log(List.of(messages));
}
/** Logs pre-built log entries verbatim to the job log. */
private void logAll(List<LogEntry> messages) {
controller.jobController().log(id, step, messages);
}
/** Logs the given messages at INFO level to the job log only. */
private void log(List<String> messages) {
controller.jobController().log(id, step, INFO, messages);
}
private void log(Level level, String message) {
log(level, message, null);
}
/** Logs the exception to the class logger only, exposing just the message to the user-facing job log. */
private void logWithInternalException(Level level, String message, Throwable thrown) {
logger.log(level, id + " at " + step + ": " + message, thrown);
controller.jobController().log(id, step, level, message);
}
/** Logs to both the class logger and the job log, appending any stack trace to the job log message. */
private void log(Level level, String message, Throwable thrown) {
logger.log(level, id + " at " + step + ": " + message, thrown);
if (thrown != null) {
ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream();
thrown.printStackTrace(new PrintStream(traceBuffer));
message += "\n" + traceBuffer;
}
controller.jobController().log(id, step, level, message);
}
}
} | class InternalStepRunner implements StepRunner {
private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName());
// Default tester node resources; AWS zones get a larger flavor.
static final NodeResources DEFAULT_TESTER_RESOURCES =
new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any);
static final NodeResources DEFAULT_TESTER_RESOURCES_AWS =
new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any);
// Time-outs for the various phases of a run; see the step methods for how each is applied.
static final Duration capacityTimeout = Duration.ofMinutes(5);
static final Duration endpointTimeout = Duration.ofMinutes(15);
static final Duration endpointCertificateTimeout = Duration.ofMinutes(15);
static final Duration testerTimeout = Duration.ofMinutes(30);
static final Duration installationTimeout = Duration.ofMinutes(60);
// Validity period of the generated tester certificate.
static final Duration certificateTimeout = Duration.ofMinutes(300);
private final Controller controller;
private final TestConfigSerializer testConfigSerializer;
private final DeploymentFailureMails mails;
/** Creates a step runner backed by the given controller. */
public InternalStepRunner(Controller controller) {
this.controller = controller;
this.testConfigSerializer = new TestConfigSerializer(controller.system());
this.mails = new DeploymentFailureMails(controller.zoneRegistry());
}
/**
 * Runs the given step of the given run, dispatching to the matching step method.
 * Returns empty to signal "not done yet — retry", or the resulting run status.
 */
@Override
public Optional<RunStatus> run(LockedStep step, RunId id) {
DualLogger logger = new DualLogger(id, step.get());
try {
switch (step.get()) {
case deployTester: return deployTester(id, logger);
case deployInitialReal: return deployInitialReal(id, logger);
case installInitialReal: return installInitialReal(id, logger);
case deployReal: return deployReal(id, logger);
case installTester: return installTester(id, logger);
case installReal: return installReal(id, logger);
case startStagingSetup: return startTests(id, true, logger);
case endStagingSetup:
case endTests: return endTests(id, logger);
case startTests: return startTests(id, false, logger);
case copyVespaLogs: return copyVespaLogs(id, logger);
case deactivateReal: return deactivateReal(id, logger);
case deactivateTester: return deactivateTester(id, logger);
case report: return report(id, logger);
default: throw new AssertionError("Unknown step '" + step + "'!");
}
}
catch (UncheckedIOException e) {
// IO exceptions are assumed transient: log internally and retry.
logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e);
return Optional.empty();
}
catch (RuntimeException e) {
logger.log(WARNING, "Unexpected exception running " + id, e);
// Cleanup steps must eventually run, so keep retrying them instead of failing the run.
if (step.get().alwaysRun()) {
logger.log("Will keep trying, as this is a cleanup step.");
return Optional.empty();
}
return Optional.of(error);
}
}
/** Deploys the source (or, failing that, target) versions as the starting point of this run. */
private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) {
    Versions versions = controller.jobController().run(id).get().versions();
    Version platform = versions.sourcePlatform().orElse(versions.targetPlatform());
    ApplicationVersion application = versions.sourceApplication().orElse(versions.targetApplication());
    logger.log("Deploying platform version " + platform + " and application version " + application.id() + " ...");
    return deployReal(id, true, logger);
}
/** Deploys the target platform and application versions of this run. */
private Optional<RunStatus> deployReal(RunId id, DualLogger logger) {
    Versions versions = controller.jobController().run(id).get().versions();
    String message = "Deploying platform version " + versions.targetPlatform() +
                     " and application version " + versions.targetApplication().id() + " ...";
    logger.log(message);
    return deployReal(id, false, logger);
}
/** Deploys the real application, with {@code setTheStage} selecting initial (source) versus target versions. */
private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, DualLogger logger) {
return deploy(id.application(),
id.type(),
() -> controller.applications().deploy2(id.job(), setTheStage),
// Time-out bookkeeping uses the start time of whichever deploy step this is.
controller.jobController().run(id).get()
.stepInfo(setTheStage ? deployInitialReal : deployReal).get()
.startTime().get(),
logger);
}
/** Deploys the tester container application, on the current system version. */
private Optional<RunStatus> deployTester(RunId id, DualLogger logger) {
Version platform = controller.systemVersion();
logger.log("Deploying the tester container on platform " + platform + " ...");
return deploy(id.tester().id(),
id.type(),
() -> controller.applications().deployTester(id.tester(),
testerPackage(id),
id.type().zone(controller.system()),
platform),
controller.jobController().run(id).get()
.stepInfo(deployTester).get()
.startTime().get(),
logger);
}
private Optional<RunStatus> deploy(ApplicationId id, JobType type, Supplier<ActivateResult> deployment,
Instant startTime, DualLogger logger) {
try {
PrepareResponse prepareResponse = deployment.get().prepareResponse();
if (prepareResponse.log != null)
logger.logAll(prepareResponse.log.stream()
.map(entry -> new LogEntry(0,
Instant.ofEpochMilli(entry.time),
LogEntry.typeOf(LogLevel.parse(entry.level)),
entry.message))
.collect(toList()));
if ( ! prepareResponse.configChangeActions.refeedActions.stream().allMatch(action -> action.allowed)) {
List<String> messages = new ArrayList<>();
messages.add("Deploy failed due to non-compatible changes that require re-feed.");
messages.add("Your options are:");
messages.add("1. Revert the incompatible changes.");
messages.add("2. If you think it is safe in your case, you can override this validation, see");
messages.add(" http:
messages.add("3. Deploy as a new application under a different name.");
messages.add("Illegal actions:");
prepareResponse.configChangeActions.refeedActions.stream()
.filter(action -> ! action.allowed)
.flatMap(action -> action.messages.stream())
.forEach(messages::add);
logger.log(messages);
return Optional.of(deploymentFailed);
}
if (prepareResponse.configChangeActions.restartActions.isEmpty())
logger.log("No services requiring restart.");
else
prepareResponse.configChangeActions.restartActions.stream()
.flatMap(action -> action.services.stream())
.map(service -> service.hostName)
.sorted().distinct()
.map(Hostname::new)
.forEach(hostname -> {
controller.applications().restart(new DeploymentId(id, type.zone(controller.system())), Optional.of(hostname));
logger.log("Schedule service restart on host " + hostname.id() + ".");
});
logger.log("Deployment successful.");
if (prepareResponse.message != null)
logger.log(prepareResponse.message);
return Optional.of(running);
}
catch (ConfigServerException e) {
Optional<RunStatus> result = startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(deploymentFailed) : Optional.empty();
switch (e.getErrorCode()) {
case CERTIFICATE_NOT_READY:
if (startTime.plus(endpointCertificateTimeout).isBefore(controller.clock().instant())) {
logger.log("Deployment failed to find provisioned endpoint certificate after " + endpointCertificateTimeout);
return Optional.of(RunStatus.endpointCertificateTimeout);
}
return result;
case ACTIVATION_CONFLICT:
case APPLICATION_LOCK_FAILURE:
logger.log("Deployment failed with possibly transient error " + e.getErrorCode() +
", will retry: " + e.getMessage());
return result;
case LOAD_BALANCER_NOT_READY:
case PARENT_HOST_NOT_READY:
logger.log(e.getServerMessage());
return result;
case OUT_OF_CAPACITY:
logger.log(e.getServerMessage());
return controller.system().isCd() && startTime.plus(capacityTimeout).isAfter(controller.clock().instant())
? Optional.empty()
: Optional.of(outOfCapacity);
case INVALID_APPLICATION_PACKAGE:
case BAD_REQUEST:
logger.log(e.getMessage());
return Optional.of(deploymentFailed);
}
throw e;
}
catch (EndpointCertificateException e) {
switch (e.type()) {
case CERT_NOT_AVAILABLE:
if (startTime.plus(endpointCertificateTimeout).isBefore(controller.clock().instant())) {
logger.log("Deployment failed to find provisioned endpoint certificate after " + endpointCertificateTimeout);
return Optional.of(RunStatus.endpointCertificateTimeout);
}
return Optional.empty();
default:
throw e;
}
}
}
/** Awaits installation of the initial (source-version) real deployment; see the three-argument overload. */
private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) {
return installReal(id, true, logger);
}
/** Awaits installation of the target-version real deployment; see the three-argument overload. */
private Optional<RunStatus> installReal(RunId id, DualLogger logger) {
return installReal(id, false, logger);
}
/**
 * Polls convergence of the real deployment on the expected platform, logging node and service
 * progress along the way. Returns {@code running} once converged and endpoints answer,
 * {@code installationFailed}/{@code error} on time-outs, or empty to keep polling.
 */
private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
logger.log(INFO, "Deployment expired before installation was successful.");
return Optional.of(installationFailed);
}
Versions versions = controller.jobController().run(id).get().versions();
// The platform we wait for depends on whether this is the initial (source) or target deployment.
Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform();
Run run = controller.jobController().run(id).get();
Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(id.application(), id.type().zone(controller.system())),
Optional.of(platform));
if (services.isEmpty()) {
logger.log("Config status not currently available -- will retry.");
return Optional.empty();
}
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
id.application(),
ImmutableSet.of(active, reserved));
// Parent hosts are fetched too, to report on pending host-level upgrades.
List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
NodeList nodeList = NodeList.of(nodes, parents, services.get());
// On the first tick, log full details for all nodes; later ticks log only changing nodes.
boolean firstTick = run.convergenceSummary().isEmpty();
if (firstTick) {
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
}
ConvergenceSummary summary = nodeList.summary();
if (summary.converged()) {
controller.jobController().locked(id, lockedRun -> lockedRun.withSummary(null));
if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) {
if (containersAreUp(id.application(), id.type().zone(controller.system()), logger)) {
logger.log("Installation succeeded!");
return Optional.of(running);
}
}
else if (timedOut(id, deployment.get(), endpointTimeout)) {
logger.log(WARNING, "Endpoints failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
return Optional.of(error);
}
}
// Collect failure conditions; the last matching one wins as the reported reason.
String failureReason = null;
NodeList suspendedTooLong = nodeList.suspendedSince(controller.clock().instant().minus(installationTimeout));
if ( ! suspendedTooLong.isEmpty()) {
failureReason = "Some nodes have been suspended for more than " + installationTimeout.toMinutes() + " minutes:\n" +
suspendedTooLong.asList().stream().map(node -> node.node().hostname().value()).collect(joining("\n"));
}
if (run.noNodesDownSince()
.map(since -> since.isBefore(controller.clock().instant().minus(installationTimeout)))
.orElse(false)) {
if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0)
failureReason ="No nodes allowed to suspend to progress installation for " + installationTimeout.toMinutes() + " minutes.";
else
failureReason = "Nodes not able to start with new application package.";
}
Duration timeout = JobRunner.jobTimeout.minusHours(1);
if (timedOut(id, deployment.get(), timeout)) {
failureReason = "Installation failed to complete within " + timeout.toHours() + "hours!";
}
if (failureReason != null) {
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
logger.log(INFO, failureReason);
return Optional.of(installationFailed);
}
if ( ! firstTick)
logger.log(nodeList.expectedDown().concat(nodeList.needsNewConfig()).asList().stream()
.distinct()
.flatMap(node -> nodeDetails(node, false))
.collect(toList()));
// Record the convergence summary, and track how long no node has been allowed down.
controller.jobController().locked(id, lockedRun -> {
Instant noNodesDownSince = nodeList.allowedDown().size() == 0 ? lockedRun.noNodesDownSince().orElse(controller.clock().instant()) : null;
return lockedRun.noNodesDownSince(noNodesDownSince).withSummary(summary);
});
return Optional.empty();
}
/**
 * Polls convergence of the tester deployment on the system version. Returns {@code running} once
 * its containers answer, {@code error} on time-out, or empty to keep polling.
 */
private Optional<RunStatus> installTester(RunId id, DualLogger logger) {
Run run = controller.jobController().run(id).get();
Version platform = controller.systemVersion();
ZoneId zone = id.type().zone(controller.system());
ApplicationId testerId = id.tester().id();
Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(testerId, zone),
Optional.of(platform));
if (services.isEmpty()) {
logger.log("Config status not currently available -- will retry.");
// Give up if config status has been unavailable for five minutes since the step started.
return run.stepInfo(installTester).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
? Optional.of(error)
: Optional.empty();
}
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone,
testerId,
ImmutableSet.of(active, reserved));
// Parent hosts are fetched too, to report on pending host-level upgrades.
List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(zone,
nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
NodeList nodeList = NodeList.of(nodes, parents, services.get());
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, false))
.collect(toList()));
if (nodeList.summary().converged() && testerContainersAreUp(testerId, zone, logger)) {
logger.log("Tester container successfully installed!");
return Optional.of(running);
}
if (run.stepInfo(installTester).get().startTime().get().plus(testerTimeout).isBefore(controller.clock().instant())) {
logger.log(WARNING, "Installation of tester failed to complete within " + testerTimeout.toMinutes() + " minutes!");
return Optional.of(error);
}
return Optional.empty();
}
/** Returns true iff all containers in the deployment give 100 consecutive 200 OK responses on /status.html. */
private boolean containersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
    var zoneEndpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zoneId)));
    if ( ! zoneEndpoints.containsKey(zoneId))
        return false;
    boolean allReady = true;
    for (var endpoint : zoneEndpoints.get(zoneId)) {
        if ( ! controller.jobController().cloud().ready(endpoint.url())) {
            logger.log("Failed to get 100 consecutive OKs from " + endpoint);
            allReady = false;
            break;
        }
    }
    return allReady;
}
/** Returns true iff all containers in the tester deployment give 100 consecutive 200 OK responses on /status.html. */
private boolean testerContainersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
    DeploymentId deploymentId = new DeploymentId(id, zoneId);
    boolean ready = controller.jobController().cloud().testerReady(deploymentId);
    if ( ! ready)
        logger.log("Failed to get 100 consecutive OKs from tester container for " + deploymentId);
    return ready;
}
/** Writes a human-readable listing of the given endpoints, grouped by zone, to the job log. */
private void logEndpoints(Map<ZoneId, List<Endpoint>> zoneEndpoints, DualLogger logger) {
List<String> messages = new ArrayList<>();
messages.add("Found endpoints:");
zoneEndpoints.forEach((zone, endpoints) -> {
messages.add("- " + zone);
for (Endpoint endpoint : endpoints)
messages.add(" |-- " + endpoint.url() + " (cluster '" + endpoint.name() + "')");
});
logger.log(messages);
}
/**
 * Renders a detailed, human-readable status report for the given node: its orchestration state,
 * pending platform/OS/firmware upgrades, pending restarts and reboots, and — when
 * {@code printAllServices} is true, or the node needs new config — one line per service with its
 * current vs. wanted config generation.
 */
private Stream<String> nodeDetails(NodeWithServices node, boolean printAllServices) {
return Stream.concat(Stream.of(node.node().hostname() + ": " + humanize(node.node().serviceState()) + (node.node().suspendedSince().map(since -> " since " + since).orElse("")),
"--- platform " + node.node().wantedVersion() + (node.needsPlatformUpgrade()
? " <-- " + (node.node().currentVersion().isEmpty() ? "not booted" : node.node().currentVersion())
: "") +
// OS and firmware upgrades are only reported while the node is allowed to be down for them.
(node.needsOsUpgrade() && node.isAllowedDown()
? ", upgrading OS (" + node.node().wantedOsVersion() + " <-- " + node.node().currentOsVersion() + ")"
: "") +
(node.needsFirmwareUpgrade() && node.isAllowedDown()
? ", upgrading firmware"
: "") +
(node.needsRestart()
? ", restart pending (" + node.node().wantedRestartGeneration() + " <-- " + node.node().restartGeneration() + ")"
: "") +
(node.needsReboot()
? ", reboot pending (" + node.node().wantedRebootGeneration() + " <-- " + node.node().rebootGeneration() + ")"
: "")),
node.services().stream()
.filter(service -> printAllServices || node.needsNewConfig())
// A current generation of -1 means the service has not yet reported in at all.
.map(service -> "--- " + service.type() + " on port " + service.port() + (service.currentGeneration() == -1
? " has not started "
: " has config generation " + service.currentGeneration() + ", wanted is " + node.wantedConfigGeneration())));
}
/** Renders the given node service state as human-readable text; unknown states are shown verbatim. */
private String humanize(Node.ServiceState state) {
switch (state) {
case allowedDown: return "allowed to be DOWN";
case expectedUp: return "expected to be UP";
case unorchestrated: return "unorchestrated";
default: return state.name();
}
}
/**
 * Asks the tester container to start the test suite for the given run, once the endpoints of the
 * deployment under test are available. Returns {@code error} if the deployment, its endpoints, or
 * the tester itself has gone away; {@code running} once the tests have been started.
 */
private Optional<RunStatus> startTests(RunId id, boolean isSetup, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
logger.log(INFO, "Deployment expired before tests could start.");
return Optional.of(error);
}
// Tests are given endpoints for the zone under test plus all production deployments of the instance.
var deployments = controller.applications().requireInstance(id.application())
.productionDeployments().keySet().stream()
.map(zone -> new DeploymentId(id.application(), zone))
.collect(Collectors.toSet());
ZoneId zoneId = id.type().zone(controller.system());
deployments.add(new DeploymentId(id.application(), zoneId));
logger.log("Attempting to find endpoints ...");
var endpoints = controller.routing().zoneEndpointsOf(deployments);
if ( ! endpoints.containsKey(zoneId)) {
logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!");
return Optional.of(error);
}
logEndpoints(endpoints, logger);
if (!controller.jobController().cloud().testerReady(getTesterDeploymentId(id))) {
logger.log(WARNING, "Tester container went bad!");
return Optional.of(error);
}
logger.log("Starting tests ...");
// isSetup selects the staging-setup suite rather than the suite for the job type itself.
TesterCloud.Suite suite = TesterCloud.Suite.of(id.type(), isSetup);
byte[] config = testConfigSerializer.configJson(id.application(),
id.type(),
true,
endpoints,
controller.applications().contentClustersByZone(deployments));
controller.jobController().cloud().startTests(getTesterDeploymentId(id), suite, config);
return Optional.of(running);
}
/**
 * Polls the tester for test progress and maps its status to a run status.
 * Aborts if the deployment, or the tester's client certificate, expired while tests ran.
 */
private Optional<RunStatus> endTests(RunId id, DualLogger logger) {
    if (deployment(id.application(), id.type()).isEmpty()) {
        logger.log(INFO, "Deployment expired before tests could complete.");
        return Optional.of(aborted);
    }
    Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate();
    if (testerCertificate.isPresent()) {
        try {
            testerCertificate.get().checkValidity(Date.from(controller.clock().instant()));
        }
        catch (CertificateExpiredException | CertificateNotYetValidException e) {
            logger.log(INFO, "Tester certificate expired before tests could complete.");
            return Optional.of(aborted);
        }
    }
    controller.jobController().updateTestLog(id);
    TesterCloud.Status testStatus = controller.jobController().cloud().getStatus(getTesterDeploymentId(id));
    switch (testStatus) {
        case NOT_STARTED:
            throw new IllegalStateException("Tester reports tests not started, even though they should have!");
        case RUNNING:
            // Not done yet; empty result means the step is retried.
            return Optional.empty();
        case FAILURE:
            logger.log("Tests failed.");
            return Optional.of(testFailure);
        case ERROR:
            logger.log(INFO, "Tester failed running its tests!");
            return Optional.of(error);
        case SUCCESS:
            logger.log("Tests completed successfully.");
            return Optional.of(running);
        default:
            throw new IllegalStateException("Unknown status '" + testStatus + "'!");
    }
}
/** Copies the Vespa logs of the tested deployment into the run log, if the deployment still exists. */
private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) {
    if (deployment(id.application(), id.type()).isEmpty())
        return Optional.of(running); // Deployment is gone; nothing to copy.
    try {
        controller.jobController().updateVespaLog(id);
        return Optional.of(running);
    }
    catch (Exception e) {
        logger.log(INFO, "Failure getting vespa logs for " + id, e);
        return Optional.of(error);
    }
}
/** Deactivates the tested deployment; failures are retried for up to an hour after the step first started. */
private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) {
    try {
        logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
        controller.applications().deactivate(id.application(), id.type().zone(controller.system()));
        return Optional.of(running);
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Failed deleting application " + id.application(), e);
        // Empty result retries the step; give up with error one hour after the step first started.
        Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateReal).get().startTime().get();
        return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
                ? Optional.of(error)
                : Optional.empty();
    }
}
/** Deactivates the tester deployment; failures are retried for up to an hour after the step first started. */
private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) {
    try {
        logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
        controller.jobController().deactivateTester(id.tester(), id.type());
        return Optional.of(running);
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Failed deleting tester of " + id.application(), e);
        // Empty result retries the step; give up with error one hour after the step first started.
        Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateTester).get().startTime().get();
        return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
                ? Optional.of(error)
                : Optional.empty();
    }
}
/** Sends a failure notification for the run, if it failed; tolerates the job having been removed meanwhile. */
private Optional<RunStatus> report(RunId id, DualLogger logger) {
    try {
        controller.jobController().active(id).ifPresent(run -> {
            if (run.hasFailed())
                sendNotification(run, logger);
        });
    }
    catch (IllegalStateException e) {
        logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e);
        return Optional.of(error);
    }
    return Optional.of(running);
}
/** Sends a mail with a notification of a failed run, if one should be sent. */
private void sendNotification(Run run, DualLogger logger) {
    Application application = controller.applications().requireApplication(TenantAndApplicationId.from(run.id().application()));
    Notifications notifications = application.deploymentSpec().requireInstance(run.id().application().instance()).notifications();
    // True when the change being rolled out is the application version this run targets,
    // i.e., the failure is attributed to a new commit rather than a platform upgrade.
    boolean newCommit = application.require(run.id().application().instance()).change().application()
                                   .map(run.versions().targetApplication()::equals)
                                   .orElse(false);
    When when = newCommit ? failingCommit : failing;
    List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when));
    if (notifications.emailRolesFor(when).contains(author))
        run.versions().targetApplication().authorEmail().ifPresent(recipients::add);
    if (recipients.isEmpty())
        return;
    try {
        // Mail delivery is best effort; a mailer failure must not affect the run.
        mailOf(run, recipients).ifPresent(controller.serviceRegistry().mailer()::send);
    }
    catch (RuntimeException e) {
        logger.log(INFO, "Exception trying to send mail for " + run.id(), e);
    }
}
/** Returns the failure mail to send for the given run's status, or empty when no mail applies. */
private Optional<Mail> mailOf(Run run, List<String> recipients) {
    switch (run.status()) {
        case running:    // Fall-through: none of these are user-visible failures.
        case aborted:
        case success:
            return Optional.empty();
        case outOfCapacity:
            // Capacity shortage is only worth reporting for production jobs.
            return run.id().type().isProduction() ? Optional.of(mails.outOfCapacity(run.id(), recipients)) : Optional.empty();
        case deploymentFailed:
            return Optional.of(mails.deploymentFailure(run.id(), recipients));
        case installationFailed:
            return Optional.of(mails.installationFailure(run.id(), recipients));
        case testFailure:
            return Optional.of(mails.testFailure(run.id(), recipients));
        case error:
        case endpointCertificateTimeout:
            return Optional.of(mails.systemError(run.id(), recipients));
        default:
            // Unknown status: warn, but still notify as a system error.
            logger.log(WARNING, "Don't know what mail to send for run status '" + run.status() + "'");
            return Optional.of(mails.systemError(run.id(), recipients));
    }
}
/** Returns the deployment of the real application in the zone of the given job, if it exists. */
private Optional<Deployment> deployment(ApplicationId id, JobType type) {
    // Look up the deployment in the zone this job type targets; absent if not deployed there.
    ZoneId zone = type.zone(controller.system());
    Deployment deployment = application(id).deployments().get(zone);
    return Optional.ofNullable(deployment);
}
/** Returns the real application with the given id. */
private Instance application(ApplicationId id) {
    // Takes and immediately releases the application lock before reading; presumably this
    // serialises against in-flight modifications so the subsequent read is fresh — TODO confirm intent.
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { });
    return controller.applications().requireInstance(id);
}
/**
* Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout.
*
* We time out the job before the deployment expires, for zones where deployments are not persistent,
* to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry,
* and the given default installation timeout, minus one minute, is used as a timeout threshold.
*/
private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) {
    Run run = controller.jobController().run(id).get();
    // Outside CD, a deployment made after the run started resets the clock.
    if ( ! controller.system().isCd() && run.start().isAfter(deployment.at()))
        return false;
    // Use the zone's deployment time-to-live when it is shorter than the default timeout.
    Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone())
                                 .filter(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0)
                                 .orElse(defaultTimeout);
    // One minute is subtracted to leave room to collect Vespa logs before the deployment expires.
    return deployment.at().isBefore(controller.clock().instant().minus(timeout.minus(Duration.ofMinutes(1))));
}
/** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */
private ApplicationPackage testerPackage(RunId id) {
    ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication();
    DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec();
    ZoneId zone = id.type().zone(controller.system());
    // Tester certificates are generated only in public systems, for test environments.
    boolean useTesterCertificate = controller.system().isPublic() && id.type().environment().isTest();
    byte[] servicesXml = servicesXml(! controller.system().isPublic(),
                                     useTesterCertificate,
                                     testerResourcesFor(zone, spec.requireInstance(id.application().instance())));
    byte[] testPackage = controller.applications().applicationStore().getTester(id.application().tenant(), id.application().application(), version);
    byte[] deploymentXml = deploymentXml(id.tester(),
                                         spec.athenzDomain(),
                                         spec.requireInstance(id.application().instance()).athenzService(zone.environment(), zone.region()));
    // Wrap the stored test jar together with the generated services.xml and deployment.xml.
    try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) {
        zipBuilder.add(testPackage);
        zipBuilder.add("services.xml", servicesXml);
        zipBuilder.add("deployment.xml", deploymentXml);
        if (useTesterCertificate)
            appendAndStoreCertificate(zipBuilder, id);
        // Explicit close before reading the bytes; the try-with-resources close then follows — presumably idempotent.
        zipBuilder.close();
        return new ApplicationPackage(zipBuilder.toByteArray());
    }
}
/** Generates a self-signed tester certificate, stores it with the run, and adds key and cert to the package. */
private void appendAndStoreCertificate(ZipBuilder zipBuilder, RunId id) {
    KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048);
    // Subject encodes tester id, job type and run number, making each certificate unique per run.
    X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "." + id.number());
    X509Certificate certificate = X509CertificateBuilder.fromKeypair(keyPair,
                                                                     subject,
                                                                     controller.clock().instant(),
                                                                     controller.clock().instant().plus(certificateTimeout),
                                                                     SignatureAlgorithm.SHA512_WITH_RSA,
                                                                     BigInteger.valueOf(1))
                                                        .build();
    controller.jobController().storeTesterCertificate(id, certificate);
    zipBuilder.add("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8));
    zipBuilder.add("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8));
}
/** Returns the deployment id of the tester application for the given run. */
private DeploymentId getTesterDeploymentId(RunId runId) {
    return new DeploymentId(runId.tester().id(), runId.type().zone(controller.system()));
}
static NodeResources testerResourcesFor(ZoneId zone, DeploymentInstanceSpec spec) {
    // Use the tester flavor declared for the first deployment step concerning this environment,
    // if any; otherwise fall back to defaults, which are larger for AWS regions.
    return spec.steps().stream()
               .filter(step -> step.concerns(zone.environment()))
               .findFirst()
               .flatMap(step -> step.zones().get(0).testerFlavor())
               .map(NodeResources::fromLegacyName)
               .orElse(zone.region().value().contains("aws-") ?
                       DEFAULT_TESTER_RESOURCES_AWS : DEFAULT_TESTER_RESOURCES);
}
/** Returns the generated services.xml content for the tester application. */
static byte[] servicesXml(boolean systemUsesAthenz, boolean useTesterCertificate, NodeResources resources) {
    // Split memory between the jdisc container and the surefire test process:
    // jdisc gets a fixed 2 Gb (as a percentage of the node), tests get half the remainder.
    int jdiscMemoryGb = 2;
    int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb());
    int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2);
    String resourceString = String.format(Locale.ENGLISH,
                                          "<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>",
                                          resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name());
    String servicesXml =
            "<?xml version='1.0' encoding='UTF-8'?>\n" +
            "<services xmlns:deploy='vespa' version='1.0'>\n" +
            " <container version='1.0' id='tester'>\n" +
            "\n" +
            " <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" +
            " <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" +
            " <artifactsPath>artifacts</artifactsPath>\n" +
            " <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" +
            " <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
            " <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" +
            " </config>\n" +
            " </component>\n" +
            "\n" +
            " <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" +
            " <binding>http:
            " </handler>\n" +
            "\n" +
            " <nodes count=\"1\" allocated-memory=\"" + jdiscMemoryPct + "%\">\n" +
            " " + resourceString + "\n" +
            " </nodes>\n" +
            " </container>\n" +
            "</services>\n";
    return servicesXml.getBytes(UTF_8);
}
/** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */
private static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) {
    // Athenz attributes are emitted only when present in the real application's deployment spec.
    String deploymentSpec =
            "<?xml version='1.0' encoding='UTF-8'?>\n" +
            "<deployment version=\"1.0\" " +
            athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") +
            athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("") + ">" +
            " <instance id=\"" + id.id().instance().value() + "\" />" +
            "</deployment>";
    return deploymentSpec.getBytes(UTF_8);
}
/** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */
private class DualLogger {
    private final RunId id;   // Run whose log the entries are appended to.
    private final Step step;  // Step the entries are attached to.
    private DualLogger(RunId id, Step step) {
        this.id = id;
        this.step = step;
    }
    /** Logs the given messages at INFO level, to the run log only. */
    private void log(String... messages) {
        log(List.of(messages));
    }
    /** Appends pre-built entries to the run log, preserving their own levels and timestamps. */
    private void logAll(List<LogEntry> messages) {
        controller.jobController().log(id, step, messages);
    }
    /** Logs the given messages at INFO level, to the run log only. */
    private void log(List<String> messages) {
        controller.jobController().log(id, step, INFO, messages);
    }
    private void log(Level level, String message) {
        log(level, message, null);
    }
    /** Logs the throwable to the class logger only; the run log gets just the message. */
    private void logWithInternalException(Level level, String message, Throwable thrown) {
        logger.log(level, id + " at " + step + ": " + message, thrown);
        controller.jobController().log(id, step, level, message);
    }
    /** Logs to both the class logger and the run log; a stack trace, if any, is appended to the run log message. */
    private void log(Level level, String message, Throwable thrown) {
        logger.log(level, id + " at " + step + ": " + message, thrown);
        if (thrown != null) {
            ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream();
            thrown.printStackTrace(new PrintStream(traceBuffer));
            message += "\n" + traceBuffer;
        }
        controller.jobController().log(id, step, level, message);
    }
}
} |
Could probably collapse this, yes. | private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) {
var endpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zone)));
if ( ! endpoints.containsKey(zone)) {
logger.log("Endpoints not yet ready.");
return false;
}
var policies = controller.routing().policies().get(new DeploymentId(id, zone));
for (var endpoint : endpoints.get(zone)) {
HostName endpointName = HostName.from(endpoint.dnsName());
var ipAddress = controller.jobController().cloud().resolveHostName(endpointName);
if (ipAddress.isEmpty()) {
logger.log(INFO, "DNS lookup yielded no IP address for '" + endpointName + "'.");
return false;
}
if (endpoint.routingMethod() == RoutingMethod.exclusive) {
var policy = policies.get(new RoutingPolicyId(id, ClusterSpec.Id.from(endpoint.name()), zone));
if (policy == null)
throw new IllegalStateException(endpoint + " has no matching policy in " + policies);
var cNameValue = controller.jobController().cloud().resolveCName(endpointName);
if (cNameValue.isEmpty()) {
logger.log(INFO, "CNAME '" + endpointName + "' does not yet point to anything");
return false;
}
if ( ! cNameValue.get().equals(policy.canonicalName())) {
logger.log(INFO, "CNAME '" + endpointName + "' doesn't point to expected host name '" + policy.canonicalName() + "'");
return false;
}
var loadBalancerAddress = controller.jobController().cloud().resolveHostName(policy.canonicalName());
if (loadBalancerAddress.isEmpty()) {
logger.log(INFO, "DNS lookup yielded no IP address for load balancer '" + policy.canonicalName() + "'");
return false;
}
if ( ! loadBalancerAddress.equals(ipAddress)) {
logger.log(INFO, "IP address of CNAME '" + endpointName + "' (" + ipAddress.get() + ") and load balancer '" +
policy.canonicalName() + "' (" + loadBalancerAddress.get() + ") are not equal");
return false;
}
}
}
logEndpoints(endpoints, logger);
return true;
} | if ( ! cNameValue.get().equals(policy.canonicalName())) { | private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) {
var endpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zone)));
if ( ! endpoints.containsKey(zone)) {
logger.log("Endpoints not yet ready.");
return false;
}
var policies = controller.routing().policies().get(new DeploymentId(id, zone));
for (var endpoint : endpoints.get(zone)) {
HostName endpointName = HostName.from(endpoint.dnsName());
var ipAddress = controller.jobController().cloud().resolveHostName(endpointName);
if (ipAddress.isEmpty()) {
logger.log(INFO, "DNS lookup yielded no IP address for '" + endpointName + "'.");
return false;
}
if (endpoint.routingMethod() == RoutingMethod.exclusive) {
var policy = policies.get(new RoutingPolicyId(id, ClusterSpec.Id.from(endpoint.name()), zone));
if (policy == null)
throw new IllegalStateException(endpoint + " has no matching policy in " + policies);
var cNameValue = controller.jobController().cloud().resolveCname(endpointName);
if ( ! cNameValue.map(policy.canonicalName()::equals).orElse(false)) {
logger.log(INFO, "CNAME '" + endpointName + "' points at " +
cNameValue.map(name -> "'" + name + "'").orElse("nothing") +
" but should point at load balancer '" + policy.canonicalName() + "'");
return false;
}
var loadBalancerAddress = controller.jobController().cloud().resolveHostName(policy.canonicalName());
if ( ! loadBalancerAddress.equals(ipAddress)) {
logger.log(INFO, "IP address of CNAME '" + endpointName + "' (" + ipAddress.get() + ") and load balancer '" +
policy.canonicalName() + "' (" + loadBalancerAddress.orElse("empty") + ") are not equal");
return false;
}
}
}
logEndpoints(endpoints, logger);
return true;
} | class InternalStepRunner implements StepRunner {
private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName());
static final NodeResources DEFAULT_TESTER_RESOURCES =
new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any);
static final NodeResources DEFAULT_TESTER_RESOURCES_AWS =
new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any);
static final Duration capacityTimeout = Duration.ofMinutes(5);
static final Duration endpointTimeout = Duration.ofMinutes(15);
static final Duration endpointCertificateTimeout = Duration.ofMinutes(15);
static final Duration testerTimeout = Duration.ofMinutes(30);
static final Duration installationTimeout = Duration.ofMinutes(60);
static final Duration certificateTimeout = Duration.ofMinutes(300);
private final Controller controller;
private final TestConfigSerializer testConfigSerializer;
private final DeploymentFailureMails mails;
/** Creates a step runner backed by the given controller. */
public InternalStepRunner(Controller controller) {
    this.controller = controller;
    this.testConfigSerializer = new TestConfigSerializer(controller.system());
    this.mails = new DeploymentFailureMails(controller.zoneRegistry());
}
@Override
public Optional<RunStatus> run(LockedStep step, RunId id) {
    DualLogger logger = new DualLogger(id, step.get());
    try {
        // Dispatch to the handler for this step; each returns the new run status,
        // or empty to have the step retried later.
        switch (step.get()) {
            case deployTester: return deployTester(id, logger);
            case deployInitialReal: return deployInitialReal(id, logger);
            case installInitialReal: return installInitialReal(id, logger);
            case deployReal: return deployReal(id, logger);
            case installTester: return installTester(id, logger);
            case installReal: return installReal(id, logger);
            case startStagingSetup: return startTests(id, true, logger);
            case endStagingSetup:
            case endTests: return endTests(id, logger);
            case startTests: return startTests(id, false, logger);
            case copyVespaLogs: return copyVespaLogs(id, logger);
            case deactivateReal: return deactivateReal(id, logger);
            case deactivateTester: return deactivateTester(id, logger);
            case report: return report(id, logger);
            default: throw new AssertionError("Unknown step '" + step + "'!");
        }
    }
    catch (UncheckedIOException e) {
        // I/O trouble is assumed transient; retry without failing the run.
        logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e);
        return Optional.empty();
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Unexpected exception running " + id, e);
        // Cleanup steps keep retrying so resources are not leaked.
        if (step.get().alwaysRun()) {
            logger.log("Will keep trying, as this is a cleanup step.");
            return Optional.empty();
        }
        return Optional.of(error);
    }
}
/** Deploys the source versions (or the targets, when no source exists) to set the stage. */
private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) {
    Versions versions = controller.jobController().run(id).get().versions();
    logger.log("Deploying platform version " +
               versions.sourcePlatform().orElse(versions.targetPlatform()) +
               " and application version " +
               versions.sourceApplication().orElse(versions.targetApplication()).id() + " ...");
    return deployReal(id, true, logger);
}
/** Deploys the run's target platform and application versions. */
private Optional<RunStatus> deployReal(RunId id, DualLogger logger) {
    Versions versions = controller.jobController().run(id).get().versions();
    logger.log("Deploying platform version " + versions.targetPlatform() +
               " and application version " + versions.targetApplication().id() + " ...");
    return deployReal(id, false, logger);
}
/** Performs the actual deployment, passing the start time of the relevant deploy step for timeout bookkeeping. */
private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, DualLogger logger) {
    return deploy(id.application(),
                  id.type(),
                  () -> controller.applications().deploy2(id.job(), setTheStage),
                  controller.jobController().run(id).get()
                            .stepInfo(setTheStage ? deployInitialReal : deployReal).get()
                            .startTime().get(),
                  logger);
}
/** Deploys the tester application, on the current system version. */
private Optional<RunStatus> deployTester(RunId id, DualLogger logger) {
    Version platform = controller.systemVersion();
    logger.log("Deploying the tester container on platform " + platform + " ...");
    return deploy(id.tester().id(),
                  id.type(),
                  () -> controller.applications().deployTester(id.tester(),
                                                               testerPackage(id),
                                                               id.type().zone(controller.system()),
                                                               platform),
                  controller.jobController().run(id).get()
                            .stepInfo(deployTester).get()
                            .startTime().get(),
                  logger);
}
/**
 * Runs the given deployment, relays the config server's prepare log to the run log,
 * schedules any required service restarts, and maps config server errors to run statuses.
 * Empty results mean the step is retried.
 */
private Optional<RunStatus> deploy(ApplicationId id, JobType type, Supplier<ActivateResult> deployment,
                                   Instant startTime, DualLogger logger) {
    try {
        PrepareResponse prepareResponse = deployment.get().prepareResponse();
        // Relay the config server's own prepare log entries into the run log.
        if (prepareResponse.log != null)
            logger.logAll(prepareResponse.log.stream()
                                             .map(entry -> new LogEntry(0,
                                                                        Instant.ofEpochMilli(entry.time),
                                                                        LogEntry.typeOf(LogLevel.parse(entry.level)),
                                                                        entry.message))
                                             .collect(toList()));
        // Disallowed re-feed actions are a hard failure; explain the user's options.
        if ( ! prepareResponse.configChangeActions.refeedActions.stream().allMatch(action -> action.allowed)) {
            List<String> messages = new ArrayList<>();
            messages.add("Deploy failed due to non-compatible changes that require re-feed.");
            messages.add("Your options are:");
            messages.add("1. Revert the incompatible changes.");
            messages.add("2. If you think it is safe in your case, you can override this validation, see");
            messages.add(" http:
            messages.add("3. Deploy as a new application under a different name.");
            messages.add("Illegal actions:");
            prepareResponse.configChangeActions.refeedActions.stream()
                                                             .filter(action -> ! action.allowed)
                                                             .flatMap(action -> action.messages.stream())
                                                             .forEach(messages::add);
            logger.log(messages);
            return Optional.of(deploymentFailed);
        }
        // Schedule restarts for all services the config change requires restarted.
        if (prepareResponse.configChangeActions.restartActions.isEmpty())
            logger.log("No services requiring restart.");
        else
            prepareResponse.configChangeActions.restartActions.stream()
                                                              .flatMap(action -> action.services.stream())
                                                              .map(service -> service.hostName)
                                                              .sorted().distinct()
                                                              .map(Hostname::new)
                                                              .forEach(hostname -> {
                                                                  controller.applications().restart(new DeploymentId(id, type.zone(controller.system())), Optional.of(hostname));
                                                                  logger.log("Schedule service restart on host " + hostname.id() + ".");
                                                              });
        logger.log("Deployment successful.");
        if (prepareResponse.message != null)
            logger.log(prepareResponse.message);
        return Optional.of(running);
    }
    catch (ConfigServerException e) {
        // Transient errors are retried for up to an hour after the step started, then fail the deployment.
        Optional<RunStatus> result = startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
                                             ? Optional.of(deploymentFailed) : Optional.empty();
        switch (e.getErrorCode()) {
            case CERTIFICATE_NOT_READY:
                // Certificate provisioning gets its own, shorter timeout and a dedicated status.
                if (startTime.plus(endpointCertificateTimeout).isBefore(controller.clock().instant())) {
                    logger.log("Deployment failed to find provisioned endpoint certificate after " + endpointCertificateTimeout);
                    return Optional.of(RunStatus.endpointCertificateTimeout);
                }
                return result;
            case ACTIVATION_CONFLICT:
            case APPLICATION_LOCK_FAILURE:
                logger.log("Deployment failed with possibly transient error " + e.getErrorCode() +
                           ", will retry: " + e.getMessage());
                return result;
            case LOAD_BALANCER_NOT_READY:
            case PARENT_HOST_NOT_READY:
                logger.log(e.getServerMessage());
                return result;
            case OUT_OF_CAPACITY:
                // CD systems tolerate a short capacity wait; others fail with outOfCapacity immediately.
                logger.log(e.getServerMessage());
                return controller.system().isCd() && startTime.plus(capacityTimeout).isAfter(controller.clock().instant())
                       ? Optional.empty()
                       : Optional.of(outOfCapacity);
            case INVALID_APPLICATION_PACKAGE:
            case BAD_REQUEST:
                // User errors are terminal.
                logger.log(e.getMessage());
                return Optional.of(deploymentFailed);
        }
        throw e;
    }
    catch (EndpointCertificateException e) {
        switch (e.type()) {
            case CERT_NOT_AVAILABLE:
                if (startTime.plus(endpointCertificateTimeout).isBefore(controller.clock().instant())) {
                    logger.log("Deployment failed to find provisioned endpoint certificate after " + endpointCertificateTimeout);
                    return Optional.of(RunStatus.endpointCertificateTimeout);
                }
                return Optional.empty();
            default:
                throw e;
        }
    }
}
/** Awaits installation of the initial (source) deployment, i.e., sets the stage. */
private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) {
    boolean setTheStage = true;
    return installReal(id, setTheStage, logger);
}
/** Awaits installation of the target deployment. */
private Optional<RunStatus> installReal(RunId id, DualLogger logger) {
    boolean setTheStage = false;
    return installReal(id, setTheStage, logger);
}
/**
 * Tracks convergence of the real deployment: waits for config convergence on the expected
 * platform, then for endpoints and healthy containers; fails on the various timeouts.
 * Empty results mean the step is retried.
 */
private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) {
    Optional<Deployment> deployment = deployment(id.application(), id.type());
    if (deployment.isEmpty()) {
        logger.log(INFO, "Deployment expired before installation was successful.");
        return Optional.of(installationFailed);
    }
    Versions versions = controller.jobController().run(id).get().versions();
    // When setting the stage, convergence is awaited on the source platform, if one exists.
    Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform();
    Run run = controller.jobController().run(id).get();
    Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(id.application(), id.type().zone(controller.system())),
                                                                                                           Optional.of(platform));
    if (services.isEmpty()) {
        logger.log("Config status not currently available -- will retry.");
        return Optional.empty();
    }
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
                                                                                        id.application(),
                                                                                        ImmutableSet.of(active, reserved));
    List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
                                                                                          nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
    NodeList nodeList = NodeList.of(nodes, parents, services.get());
    // On the first tick, log full details for all nodes; later ticks log only changing nodes.
    boolean firstTick = run.convergenceSummary().isEmpty();
    if (firstTick) {
        logger.log(nodeList.asList().stream()
                           .flatMap(node -> nodeDetails(node, true))
                           .collect(toList()));
    }
    ConvergenceSummary summary = nodeList.summary();
    if (summary.converged()) {
        controller.jobController().locked(id, lockedRun -> lockedRun.withSummary(null));
        if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) {
            if (containersAreUp(id.application(), id.type().zone(controller.system()), logger)) {
                logger.log("Installation succeeded!");
                return Optional.of(running);
            }
        }
        else if (timedOut(id, deployment.get(), endpointTimeout)) {
            logger.log(WARNING, "Endpoints failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
            return Optional.of(error);
        }
    }
    // Not converged yet; check the various failure conditions.
    String failureReason = null;
    NodeList suspendedTooLong = nodeList.suspendedSince(controller.clock().instant().minus(installationTimeout));
    if ( ! suspendedTooLong.isEmpty()) {
        failureReason = "Some nodes have been suspended for more than " + installationTimeout.toMinutes() + " minutes:\n" +
                        suspendedTooLong.asList().stream().map(node -> node.node().hostname().value()).collect(joining("\n"));
    }
    if (run.noNodesDownSince()
           .map(since -> since.isBefore(controller.clock().instant().minus(installationTimeout)))
           .orElse(false)) {
        // No node has gone down for the whole timeout window: either nothing is allowed to
        // suspend, or the new package fails to start.
        if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0)
            failureReason ="No nodes allowed to suspend to progress installation for " + installationTimeout.toMinutes() + " minutes.";
        else
            failureReason = "Nodes not able to start with new application package.";
    }
    Duration timeout = JobRunner.jobTimeout.minusHours(1);
    if (timedOut(id, deployment.get(), timeout)) {
        failureReason = "Installation failed to complete within " + timeout.toHours() + "hours!";
    }
    if (failureReason != null) {
        logger.log(nodeList.asList().stream()
                           .flatMap(node -> nodeDetails(node, true))
                           .collect(toList()));
        logger.log(INFO, failureReason);
        return Optional.of(installationFailed);
    }
    if ( ! firstTick)
        logger.log(nodeList.expectedDown().concat(nodeList.needsNewConfig()).asList().stream()
                           .distinct()
                           .flatMap(node -> nodeDetails(node, false))
                           .collect(toList()));
    // Track the latest instant since which no nodes have been allowed down, for the timeout above.
    controller.jobController().locked(id, lockedRun -> {
        Instant noNodesDownSince = nodeList.allowedDown().size() == 0 ? lockedRun.noNodesDownSince().orElse(controller.clock().instant()) : null;
        return lockedRun.noNodesDownSince(noNodesDownSince).withSummary(summary);
    });
    return Optional.empty();
}
/** Tracks convergence of the tester deployment; fails if it does not come up within its timeouts. */
private Optional<RunStatus> installTester(RunId id, DualLogger logger) {
    Run run = controller.jobController().run(id).get();
    Version platform = controller.systemVersion();
    ZoneId zone = id.type().zone(controller.system());
    ApplicationId testerId = id.tester().id();
    Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(testerId, zone),
                                                                                                           Optional.of(platform));
    if (services.isEmpty()) {
        logger.log("Config status not currently available -- will retry.");
        // Give up on missing config status after five minutes from the step's start.
        return run.stepInfo(installTester).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
               ? Optional.of(error)
               : Optional.empty();
    }
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone,
                                                                                        testerId,
                                                                                        ImmutableSet.of(active, reserved));
    List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(zone,
                                                                                          nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
    NodeList nodeList = NodeList.of(nodes, parents, services.get());
    logger.log(nodeList.asList().stream()
                       .flatMap(node -> nodeDetails(node, false))
                       .collect(toList()));
    if (nodeList.summary().converged() && testerContainersAreUp(testerId, zone, logger)) {
        logger.log("Tester container successfully installed!");
        return Optional.of(running);
    }
    if (run.stepInfo(installTester).get().startTime().get().plus(testerTimeout).isBefore(controller.clock().instant())) {
        logger.log(WARNING, "Installation of tester failed to complete within " + testerTimeout.toMinutes() + " minutes!");
        return Optional.of(error);
    }
    return Optional.empty();
}
/** Returns true iff all containers in the deployment give 100 consecutive 200 OK responses on /status.html. */
private boolean containersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
    var endpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zoneId)));
    if ( ! endpoints.containsKey(zoneId))
        return false;
    for (var endpoint : endpoints.get(zoneId)) {
        // cloud().ready requires 100 consecutive 200 OK responses from the endpoint.
        boolean ready = controller.jobController().cloud().ready(endpoint.url());
        if ( ! ready) {
            logger.log("Failed to get 100 consecutive OKs from " + endpoint);
            return false;
        }
    }
    return true;
}
/** Returns true iff all containers in the tester deployment give 100 consecutive 200 OK responses on /status.html. */
/** Returns whether the tester container reports itself ready, logging a message when it does not. */
private boolean testerContainersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
    DeploymentId deploymentId = new DeploymentId(id, zoneId);
    boolean ready = controller.jobController().cloud().testerReady(deploymentId);
    if ( ! ready)
        logger.log("Failed to get 100 consecutive OKs from tester container for " + deploymentId);
    return ready;
}
/** Writes the discovered endpoints, grouped by zone, to the run log. */
private void logEndpoints(Map<ZoneId, List<Endpoint>> zoneEndpoints, DualLogger logger) {
    List<String> messages = new ArrayList<>();
    messages.add("Found endpoints:");
    for (Map.Entry<ZoneId, List<Endpoint>> zoneAndEndpoints : zoneEndpoints.entrySet()) {
        messages.add("- " + zoneAndEndpoints.getKey());
        for (Endpoint endpoint : zoneAndEndpoints.getValue())
            messages.add(" |-- " + endpoint.url() + " (cluster '" + endpoint.name() + "')");
    }
    logger.log(messages);
}
/**
 * Renders a node's convergence status — pending platform, OS, firmware, restart and reboot
 * work — followed by its services' config generations, as log lines.
 */
private Stream<String> nodeDetails(NodeWithServices node, boolean printAllServices) {
    return Stream.concat(Stream.of(node.node().hostname() + ": " + humanize(node.node().serviceState()) + (node.node().suspendedSince().map(since -> " since " + since).orElse("")),
                                   "--- platform " + node.node().wantedVersion() + (node.needsPlatformUpgrade()
                                           ? " <-- " + (node.node().currentVersion().isEmpty() ? "not booted" : node.node().currentVersion())
                                           : "") +
                                   (node.needsOsUpgrade() && node.isAllowedDown()
                                           ? ", upgrading OS (" + node.node().wantedOsVersion() + " <-- " + node.node().currentOsVersion() + ")"
                                           : "") +
                                   (node.needsFirmwareUpgrade() && node.isAllowedDown()
                                           ? ", upgrading firmware"
                                           : "") +
                                   (node.needsRestart()
                                           ? ", restart pending (" + node.node().wantedRestartGeneration() + " <-- " + node.node().restartGeneration() + ")"
                                           : "") +
                                   (node.needsReboot()
                                           ? ", reboot pending (" + node.node().wantedRebootGeneration() + " <-- " + node.node().rebootGeneration() + ")"
                                           : "")),
                         node.services().stream()
                             .filter(service -> printAllServices || node.needsNewConfig())
                             .map(service -> "--- " + service.type() + " on port " + service.port() + (service.currentGeneration() == -1
                                     ? " has not started "
                                     : " has config generation " + service.currentGeneration() + ", wanted is " + node.wantedConfigGeneration())));
}
/** Translates a node service state to a human-readable phrase; unknown states fall back to the enum name. */
private String humanize(Node.ServiceState state) {
    if (state == Node.ServiceState.allowedDown) return "allowed to be DOWN";
    if (state == Node.ServiceState.expectedUp) return "expected to be UP";
    if (state == Node.ServiceState.unorchestrated) return "unorchestrated";
    return state.name();
}
/**
 * Starts the test suite (or the staging setup, when {@code isSetup}) in the tester container.
 * Fails the run with {@code error} if the deployment under test has expired, its zone endpoints
 * are no longer visible, or the tester container is not ready.
 */
private Optional<RunStatus> startTests(RunId id, boolean isSetup, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
logger.log(INFO, "Deployment expired before tests could start.");
return Optional.of(error);
}
// Tests get endpoints of all production deployments, plus the zone under test.
var deployments = controller.applications().requireInstance(id.application())
.productionDeployments().keySet().stream()
.map(zone -> new DeploymentId(id.application(), zone))
.collect(Collectors.toSet());
ZoneId zoneId = id.type().zone(controller.system());
deployments.add(new DeploymentId(id.application(), zoneId));
logger.log("Attempting to find endpoints ...");
var endpoints = controller.routing().zoneEndpointsOf(deployments);
if ( ! endpoints.containsKey(zoneId)) {
logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!");
return Optional.of(error);
}
logEndpoints(endpoints, logger);
if (!controller.jobController().cloud().testerReady(getTesterDeploymentId(id))) {
logger.log(WARNING, "Tester container went bad!");
return Optional.of(error);
}
logger.log("Starting tests ...");
TesterCloud.Suite suite = TesterCloud.Suite.of(id.type(), isSetup);
byte[] config = testConfigSerializer.configJson(id.application(),
id.type(),
true,
endpoints,
controller.applications().contentClustersByZone(deployments));
controller.jobController().cloud().startTests(getTesterDeploymentId(id), suite, config);
return Optional.of(running);
}
/**
 * Polls the tester for test progress: returns empty while tests are still running, and the
 * corresponding terminal status otherwise. Aborts if the deployment under test, or the tester
 * certificate used for test authentication, has expired.
 */
private Optional<RunStatus> endTests(RunId id, DualLogger logger) {
if (deployment(id.application(), id.type()).isEmpty()) {
logger.log(INFO, "Deployment expired before tests could complete.");
return Optional.of(aborted);
}
Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate();
if (testerCertificate.isPresent()) {
try {
testerCertificate.get().checkValidity(Date.from(controller.clock().instant()));
}
catch (CertificateExpiredException | CertificateNotYetValidException e) {
logger.log(INFO, "Tester certificate expired before tests could complete.");
return Optional.of(aborted);
}
}
// Pull the latest test log before inspecting the tester's status.
controller.jobController().updateTestLog(id);
TesterCloud.Status testStatus = controller.jobController().cloud().getStatus(getTesterDeploymentId(id));
switch (testStatus) {
case NOT_STARTED:
throw new IllegalStateException("Tester reports tests not started, even though they should have!");
case RUNNING:
return Optional.empty(); // Keep polling.
case FAILURE:
logger.log("Tests failed.");
return Optional.of(testFailure);
case ERROR:
logger.log(INFO, "Tester failed running its tests!");
return Optional.of(error);
case SUCCESS:
logger.log("Tests completed successfully.");
return Optional.of(running);
default:
throw new IllegalStateException("Unknown status '" + testStatus + "'!");
}
}
/** Copies the Vespa log from the deployment, if it still exists; a failure to fetch logs errors the run. */
private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) {
    if (deployment(id.application(), id.type()).isPresent()) {
        try {
            controller.jobController().updateVespaLog(id);
        }
        catch (Exception e) {
            logger.log(INFO, "Failure getting vespa logs for " + id, e);
            return Optional.of(error);
        }
    }
    return Optional.of(running);
}
/** Deactivates the real deployment of the run; failures are retried for up to an hour before erroring out. */
private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) {
    ZoneId zone = id.type().zone(controller.system());
    try {
        logger.log("Deactivating deployment of " + id.application() + " in " + zone + " ...");
        controller.applications().deactivate(id.application(), zone);
        return Optional.of(running);
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Failed deleting application " + id.application(), e);
        Instant stepStart = controller.jobController().run(id).get().stepInfo(deactivateReal).get().startTime().get();
        if (stepStart.isBefore(controller.clock().instant().minus(Duration.ofHours(1))))
            return Optional.of(error);
        return Optional.empty();
    }
}
/** Deactivates the tester of the run; failures are retried for up to an hour before erroring out. */
private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) {
    try {
        logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
        controller.jobController().deactivateTester(id.tester(), id.type());
        return Optional.of(running);
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Failed deleting tester of " + id.application(), e);
        Instant stepStart = controller.jobController().run(id).get().stepInfo(deactivateTester).get().startTime().get();
        if (stepStart.isBefore(controller.clock().instant().minus(Duration.ofHours(1))))
            return Optional.of(error);
        return Optional.empty();
    }
}
/** Concludes the run by sending a failure notification, if the run is still active and has failed. */
private Optional<RunStatus> report(RunId id, DualLogger logger) {
    try {
        Optional<Run> active = controller.jobController().active(id);
        if (active.isPresent() && active.get().hasFailed())
            sendNotification(active.get(), logger);
    }
    catch (IllegalStateException e) {
        logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e);
        return Optional.of(error);
    }
    return Optional.of(running);
}
/**
 * Sends a mail with a notification of a failed run, if one should be sent.
 * Recipients are taken from the instance's notification settings for the relevant trigger
 * ({@code failingCommit} when the change under deployment is the application version this
 * run targets, {@code failing} otherwise), plus the commit author when so configured.
 */
private void sendNotification(Run run, DualLogger logger) {
Application application = controller.applications().requireApplication(TenantAndApplicationId.from(run.id().application()));
Notifications notifications = application.deploymentSpec().requireInstance(run.id().application().instance()).notifications();
boolean newCommit = application.require(run.id().application().instance()).change().application()
.map(run.versions().targetApplication()::equals)
.orElse(false);
When when = newCommit ? failingCommit : failing;
List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when));
if (notifications.emailRolesFor(when).contains(author))
run.versions().targetApplication().authorEmail().ifPresent(recipients::add);
if (recipients.isEmpty())
return;
try {
// Mail delivery is best-effort; failures are only logged.
mailOf(run, recipients).ifPresent(controller.serviceRegistry().mailer()::send);
}
catch (RuntimeException e) {
logger.log(INFO, "Exception trying to send mail for " + run.id(), e);
}
}
/**
 * Returns the failure mail to send for the given run, if any.
 * Non-failure statuses yield no mail, and out-of-capacity only produces one for production jobs;
 * unrecognized statuses are logged and treated as system errors.
 */
private Optional<Mail> mailOf(Run run, List<String> recipients) {
switch (run.status()) {
case running:
case aborted:
case success:
return Optional.empty();
case outOfCapacity:
return run.id().type().isProduction() ? Optional.of(mails.outOfCapacity(run.id(), recipients)) : Optional.empty();
case deploymentFailed:
return Optional.of(mails.deploymentFailure(run.id(), recipients));
case installationFailed:
return Optional.of(mails.installationFailure(run.id(), recipients));
case testFailure:
return Optional.of(mails.testFailure(run.id(), recipients));
case error:
case endpointCertificateTimeout:
return Optional.of(mails.systemError(run.id(), recipients));
default:
logger.log(WARNING, "Don't know what mail to send for run status '" + run.status() + "'");
return Optional.of(mails.systemError(run.id(), recipients));
}
}
/** Returns the deployment of the real application in the zone of the given job, if it exists. */
private Optional<Deployment> deployment(ApplicationId id, JobType type) {
    ZoneId zone = type.zone(controller.system());
    return Optional.ofNullable(application(id).deployments().get(zone));
}
/** Returns the real application with the given id. */
private Instance application(ApplicationId id) {
// Acquire (and immediately release) the application lock before reading — presumably to
// serialize with concurrent writers so the read below sees their committed state.
// NOTE(review): confirm this synchronization intent against ApplicationController.
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { });
return controller.applications().requireInstance(id);
}
/**
 * Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout.
 *
 * We time out the job before the deployment expires, for zones where deployments are not persistent,
 * to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry,
 * and the given default installation timeout, minus one minute, is used as a timeout threshold.
 */
private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) {
Run run = controller.jobController().run(id).get();
// NOTE(review): outside CD systems, a run which started after the deployment was made never
// times out here — presumably because the deployment predates this run; confirm intent.
if ( ! controller.system().isCd() && run.start().isAfter(deployment.at()))
return false;
// Use the zone's deployment TTL when it is shorter than the default timeout.
Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone())
.filter(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0)
.orElse(defaultTimeout);
return deployment.at().isBefore(controller.clock().instant().minus(timeout.minus(Duration.ofMinutes(1))));
}
/** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */
private ApplicationPackage testerPackage(RunId id) {
ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication();
DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec();
ZoneId zone = id.type().zone(controller.system());
// Tester certificates are only used for test jobs in public systems; see appendAndStoreCertificate.
boolean useTesterCertificate = controller.system().isPublic() && id.type().environment().isTest();
byte[] servicesXml = servicesXml(! controller.system().isPublic(),
useTesterCertificate,
testerResourcesFor(zone, spec.requireInstance(id.application().instance())));
byte[] testPackage = controller.applications().applicationStore().getTester(id.application().tenant(), id.application().application(), version);
byte[] deploymentXml = deploymentXml(id.tester(),
spec.athenzDomain(),
spec.requireInstance(id.application().instance()).athenzService(zone.environment(), zone.region()));
// Size hint: test package plus generated files, with some slack for zip overhead.
try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) {
zipBuilder.add(testPackage);
zipBuilder.add("services.xml", servicesXml);
zipBuilder.add("deployment.xml", deploymentXml);
if (useTesterCertificate)
appendAndStoreCertificate(zipBuilder, id);
zipBuilder.close();
return new ApplicationPackage(zipBuilder.toByteArray());
}
}
/**
 * Generates an RSA key pair and a self-signed certificate for the tester of the given run,
 * stores the certificate with the run (so its validity can be checked later; see endTests),
 * and adds the PEM key and certificate to the tester package being built.
 */
private void appendAndStoreCertificate(ZipBuilder zipBuilder, RunId id) {
KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048);
// Subject identifies the exact tester instance, job type and run number.
X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "." + id.number());
X509Certificate certificate = X509CertificateBuilder.fromKeypair(keyPair,
subject,
controller.clock().instant(),
controller.clock().instant().plus(certificateTimeout),
SignatureAlgorithm.SHA512_WITH_RSA,
BigInteger.valueOf(1))
.build();
controller.jobController().storeTesterCertificate(id, certificate);
zipBuilder.add("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8));
zipBuilder.add("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8));
}
/** Returns the deployment id of the tester application for the given run. */
private DeploymentId getTesterDeploymentId(RunId runId) {
    return new DeploymentId(runId.tester().id(), runId.type().zone(controller.system()));
}
/**
 * Returns the node resources for the tester in the given zone: the tester flavor declared for
 * the first deployment step concerning the zone's environment, if any; otherwise a default,
 * which is larger for AWS regions.
 */
static NodeResources testerResourcesFor(ZoneId zone, DeploymentInstanceSpec spec) {
return spec.steps().stream()
.filter(step -> step.concerns(zone.environment()))
.findFirst()
.flatMap(step -> step.zones().get(0).testerFlavor())
.map(NodeResources::fromLegacyName)
.orElse(zone.region().value().contains("aws-") ?
DEFAULT_TESTER_RESOURCES_AWS : DEFAULT_TESTER_RESOURCES);
}
/** Returns the generated services.xml content for the tester application. */
static byte[] servicesXml(boolean systemUsesAthenz, boolean useTesterCertificate, NodeResources resources) {
int jdiscMemoryGb = 2;
int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb());
int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2);
String resourceString = String.format(Locale.ENGLISH,
"<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>",
resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name());
String servicesXml =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<services xmlns:deploy='vespa' version='1.0'>\n" +
" <container version='1.0' id='tester'>\n" +
"\n" +
" <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" +
" <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" +
" <artifactsPath>artifacts</artifactsPath>\n" +
" <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" +
" <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
" <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" +
" </config>\n" +
" </component>\n" +
"\n" +
" <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" +
" <binding>http:
" </handler>\n" +
"\n" +
" <nodes count=\"1\" allocated-memory=\"" + jdiscMemoryPct + "%\">\n" +
" " + resourceString + "\n" +
" </nodes>\n" +
" </container>\n" +
"</services>\n";
return servicesXml.getBytes(UTF_8);
}
/** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */
private static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) {
    StringBuilder xml = new StringBuilder();
    xml.append("<?xml version='1.0' encoding='UTF-8'?>\n")
       .append("<deployment version=\"1.0\" ");
    athenzDomain.ifPresent(domain -> xml.append("athenz-domain=\"").append(domain.value()).append("\" "));
    athenzService.ifPresent(service -> xml.append("athenz-service=\"").append(service.value()).append("\" "));
    xml.append(">")
       .append(" <instance id=\"").append(id.id().instance().value()).append("\" />")
       .append("</deployment>");
    return xml.toString().getBytes(UTF_8);
}
/** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */
private class DualLogger {
// The run and step all messages from this logger are attributed to.
private final RunId id;
private final Step step;
private DualLogger(RunId id, Step step) {
this.id = id;
this.step = step;
}
/** Logs the given messages at INFO, to the job log only. */
private void log(String... messages) {
log(List.of(messages));
}
/** Logs pre-built log entries verbatim, to the job log only. */
private void logAll(List<LogEntry> messages) {
controller.jobController().log(id, step, messages);
}
/** Logs the given messages at INFO, to the job log only. */
private void log(List<String> messages) {
controller.jobController().log(id, step, INFO, messages);
}
private void log(Level level, String message) {
log(level, message, null);
}
/** Logs to both sinks, but the throwable (and its stack trace) goes to the internal logger only. */
private void logWithInternalException(Level level, String message, Throwable thrown) {
logger.log(level, id + " at " + step + ": " + message, thrown);
controller.jobController().log(id, step, level, message);
}
/** Logs to both sinks; the throwable's stack trace is appended to the job-log message. */
private void log(Level level, String message, Throwable thrown) {
logger.log(level, id + " at " + step + ": " + message, thrown);
if (thrown != null) {
ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream();
thrown.printStackTrace(new PrintStream(traceBuffer));
message += "\n" + traceBuffer;
}
controller.jobController().log(id, step, level, message);
}
}
} | class InternalStepRunner implements StepRunner {
private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName());
// Default tester node resources; AWS regions get a larger default. See testerResourcesFor.
static final NodeResources DEFAULT_TESTER_RESOURCES =
new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any);
static final NodeResources DEFAULT_TESTER_RESOURCES_AWS =
new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any);
// Step timeouts; each is consulted by the correspondingly named step implementation.
static final Duration capacityTimeout = Duration.ofMinutes(5);
static final Duration endpointTimeout = Duration.ofMinutes(15);
static final Duration endpointCertificateTimeout = Duration.ofMinutes(15);
static final Duration testerTimeout = Duration.ofMinutes(30);
static final Duration installationTimeout = Duration.ofMinutes(60);
static final Duration certificateTimeout = Duration.ofMinutes(300);
private final Controller controller;
private final TestConfigSerializer testConfigSerializer;
private final DeploymentFailureMails mails;
public InternalStepRunner(Controller controller) {
this.controller = controller;
this.testConfigSerializer = new TestConfigSerializer(controller.system());
this.mails = new DeploymentFailureMails(controller.zoneRegistry());
}
/**
 * Runs the given locked step of the given run, dispatching to the appropriate step handler.
 * Returns empty when the step should be retried later, and a terminal status otherwise.
 */
@Override
public Optional<RunStatus> run(LockedStep step, RunId id) {
DualLogger logger = new DualLogger(id, step.get());
try {
switch (step.get()) {
case deployTester: return deployTester(id, logger);
case deployInitialReal: return deployInitialReal(id, logger);
case installInitialReal: return installInitialReal(id, logger);
case deployReal: return deployReal(id, logger);
case installTester: return installTester(id, logger);
case installReal: return installReal(id, logger);
case startStagingSetup: return startTests(id, true, logger);
case endStagingSetup:
case endTests: return endTests(id, logger);
case startTests: return startTests(id, false, logger);
case copyVespaLogs: return copyVespaLogs(id, logger);
case deactivateReal: return deactivateReal(id, logger);
case deactivateTester: return deactivateTester(id, logger);
case report: return report(id, logger);
default: throw new AssertionError("Unknown step '" + step + "'!");
}
}
catch (UncheckedIOException e) {
// IO trouble is treated as transient: log internally and retry the step.
logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e);
return Optional.empty();
}
catch (RuntimeException e) {
logger.log(WARNING, "Unexpected exception running " + id, e);
// Cleanup steps must eventually run, so they are retried instead of failed.
if (step.get().alwaysRun()) {
logger.log("Will keep trying, as this is a cleanup step.");
return Optional.empty();
}
return Optional.of(error);
}
}
/** Deploys the source platform and application versions (falling back to targets), to set the stage. */
private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) {
    Versions versions = controller.jobController().run(id).get().versions();
    Version platform = versions.sourcePlatform().orElse(versions.targetPlatform());
    ApplicationVersion application = versions.sourceApplication().orElse(versions.targetApplication());
    logger.log("Deploying platform version " +
               platform +
               " and application version " +
               application.id() + " ...");
    return deployReal(id, true, logger);
}
/** Deploys the target platform and application versions of the run. */
private Optional<RunStatus> deployReal(RunId id, DualLogger logger) {
    Versions versions = controller.jobController().run(id).get().versions();
    String message = "Deploying platform version " + versions.targetPlatform() +
                     " and application version " + versions.targetApplication().id() + " ...";
    logger.log(message);
    return deployReal(id, false, logger);
}
/** Performs the actual deployment of the real application, using the step's start time for timeout tracking. */
private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, DualLogger logger) {
    Step step = setTheStage ? deployInitialReal : deployReal;
    Instant stepStart = controller.jobController().run(id).get().stepInfo(step).get().startTime().get();
    return deploy(id.application(),
                  id.type(),
                  () -> controller.applications().deploy2(id.job(), setTheStage),
                  stepStart,
                  logger);
}
/** Deploys the tester container application on the current system version. */
private Optional<RunStatus> deployTester(RunId id, DualLogger logger) {
    Version platform = controller.systemVersion();
    logger.log("Deploying the tester container on platform " + platform + " ...");
    ZoneId zone = id.type().zone(controller.system());
    Supplier<ActivateResult> deployment =
            () -> controller.applications().deployTester(id.tester(), testerPackage(id), zone, platform);
    Instant stepStart = controller.jobController().run(id).get().stepInfo(deployTester).get().startTime().get();
    return deploy(id.tester().id(), id.type(), deployment, stepStart, logger);
}
/**
 * Runs the given deployment and interprets its outcome: forwards the config server's prepare
 * log, fails on disallowed re-feed actions, schedules restarts of affected services, and maps
 * config server / certificate errors to retries or terminal statuses.
 * Returns {@code running} on success, empty to retry, or a failure status.
 */
private Optional<RunStatus> deploy(ApplicationId id, JobType type, Supplier<ActivateResult> deployment,
Instant startTime, DualLogger logger) {
try {
PrepareResponse prepareResponse = deployment.get().prepareResponse();
// Forward the config server's own prepare log to the job log.
if (prepareResponse.log != null)
logger.logAll(prepareResponse.log.stream()
.map(entry -> new LogEntry(0,
Instant.ofEpochMilli(entry.time),
LogEntry.typeOf(LogLevel.parse(entry.level)),
entry.message))
.collect(toList()));
// Disallowed re-feed actions are a hard failure; explain the user's options.
if ( ! prepareResponse.configChangeActions.refeedActions.stream().allMatch(action -> action.allowed)) {
List<String> messages = new ArrayList<>();
messages.add("Deploy failed due to non-compatible changes that require re-feed.");
messages.add("Your options are:");
messages.add("1. Revert the incompatible changes.");
messages.add("2. If you think it is safe in your case, you can override this validation, see");
messages.add("      http:
messages.add("3. Deploy as a new application under a different name.");
messages.add("Illegal actions:");
prepareResponse.configChangeActions.refeedActions.stream()
.filter(action -> ! action.allowed)
.flatMap(action -> action.messages.stream())
.forEach(messages::add);
logger.log(messages);
return Optional.of(deploymentFailed);
}
// Schedule restarts for services the config change requires to restart.
if (prepareResponse.configChangeActions.restartActions.isEmpty())
logger.log("No services requiring restart.");
else
prepareResponse.configChangeActions.restartActions.stream()
.flatMap(action -> action.services.stream())
.map(service -> service.hostName)
.sorted().distinct()
.map(Hostname::new)
.forEach(hostname -> {
controller.applications().restart(new DeploymentId(id, type.zone(controller.system())), Optional.of(hostname));
logger.log("Schedule service restart on host " + hostname.id() + ".");
});
logger.log("Deployment successful.");
if (prepareResponse.message != null)
logger.log(prepareResponse.message);
return Optional.of(running);
}
catch (ConfigServerException e) {
// After one hour of failures, transient errors become terminal.
Optional<RunStatus> result = startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(deploymentFailed) : Optional.empty();
switch (e.getErrorCode()) {
case CERTIFICATE_NOT_READY:
if (startTime.plus(endpointCertificateTimeout).isBefore(controller.clock().instant())) {
logger.log("Deployment failed to find provisioned endpoint certificate after " + endpointCertificateTimeout);
return Optional.of(RunStatus.endpointCertificateTimeout);
}
return result;
case ACTIVATION_CONFLICT:
case APPLICATION_LOCK_FAILURE:
logger.log("Deployment failed with possibly transient error " + e.getErrorCode() +
", will retry: " + e.getMessage());
return result;
case LOAD_BALANCER_NOT_READY:
case PARENT_HOST_NOT_READY:
logger.log(e.getServerMessage());
return result;
case OUT_OF_CAPACITY:
// CD systems wait a short while for capacity; everyone else fails immediately.
logger.log(e.getServerMessage());
return controller.system().isCd() && startTime.plus(capacityTimeout).isAfter(controller.clock().instant())
? Optional.empty()
: Optional.of(outOfCapacity);
case INVALID_APPLICATION_PACKAGE:
case BAD_REQUEST:
logger.log(e.getMessage());
return Optional.of(deploymentFailed);
}
throw e;
}
catch (EndpointCertificateException e) {
switch (e.type()) {
case CERT_NOT_AVAILABLE:
// Wait for a certificate to be provisioned, up to the certificate timeout.
if (startTime.plus(endpointCertificateTimeout).isBefore(controller.clock().instant())) {
logger.log("Deployment failed to find provisioned endpoint certificate after " + endpointCertificateTimeout);
return Optional.of(RunStatus.endpointCertificateTimeout);
}
return Optional.empty();
default:
throw e;
}
}
}
/** Awaits installation of the initial (stage-setting) real deployment. */
private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) {
    return installReal(id, true, logger);
}
/** Awaits installation of the target real deployment. */
private Optional<RunStatus> installReal(RunId id, DualLogger logger) {
    return installReal(id, false, logger);
}
/**
 * Tracks convergence of the real deployment onto the wanted platform and config generation.
 * Returns {@code running} once all nodes and services have converged and endpoints and
 * containers are up, {@code installationFailed} on timeout or stuck nodes, and empty to
 * keep polling.
 */
private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
logger.log(INFO, "Deployment expired before installation was successful.");
return Optional.of(installationFailed);
}
Versions versions = controller.jobController().run(id).get().versions();
// When setting the stage, we install the source platform (if given), not the target.
Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform();
Run run = controller.jobController().run(id).get();
Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(id.application(), id.type().zone(controller.system())),
Optional.of(platform));
if (services.isEmpty()) {
logger.log("Config status not currently available -- will retry.");
return Optional.empty();
}
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
id.application(),
ImmutableSet.of(active, reserved));
// Parent hosts are needed to detect pending OS/firmware upgrades affecting the nodes.
List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
NodeList nodeList = NodeList.of(nodes, parents, services.get());
boolean firstTick = run.convergenceSummary().isEmpty();
// On the first tick, log full details for every node.
if (firstTick) {
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
}
ConvergenceSummary summary = nodeList.summary();
if (summary.converged()) {
controller.jobController().locked(id, lockedRun -> lockedRun.withSummary(null));
if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) {
if (containersAreUp(id.application(), id.type().zone(controller.system()), logger)) {
logger.log("Installation succeeded!");
return Optional.of(running);
}
}
else if (timedOut(id, deployment.get(), endpointTimeout)) {
logger.log(WARNING, "Endpoints failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
return Optional.of(error);
}
}
// Not yet converged: check the various ways the installation can be considered stuck.
String failureReason = null;
NodeList suspendedTooLong = nodeList.suspendedSince(controller.clock().instant().minus(installationTimeout));
if ( ! suspendedTooLong.isEmpty()) {
failureReason = "Some nodes have been suspended for more than " + installationTimeout.toMinutes() + " minutes:\n" +
suspendedTooLong.asList().stream().map(node -> node.node().hostname().value()).collect(joining("\n"));
}
if (run.noNodesDownSince()
.map(since -> since.isBefore(controller.clock().instant().minus(installationTimeout)))
.orElse(false)) {
if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0)
failureReason ="No nodes allowed to suspend to progress installation for " + installationTimeout.toMinutes() + " minutes.";
else
failureReason = "Nodes not able to start with new application package.";
}
Duration timeout = JobRunner.jobTimeout.minusHours(1);
if (timedOut(id, deployment.get(), timeout)) {
failureReason = "Installation failed to complete within " + timeout.toHours() + "hours!";
}
if (failureReason != null) {
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
logger.log(INFO, failureReason);
return Optional.of(installationFailed);
}
// On subsequent ticks, log only nodes which are down or need new config.
if ( ! firstTick)
logger.log(nodeList.expectedDown().concat(nodeList.needsNewConfig()).asList().stream()
.distinct()
.flatMap(node -> nodeDetails(node, false))
.collect(toList()));
// Record when no nodes are down, so stuck installations can be detected above.
controller.jobController().locked(id, lockedRun -> {
Instant noNodesDownSince = nodeList.allowedDown().size() == 0 ? lockedRun.noNodesDownSince().orElse(controller.clock().instant()) : null;
return lockedRun.noNodesDownSince(noNodesDownSince).withSummary(summary);
});
return Optional.empty();
}
/**
 * Tracks convergence of the tester deployment onto the current system version.
 * Returns {@code running} once the tester has converged and its container is up,
 * {@code error} on timeout, and empty to keep polling.
 */
private Optional<RunStatus> installTester(RunId id, DualLogger logger) {
Run run = controller.jobController().run(id).get();
Version platform = controller.systemVersion();
ZoneId zone = id.type().zone(controller.system());
ApplicationId testerId = id.tester().id();
Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(testerId, zone),
Optional.of(platform));
if (services.isEmpty()) {
logger.log("Config status not currently available -- will retry.");
// Give the config status five minutes to become available before giving up.
return run.stepInfo(installTester).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
? Optional.of(error)
: Optional.empty();
}
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone,
testerId,
ImmutableSet.of(active, reserved));
// Parent hosts are needed to detect pending OS/firmware upgrades affecting the nodes.
List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(zone,
nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
NodeList nodeList = NodeList.of(nodes, parents, services.get());
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, false))
.collect(toList()));
if (nodeList.summary().converged() && testerContainersAreUp(testerId, zone, logger)) {
logger.log("Tester container successfully installed!");
return Optional.of(running);
}
if (run.stepInfo(installTester).get().startTime().get().plus(testerTimeout).isBefore(controller.clock().instant())) {
logger.log(WARNING, "Installation of tester failed to complete within " + testerTimeout.toMinutes() + " minutes!");
return Optional.of(error);
}
return Optional.empty();
}
/** Returns true iff all containers in the deployment give 100 consecutive 200 OK responses on /status.html. */
private boolean containersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
    var endpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zoneId)));
    if ( ! endpoints.containsKey(zoneId))
        return false;
    for (Endpoint endpoint : endpoints.get(zoneId)) {
        if ( ! controller.jobController().cloud().ready(endpoint.url())) {
            logger.log("Failed to get 100 consecutive OKs from " + endpoint);
            return false;
        }
    }
    return true;
}
/** Returns true iff all containers in the tester deployment give 100 consecutive 200 OK responses on /status.html. */
private boolean testerContainersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
    DeploymentId deploymentId = new DeploymentId(id, zoneId);
    if ( ! controller.jobController().cloud().testerReady(deploymentId)) {
        logger.log("Failed to get 100 consecutive OKs from tester container for " + deploymentId);
        return false;
    }
    return true;
}
/** Logs the given endpoints, grouped per zone, as a single multi-line message. */
private void logEndpoints(Map<ZoneId, List<Endpoint>> zoneEndpoints, DualLogger logger) {
    List<String> lines = new ArrayList<>();
    lines.add("Found endpoints:");
    for (Map.Entry<ZoneId, List<Endpoint>> entry : zoneEndpoints.entrySet()) {
        lines.add("- " + entry.getKey());
        for (Endpoint endpoint : entry.getValue())
            lines.add(" |-- " + endpoint.url() + " (cluster '" + endpoint.name() + "')");
    }
    logger.log(lines);
}
/**
 * Renders a human-readable status report for the given node: one header line with its
 * orchestration state, one line with pending platform/OS/firmware/restart/reboot work,
 * and, when {@code printAllServices} (or the node needs new config), one line per service
 * with its current vs. wanted config generation.
 */
private Stream<String> nodeDetails(NodeWithServices node, boolean printAllServices) {
return Stream.concat(Stream.of(node.node().hostname() + ": " + humanize(node.node().serviceState()) + (node.node().suspendedSince().map(since -> " since " + since).orElse("")),
"--- platform " + node.node().wantedVersion() + (node.needsPlatformUpgrade()
? " <-- " + (node.node().currentVersion().isEmpty() ? "not booted" : node.node().currentVersion())
: "") +
(node.needsOsUpgrade() && node.isAllowedDown()
? ", upgrading OS (" + node.node().wantedOsVersion() + " <-- " + node.node().currentOsVersion() + ")"
: "") +
(node.needsFirmwareUpgrade() && node.isAllowedDown()
? ", upgrading firmware"
: "") +
(node.needsRestart()
? ", restart pending (" + node.node().wantedRestartGeneration() + " <-- " + node.node().restartGeneration() + ")"
: "") +
(node.needsReboot()
? ", reboot pending (" + node.node().wantedRebootGeneration() + " <-- " + node.node().rebootGeneration() + ")"
: "")),
node.services().stream()
.filter(service -> printAllServices || node.needsNewConfig())
.map(service -> "--- " + service.type() + " on port " + service.port() + (service.currentGeneration() == -1
? " has not started "
: " has config generation " + service.currentGeneration() + ", wanted is " + node.wantedConfigGeneration())));
}
/** Translates a node service state to a human-readable phrase; unknown states fall back to the enum name. */
private String humanize(Node.ServiceState state) {
    if (state == Node.ServiceState.allowedDown) return "allowed to be DOWN";
    if (state == Node.ServiceState.expectedUp) return "expected to be UP";
    if (state == Node.ServiceState.unorchestrated) return "unorchestrated";
    return state.name();
}
/**
 * Starts the test suite (or the staging setup, when {@code isSetup}) in the tester container.
 * Fails the run with {@code error} if the deployment under test has expired, its zone endpoints
 * are no longer visible, or the tester container is not ready.
 */
private Optional<RunStatus> startTests(RunId id, boolean isSetup, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
logger.log(INFO, "Deployment expired before tests could start.");
return Optional.of(error);
}
// Tests get endpoints of all production deployments, plus the zone under test.
var deployments = controller.applications().requireInstance(id.application())
.productionDeployments().keySet().stream()
.map(zone -> new DeploymentId(id.application(), zone))
.collect(Collectors.toSet());
ZoneId zoneId = id.type().zone(controller.system());
deployments.add(new DeploymentId(id.application(), zoneId));
logger.log("Attempting to find endpoints ...");
var endpoints = controller.routing().zoneEndpointsOf(deployments);
if ( ! endpoints.containsKey(zoneId)) {
logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!");
return Optional.of(error);
}
logEndpoints(endpoints, logger);
if (!controller.jobController().cloud().testerReady(getTesterDeploymentId(id))) {
logger.log(WARNING, "Tester container went bad!");
return Optional.of(error);
}
logger.log("Starting tests ...");
TesterCloud.Suite suite = TesterCloud.Suite.of(id.type(), isSetup);
byte[] config = testConfigSerializer.configJson(id.application(),
id.type(),
true,
endpoints,
controller.applications().contentClustersByZone(deployments));
controller.jobController().cloud().startTests(getTesterDeploymentId(id), suite, config);
return Optional.of(running);
}
private Optional<RunStatus> endTests(RunId id, DualLogger logger) {
if (deployment(id.application(), id.type()).isEmpty()) {
logger.log(INFO, "Deployment expired before tests could complete.");
return Optional.of(aborted);
}
Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate();
if (testerCertificate.isPresent()) {
try {
testerCertificate.get().checkValidity(Date.from(controller.clock().instant()));
}
catch (CertificateExpiredException | CertificateNotYetValidException e) {
logger.log(INFO, "Tester certificate expired before tests could complete.");
return Optional.of(aborted);
}
}
controller.jobController().updateTestLog(id);
TesterCloud.Status testStatus = controller.jobController().cloud().getStatus(getTesterDeploymentId(id));
switch (testStatus) {
case NOT_STARTED:
throw new IllegalStateException("Tester reports tests not started, even though they should have!");
case RUNNING:
return Optional.empty();
case FAILURE:
logger.log("Tests failed.");
return Optional.of(testFailure);
case ERROR:
logger.log(INFO, "Tester failed running its tests!");
return Optional.of(error);
case SUCCESS:
logger.log("Tests completed successfully.");
return Optional.of(running);
default:
throw new IllegalStateException("Unknown status '" + testStatus + "'!");
}
}
private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) {
if (deployment(id.application(), id.type()).isPresent())
try {
controller.jobController().updateVespaLog(id);
}
catch (Exception e) {
logger.log(INFO, "Failure getting vespa logs for " + id, e);
return Optional.of(error);
}
return Optional.of(running);
}
private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) {
try {
logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
controller.applications().deactivate(id.application(), id.type().zone(controller.system()));
return Optional.of(running);
}
catch (RuntimeException e) {
logger.log(WARNING, "Failed deleting application " + id.application(), e);
Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateReal).get().startTime().get();
return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(error)
: Optional.empty();
}
}
private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) {
try {
logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
controller.jobController().deactivateTester(id.tester(), id.type());
return Optional.of(running);
}
catch (RuntimeException e) {
logger.log(WARNING, "Failed deleting tester of " + id.application(), e);
Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateTester).get().startTime().get();
return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(error)
: Optional.empty();
}
}
private Optional<RunStatus> report(RunId id, DualLogger logger) {
try {
controller.jobController().active(id).ifPresent(run -> {
if (run.hasFailed())
sendNotification(run, logger);
});
}
catch (IllegalStateException e) {
logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e);
return Optional.of(error);
}
return Optional.of(running);
}
/** Sends a mail with a notification of a failed run, if one should be sent. */
private void sendNotification(Run run, DualLogger logger) {
Application application = controller.applications().requireApplication(TenantAndApplicationId.from(run.id().application()));
Notifications notifications = application.deploymentSpec().requireInstance(run.id().application().instance()).notifications();
boolean newCommit = application.require(run.id().application().instance()).change().application()
.map(run.versions().targetApplication()::equals)
.orElse(false);
When when = newCommit ? failingCommit : failing;
List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when));
if (notifications.emailRolesFor(when).contains(author))
run.versions().targetApplication().authorEmail().ifPresent(recipients::add);
if (recipients.isEmpty())
return;
try {
mailOf(run, recipients).ifPresent(controller.serviceRegistry().mailer()::send);
}
catch (RuntimeException e) {
logger.log(INFO, "Exception trying to send mail for " + run.id(), e);
}
}
private Optional<Mail> mailOf(Run run, List<String> recipients) {
switch (run.status()) {
case running:
case aborted:
case success:
return Optional.empty();
case outOfCapacity:
return run.id().type().isProduction() ? Optional.of(mails.outOfCapacity(run.id(), recipients)) : Optional.empty();
case deploymentFailed:
return Optional.of(mails.deploymentFailure(run.id(), recipients));
case installationFailed:
return Optional.of(mails.installationFailure(run.id(), recipients));
case testFailure:
return Optional.of(mails.testFailure(run.id(), recipients));
case error:
case endpointCertificateTimeout:
return Optional.of(mails.systemError(run.id(), recipients));
default:
logger.log(WARNING, "Don't know what mail to send for run status '" + run.status() + "'");
return Optional.of(mails.systemError(run.id(), recipients));
}
}
/** Returns the deployment of the real application in the zone of the given job, if it exists. */
private Optional<Deployment> deployment(ApplicationId id, JobType type) {
return Optional.ofNullable(application(id).deployments().get(type.zone(controller.system())));
}
/** Returns the real application with the given id. */
private Instance application(ApplicationId id) {
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { });
return controller.applications().requireInstance(id);
}
/**
* Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout.
*
* We time out the job before the deployment expires, for zones where deployments are not persistent,
* to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry,
* and the given default installation timeout, minus one minute, is used as a timeout threshold.
*/
private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) {
Run run = controller.jobController().run(id).get();
if ( ! controller.system().isCd() && run.start().isAfter(deployment.at()))
return false;
Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone())
.filter(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0)
.orElse(defaultTimeout);
return deployment.at().isBefore(controller.clock().instant().minus(timeout.minus(Duration.ofMinutes(1))));
}
/** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */
private ApplicationPackage testerPackage(RunId id) {
ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication();
DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec();
ZoneId zone = id.type().zone(controller.system());
boolean useTesterCertificate = controller.system().isPublic() && id.type().environment().isTest();
byte[] servicesXml = servicesXml(! controller.system().isPublic(),
useTesterCertificate,
testerResourcesFor(zone, spec.requireInstance(id.application().instance())));
byte[] testPackage = controller.applications().applicationStore().getTester(id.application().tenant(), id.application().application(), version);
byte[] deploymentXml = deploymentXml(id.tester(),
spec.athenzDomain(),
spec.requireInstance(id.application().instance()).athenzService(zone.environment(), zone.region()));
try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) {
zipBuilder.add(testPackage);
zipBuilder.add("services.xml", servicesXml);
zipBuilder.add("deployment.xml", deploymentXml);
if (useTesterCertificate)
appendAndStoreCertificate(zipBuilder, id);
zipBuilder.close();
return new ApplicationPackage(zipBuilder.toByteArray());
}
}
private void appendAndStoreCertificate(ZipBuilder zipBuilder, RunId id) {
KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048);
X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "." + id.number());
X509Certificate certificate = X509CertificateBuilder.fromKeypair(keyPair,
subject,
controller.clock().instant(),
controller.clock().instant().plus(certificateTimeout),
SignatureAlgorithm.SHA512_WITH_RSA,
BigInteger.valueOf(1))
.build();
controller.jobController().storeTesterCertificate(id, certificate);
zipBuilder.add("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8));
zipBuilder.add("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8));
}
private DeploymentId getTesterDeploymentId(RunId runId) {
ZoneId zoneId = runId.type().zone(controller.system());
return new DeploymentId(runId.tester().id(), zoneId);
}
static NodeResources testerResourcesFor(ZoneId zone, DeploymentInstanceSpec spec) {
return spec.steps().stream()
.filter(step -> step.concerns(zone.environment()))
.findFirst()
.flatMap(step -> step.zones().get(0).testerFlavor())
.map(NodeResources::fromLegacyName)
.orElse(zone.region().value().contains("aws-") ?
DEFAULT_TESTER_RESOURCES_AWS : DEFAULT_TESTER_RESOURCES);
}
/** Returns the generated services.xml content for the tester application. */
static byte[] servicesXml(boolean systemUsesAthenz, boolean useTesterCertificate, NodeResources resources) {
int jdiscMemoryGb = 2;
int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb());
int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2);
String resourceString = String.format(Locale.ENGLISH,
"<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>",
resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name());
String servicesXml =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<services xmlns:deploy='vespa' version='1.0'>\n" +
" <container version='1.0' id='tester'>\n" +
"\n" +
" <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" +
" <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" +
" <artifactsPath>artifacts</artifactsPath>\n" +
" <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" +
" <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
" <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" +
" </config>\n" +
" </component>\n" +
"\n" +
" <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" +
" <binding>http:
" </handler>\n" +
"\n" +
" <nodes count=\"1\" allocated-memory=\"" + jdiscMemoryPct + "%\">\n" +
" " + resourceString + "\n" +
" </nodes>\n" +
" </container>\n" +
"</services>\n";
return servicesXml.getBytes(UTF_8);
}
/** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */
private static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) {
String deploymentSpec =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<deployment version=\"1.0\" " +
athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") +
athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("") + ">" +
" <instance id=\"" + id.id().instance().value() + "\" />" +
"</deployment>";
return deploymentSpec.getBytes(UTF_8);
}
/** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */
private class DualLogger {
private final RunId id;
private final Step step;
private DualLogger(RunId id, Step step) {
this.id = id;
this.step = step;
}
private void log(String... messages) {
log(List.of(messages));
}
private void logAll(List<LogEntry> messages) {
controller.jobController().log(id, step, messages);
}
private void log(List<String> messages) {
controller.jobController().log(id, step, INFO, messages);
}
private void log(Level level, String message) {
log(level, message, null);
}
private void logWithInternalException(Level level, String message, Throwable thrown) {
logger.log(level, id + " at " + step + ": " + message, thrown);
controller.jobController().log(id, step, level, message);
}
private void log(Level level, String message, Throwable thrown) {
logger.log(level, id + " at " + step + ": " + message, thrown);
if (thrown != null) {
ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream();
thrown.printStackTrace(new PrintStream(traceBuffer));
message += "\n" + traceBuffer;
}
controller.jobController().log(id, step, level, message);
}
}
} |
Good. | private Version compileVersion(TenantAndApplicationId id) {
Version oldestPlatform = controller.applications().oldestInstalledPlatform(id);
VersionStatus versionStatus = controller.versionStatus();
return versionStatus.versions().stream()
.filter(version -> version.confidence().equalOrHigherThan(VespaVersion.Confidence.low))
.filter(VespaVersion::isReleased)
.map(VespaVersion::versionNumber)
.filter(version -> ! version.isAfter(oldestPlatform))
.max(Comparator.naturalOrder())
.orElseGet(() -> controller.mavenRepository().metadata().versions().stream()
.filter(version -> ! version.isAfter(oldestPlatform))
.filter(version -> ! versionStatus.versions().stream()
.map(VespaVersion::versionNumber)
.collect(Collectors.toSet()).contains(version))
.max(Comparator.naturalOrder())
.orElseThrow(() -> new IllegalStateException("No available releases of " +
controller.mavenRepository().artifactId())));
} | VersionStatus versionStatus = controller.versionStatus(); | private Version compileVersion(TenantAndApplicationId id) {
Version oldestPlatform = controller.applications().oldestInstalledPlatform(id);
VersionStatus versionStatus = controller.versionStatus();
return versionStatus.versions().stream()
.filter(version -> version.confidence().equalOrHigherThan(VespaVersion.Confidence.low))
.filter(VespaVersion::isReleased)
.map(VespaVersion::versionNumber)
.filter(version -> ! version.isAfter(oldestPlatform))
.max(Comparator.naturalOrder())
.orElseGet(() -> controller.mavenRepository().metadata().versions().stream()
.filter(version -> ! version.isAfter(oldestPlatform))
.filter(version -> ! versionStatus.versions().stream()
.map(VespaVersion::versionNumber)
.collect(Collectors.toSet()).contains(version))
.max(Comparator.naturalOrder())
.orElseThrow(() -> new IllegalStateException("No available releases of " +
controller.mavenRepository().artifactId())));
} | class ApplicationApiHandler extends LoggingRequestHandler {
private static final String OPTIONAL_PREFIX = "/api";
private final Controller controller;
private final AccessControlRequests accessControlRequests;
private final TestConfigSerializer testConfigSerializer;
@Inject
public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx,
Controller controller,
AccessControlRequests accessControlRequests) {
super(parentCtx);
this.controller = controller;
this.accessControlRequests = accessControlRequests;
this.testConfigSerializer = new TestConfigSerializer(controller.system());
}
@Override
public Duration getTimeout() {
return Duration.ofMinutes(20);
}
@Override
public HttpResponse handle(HttpRequest request) {
try {
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
switch (request.getMethod()) {
case GET: return handleGET(path, request);
case PUT: return handlePUT(path, request);
case POST: return handlePOST(path, request);
case PATCH: return handlePATCH(path, request);
case DELETE: return handleDELETE(path, request);
case OPTIONS: return handleOPTIONS();
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (ForbiddenException e) {
return ErrorResponse.forbidden(Exceptions.toMessageString(e));
}
catch (NotAuthorizedException e) {
return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
}
catch (NotExistsException e) {
return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (ConfigServerException e) {
switch (e.getErrorCode()) {
case NOT_FOUND:
return new ErrorResponse(NOT_FOUND, e.getErrorCode().name(), Exceptions.toMessageString(e));
case ACTIVATION_CONFLICT:
return new ErrorResponse(CONFLICT, e.getErrorCode().name(), Exceptions.toMessageString(e));
case INTERNAL_SERVER_ERROR:
return new ErrorResponse(INTERNAL_SERVER_ERROR, e.getErrorCode().name(), Exceptions.toMessageString(e));
default:
return new ErrorResponse(BAD_REQUEST, e.getErrorCode().name(), Exceptions.toMessageString(e));
}
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
private HttpResponse handleGET(Path path, HttpRequest request) {
if (path.matches("/application/v4/")) return root(request);
if (path.matches("/application/v4/user")) return authenticatedUser(request);
if (path.matches("/application/v4/tenant")) return tenants(request);
if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/cost")) return tenantCost(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/cost/{month}")) return tenantCost(path.get("tenant"), path.get("month"), request);
if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/metering")) return metering(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handlePUT(Path path, HttpRequest request) {
if (path.matches("/application/v4/user")) return new EmptyResponse();
if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handlePOST(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handlePATCH(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes DELETE requests to the matching operation; unknown paths yield a 404 response. */
private HttpResponse handleDELETE(Path path, HttpRequest request) {
// Tenant-level deletions and developer-key removal.
if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
// Application-level deletions; "deploying" paths cancel an ongoing change ("all" or a specific choice).
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
// Instance-level deletions, job abort, and pause removal (DELETE on /pause resumes the job).
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
// Deployment deactivation and rotation override removal — both path orderings are supported.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Answers OPTIONS with the verbs this handler supports; the body is intentionally empty. */
private HttpResponse handleOPTIONS() {
    EmptyResponse optionsResponse = new EmptyResponse();
    optionsResponse.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
    return optionsResponse;
}
/** Renders every tenant, fully expanded, as a JSON array. */
private HttpResponse recursiveRoot(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantArray = slime.setArray();
    controller.tenants().asList()
              .forEach(tenant -> toSlime(tenantArray.addObject(), tenant, request));
    return new SlimeJsonResponse(slime);
}
/** Root resource: expanded tenant list when recursion is requested, otherwise just resource links. */
private HttpResponse root(HttpRequest request) {
    if (recurseOverTenants(request))
        return recursiveRoot(request);
    return new ResourceResponse(request, "user", "tenant");
}
/** Returns the authenticated user's name and the tenants that user has access to. */
private HttpResponse authenticatedUser(HttpRequest request) {
Principal user = requireUserPrincipal(request);
// Athenz principals report their full Athenz identity name; any other principal uses its plain name.
String userName = user instanceof AthenzPrincipal ? ((AthenzPrincipal) user).getIdentity().getName() : user.getName();
List<Tenant> tenants = controller.tenants().asList(new Credentials(user));
Slime slime = new Slime();
Cursor response = slime.setObject();
response.setString("user", userName);
Cursor tenantsArray = response.setArray("tenants");
for (Tenant tenant : tenants)
tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject());
// NOTE(review): always true, even when the user has no tenants — presumably kept for API compatibility; confirm.
response.setBool("tenantExists", true);
return new SlimeJsonResponse(slime);
}
/** Lists all tenants in compact form (name plus link). */
private HttpResponse tenants(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantArray = slime.setArray();
    controller.tenants().asList()
              .forEach(tenant -> tenantInTenantsListToSlime(tenant, request.getUri(), tenantArray.addObject()));
    return new SlimeJsonResponse(slime);
}
/** Looks up the named tenant and renders it, or responds 404 if it does not exist. */
private HttpResponse tenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    return tenant(tenant.get(), request);
}
/** Renders a single tenant as the response root object. */
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    toSlime(root, tenant, request);
    return new SlimeJsonResponse(slime);
}
/** Lists the months with metered cost data for the named tenant, or responds 404 if unknown. */
private HttpResponse tenantCost(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    return tenantCost(tenant.get(), request);
}
/**
 * Renders the months for which metering data exists for the tenant.
 * The request parameter is unused but kept for signature symmetry with the sibling handlers.
 */
private HttpResponse tenantCost(Tenant tenant, HttpRequest request) {
    Slime slime = new Slime();
    Cursor monthsArray = slime.setObject().setArray("months");
    for (YearMonth month : controller.serviceRegistry().tenantCost().monthsWithMetering(tenant.name()))
        monthsArray.addString(month.toString());
    return new SlimeJsonResponse(slime);
}
/** Renders cost details for the named tenant in the given month ("yyyy-MM"), or responds 404 if unknown. */
private HttpResponse tenantCost(String tenantName, String dateString, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    return tenantCost(tenant.get(), tenantCostParseDate(dateString), request);
}
/**
 * Parses a "yyyy-MM" string into a {@link YearMonth}.
 *
 * @throws IllegalArgumentException if the string is not a valid year-month; the original
 *         parse failure is preserved as the cause for debuggability.
 */
private YearMonth tenantCostParseDate(String dateString) {
    try {
        return YearMonth.parse(dateString);
    } catch (DateTimeParseException e) {
        // Chain the cause instead of dropping it, so the exact parse position is not lost.
        throw new IllegalArgumentException("Could not parse year-month '" + dateString + "'", e);
    }
}
/** Renders per-application, per-zone cost items for the given tenant and month. */
private HttpResponse tenantCost(Tenant tenant, YearMonth month, HttpRequest request) {
var slime = new Slime();
Cursor cursor = slime.setObject();
cursor.setString("month", month.toString());
List<CostInfo> costInfos = controller.serviceRegistry().tenantCost()
.getTenantCostOfMonth(tenant.name(), month);
Cursor array = cursor.setArray("items");
costInfos.forEach(costInfo -> {
Cursor costObject = array.addObject();
costObject.setString("applicationId", costInfo.getApplicationId().serializedForm());
costObject.setString("zoneId", costInfo.getZoneId().value());
// Usage hours are rounded to one decimal for presentation; charges are whole amounts (setLong).
Cursor cpu = costObject.setObject("cpu");
cpu.setDouble("usage", costInfo.getCpuHours().setScale(1, RoundingMode.HALF_UP).doubleValue());
cpu.setLong("charge", costInfo.getCpuCost());
Cursor memory = costObject.setObject("memory");
memory.setDouble("usage", costInfo.getMemoryHours().setScale(1, RoundingMode.HALF_UP).doubleValue());
memory.setLong("charge", costInfo.getMemoryCost());
Cursor disk = costObject.setObject("disk");
disk.setDouble("usage", costInfo.getDiskHours().setScale(1, RoundingMode.HALF_UP).doubleValue());
disk.setLong("charge", costInfo.getDiskCost());
});
return new SlimeJsonResponse(slime);
}
/**
 * Lists applications under a tenant, optionally filtered to a single application name.
 * Each entry carries its URL and its (possibly production-only) instances.
 */
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
TenantName tenant = TenantName.from(tenantName);
if (controller.tenants().get(tenantName).isEmpty())
return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
Slime slime = new Slime();
Cursor applicationArray = slime.setArray();
for (Application application : controller.applications().asList(tenant)) {
// An empty filter means "include all applications".
if (applicationName.map(application.id().application().value()::equals).orElse(true)) {
Cursor applicationObject = applicationArray.addObject();
applicationObject.setString("tenant", application.id().tenant().value());
applicationObject.setString("application", application.id().application().value());
applicationObject.setString("url", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value(),
request.getUri()).toString());
Cursor instanceArray = applicationObject.setArray("instances");
// The caller may restrict the listing to production instances via a request property.
for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
: application.instances().keySet()) {
Cursor instanceObject = instanceArray.addObject();
instanceObject.setString("instance", instance.value());
instanceObject.setString("url", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value() +
"/instance/" + instance.value(),
request.getUri()).toString());
}
}
}
return new SlimeJsonResponse(slime);
}
/** Serves the application package last deployed to a manually deployed (dev/perf) zone, as a zip download. */
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
    if ( ! type.environment().isManuallyDeployed())
        throw new IllegalArgumentException("Only manually deployed zones have dev packages");
    ZoneId zone = type.zone(controller.system());
    byte[] content = controller.applications().applicationStore().getDev(id, zone);
    String filename = id.toFullString() + "." + zone.value() + ".zip";
    return new ZipResponse(filename, content);
}
/**
 * Serves a submitted application package as a zip download.
 * The "build" query parameter selects a specific build number; when absent,
 * the latest submitted build is used. Responds 404 (NotExistsException) when
 * no package exists for the resolved build.
 */
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
var tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
var applicationId = ApplicationId.from(tenantName, applicationName, InstanceName.defaultName().value());
long buildNumber;
// Parse the optional "build" parameter eagerly so a malformed value fails with a clear message.
var requestedBuild = Optional.ofNullable(request.getProperty("build")).map(build -> {
try {
return Long.parseLong(build);
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Invalid build number", e);
}
});
if (requestedBuild.isEmpty()) {
// No explicit build requested: fall back to the latest submitted version, if any.
var application = controller.applications().requireApplication(tenantAndApplication);
var latestBuild = application.latestVersion().map(ApplicationVersion::buildNumber).orElse(OptionalLong.empty());
if (latestBuild.isEmpty()) {
throw new NotExistsException("No application package has been submitted for '" + tenantAndApplication + "'");
}
buildNumber = latestBuild.getAsLong();
} else {
buildNumber = requestedBuild.get();
}
var applicationPackage = controller.applications().applicationStore().find(tenantAndApplication.tenant(), tenantAndApplication.application(), buildNumber);
var filename = tenantAndApplication + "-build" + buildNumber + ".zip";
if (applicationPackage.isEmpty()) {
throw new NotExistsException("No application package found for '" +
tenantAndApplication +
"' with build number " + buildNumber);
}
return new ZipResponse(filename, applicationPackage.get());
}
/** Renders the named application, or throws NotExistsException if it is unknown. */
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    toSlime(root, getApplication(tenantName, applicationName), request);
    return new SlimeJsonResponse(slime);
}
/** Renders the Vespa version the given application should compile against. */
private HttpResponse compileVersion(String tenantName, String applicationName) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Slime slime = new Slime();
    slime.setObject().setString("compileVersion", compileVersion(id).toFullString());
    return new SlimeJsonResponse(slime);
}
/** Renders a single instance, including its deployment status. */
private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    // Resolve the instance before the application, preserving which NotExistsException is thrown first.
    Instance instance = getInstance(tenantName, applicationName, instanceName);
    DeploymentStatus status = controller.jobController().deploymentStatus(getApplication(tenantName, applicationName));
    Slime slime = new Slime();
    toSlime(slime.setObject(), instance, status, request);
    return new SlimeJsonResponse(slime);
}
/**
 * Registers the PEM-encoded public key in the request body as a developer key for the
 * authenticated user, for a cloud tenant. Responds with the tenant's full key list.
 */
private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant")
// Mutate under the tenant lock so concurrent key updates don't overwrite each other.
;
Principal user = request.getJDiscRequest().getUserPrincipal();
String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
Slime root = new Slime();
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
tenant = tenant.withDeveloperKey(developerKey, user);
// Serialize the updated key set before storing, so the response reflects the new state.
toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
controller.tenants().store(tenant);
});
return new SlimeJsonResponse(root);
}
/**
 * Removes the PEM-encoded public key in the request body from a cloud tenant's developer keys.
 * Responds with the tenant's remaining key list.
 */
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
// NOTE(review): 'user' is looked up but not used below — presumably historical; confirm before removing.
Principal user = ((CloudTenant) controller.tenants().require(TenantName.from(tenantName))).developerKeys().get(developerKey);
Slime root = new Slime();
// Mutate under the tenant lock so concurrent key updates don't overwrite each other.
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
tenant = tenant.withoutDeveloperKey(developerKey);
toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
controller.tenants().store(tenant);
});
return new SlimeJsonResponse(root);
}
/** Adds one { key, user } entry per registered key, in the map's iteration order. */
private void toSlime(Cursor keysArray, Map<PublicKey, Principal> keys) {
    for (Map.Entry<PublicKey, Principal> entry : keys.entrySet()) {
        Cursor keyObject = keysArray.addObject();
        keyObject.setString("key", KeyUtils.toPem(entry.getKey()));
        keyObject.setString("user", entry.getValue().getName());
    }
}
/**
 * Adds the PEM-encoded public key in the request body as a deploy key for the application.
 * Responds with the application's full deploy-key list.
 */
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
Slime root = new Slime();
// Mutate under the application lock so concurrent key updates don't overwrite each other.
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
application = application.withDeployKey(deployKey);
application.get().deployKeys().stream()
.map(KeyUtils::toPem)
.forEach(root.setObject().setArray("keys")::addString);
controller.applications().store(application);
});
return new SlimeJsonResponse(root);
}
/**
 * Removes the PEM-encoded public key in the request body from the application's deploy keys.
 * Responds with the application's remaining deploy-key list.
 */
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
Slime root = new Slime();
// Mutate under the application lock so concurrent key updates don't overwrite each other.
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
application = application.withoutDeployKey(deployKey);
application.get().deployKeys().stream()
.map(KeyUtils::toPem)
.forEach(root.setObject().setArray("keys")::addString);
controller.applications().store(application);
});
return new SlimeJsonResponse(root);
}
/**
 * Applies a partial update to application-level settings. Supported fields:
 * "majorVersion" (0 clears the pin) and "pemDeployKey" (adds a deploy key).
 * Responds with a message describing the changes made, or "No applicable changes."
 */
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
Inspector requestObject = toSlime(request.getData()).get();
StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
Inspector majorVersionField = requestObject.field("majorVersion");
if (majorVersionField.valid()) {
// 0 means "unpin the major version": stored as null.
Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
application = application.withMajorVersion(majorVersion);
messageBuilder.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
}
Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
if (pemDeployKeyField.valid()) {
String pemDeployKey = pemDeployKeyField.asString();
PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
application = application.withDeployKey(deployKey);
messageBuilder.add("Added deploy key " + pemDeployKey);
}
controller.applications().store(application);
});
return new MessageResponse(messageBuilder.toString());
}
/** Looks up the application, throwing NotExistsException if it is unknown. */
private Application getApplication(String tenantName, String applicationName) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    return controller.applications().getApplication(id)
                     .orElseThrow(() -> new NotExistsException(id + " not found"));
}
/** Looks up the instance, throwing NotExistsException if it is unknown. */
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    return controller.applications().getInstance(id)
                     .orElseThrow(() -> new NotExistsException(id + " not found"));
}
/** Lists the nodes allocated to a deployment, with state, resources and cluster info per node. */
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = ZoneId.from(environment, region);
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, id);
Slime slime = new Slime();
Cursor nodesArray = slime.setObject().setArray("nodes");
for (Node node : nodes) {
Cursor nodeObject = nodesArray.addObject();
nodeObject.setString("hostname", node.hostname().value());
nodeObject.setString("state", valueOf(node.state()));
node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
nodeObject.setString("orchestration", valueOf(node.serviceState()));
nodeObject.setString("version", node.currentVersion().toString());
nodeObject.setString("flavor", node.flavor());
nodeObject.setDouble("vcpu", node.resources().vcpu());
nodeObject.setDouble("memoryGb", node.resources().memoryGb());
nodeObject.setDouble("diskGb", node.resources().diskGb());
nodeObject.setDouble("bandwidthGbps", node.resources().bandwidthGbps());
nodeObject.setString("diskSpeed", valueOf(node.resources().diskSpeed()));
nodeObject.setString("storageType", valueOf(node.resources().storageType()));
// "fastDisk" is a legacy boolean view of diskSpeed — presumably kept for older clients; confirm.
nodeObject.setBool("fastDisk", node.resources().diskSpeed() == NodeResources.DiskSpeed.fast);
nodeObject.setString("clusterId", node.clusterId());
nodeObject.setString("clusterType", valueOf(node.clusterType()));
}
return new SlimeJsonResponse(slime);
}
/** Serializes a node state; the wire form equals the enum constant's name. Unlisted states are rejected. */
private static String valueOf(Node.State state) {
    switch (state) {
        case failed: case parked: case dirty: case ready:
        case active: case inactive: case reserved: case provisioned:
            return state.name();
        default:
            throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
    }
}
/** Serializes an orchestration state; the wire form equals the enum constant's name. */
private static String valueOf(Node.ServiceState state) {
    switch (state) {
        case expectedUp: case allowedDown: case unorchestrated:
            return state.name();
        default:
            throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
    }
}
/** Serializes a cluster type; the wire form equals the enum constant's name. */
private static String valueOf(Node.ClusterType type) {
    switch (type) {
        case admin: case content: case container: case combined:
            return type.name();
        default:
            throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
    }
}
/** Serializes a disk speed; the wire form equals the enum constant's name. */
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
    switch (diskSpeed) {
        case fast: case slow: case any:
            return diskSpeed.name();
        default:
            throw new IllegalArgumentException("Unknown disk speed '" + diskSpeed.name() + "'");
    }
}
/** Serializes a storage type; the wire form equals the enum constant's name. */
private static String valueOf(NodeResources.StorageType storageType) {
    switch (storageType) {
        case remote: case local: case any:
            return storageType.name();
        default:
            throw new IllegalArgumentException("Unknown storage type '" + storageType.name() + "'");
    }
}
/**
 * Streams Vespa logs for the given deployment directly from the config server.
 * Query parameters are passed through to the config server unmodified.
 */
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = ZoneId.from(environment, region);
DeploymentId deployment = new DeploymentId(application, zone);
InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
return new HttpResponse(200) {
@Override
public void render(OutputStream outputStream) throws IOException {
// Pipe the log data through without buffering it all in memory.
logStream.transferTo(outputStream);
}
};
}
/**
 * Triggers the given job for the given instance. Request body fields:
 * "reTrigger" re-triggers via the trigger's reTrigger path; otherwise the job is
 * force-triggered, with "skipTests" inverting the requireTests flag.
 * Responds with a message naming the job(s) triggered, or stating nothing was triggered.
 */
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
Inspector requestObject = toSlime(request.getData()).get();
boolean requireTests = ! requestObject.field("skipTests").asBool();
boolean reTrigger = requestObject.field("reTrigger").asBool();
String triggered = reTrigger
? controller.applications().deploymentTrigger()
.reTrigger(id, type).type().jobName()
: controller.applications().deploymentTrigger()
.forceTrigger(id, type, request.getJDiscRequest().getUserPrincipal().getName(), requireTests)
.stream().map(job -> job.type().jobName()).collect(joining(", "));
return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
: "Triggered " + triggered + " for " + id);
}
/** Pauses the given job for the maximum allowed duration, starting now. */
private HttpResponse pause(ApplicationId id, JobType type) {
    Instant expiry = controller.clock().instant().plus(DeploymentTrigger.maxPause);
    controller.applications().deploymentTrigger().pauseJob(id, type, expiry);
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
/** Clears any pause for the given job, letting the deployment trigger schedule it again. */
private HttpResponse resume(ApplicationId id, JobType type) {
    controller.applications().deploymentTrigger().resumeJob(id, type);
    String message = type.jobName() + " for " + id + " resumed";
    return new MessageResponse(message);
}
/**
 * Serializes an application to the response object: identity, links, current and outstanding
 * change, compile version, instances, deploy keys, metrics, activity and ownership info.
 * Field insertion order is part of the wire format — do not reorder the statements below.
 */
private void toSlime(Cursor object, Application application, HttpRequest request) {
object.setString("tenant", application.id().tenant().value());
object.setString("application", application.id().application().value());
object.setString("deployments", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value() +
"/job/",
request.getUri()).toString());
DeploymentStatus status = controller.jobController().deploymentStatus(application);
application.latestVersion().ifPresent(version -> toSlime(version, object.setObject("latestVersion")));
application.projectId().ifPresent(id -> object.setLong("projectId", id));
// Deploying/outstanding change is taken from the first instance only — presumably they agree
// across instances at the application level; confirm.
application.instances().values().stream().findFirst().ifPresent(instance -> {
if ( ! instance.change().isEmpty())
toSlime(object.setObject("deploying"), instance.change());
if ( ! status.outstandingChange(instance.name()).isEmpty())
toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
});
object.setString("compileVersion", compileVersion(application.id()).toFullString());
application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
// Instance list, optionally restricted to production instances by request property.
Cursor instancesArray = object.setArray("instances");
for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
: application.instances().values())
toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);
application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
// Service quality metrics and recent read/write activity.
Cursor metricsObject = object.setObject("metrics");
metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
Cursor activity = object.setObject("activity");
application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/**
 * Serializes one instance (as part of an application listing): job statuses, change blockers,
 * global endpoints and deployments. Job/blocker sections are emitted only when the instance is
 * declared in the deployment spec. Field insertion order is part of the wire format.
 */
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
object.setString("instance", instance.name().value());
if (deploymentSpec.instance(instance.name()).isPresent()) {
// Jobs ordered as the deployment spec's steps dictate.
List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
.steps(deploymentSpec.requireInstance(instance.name()))
.sortedJobs(status.instanceJobs(instance.name()).values());
if ( ! instance.change().isEmpty())
toSlime(object.setObject("deploying"), instance.change());
if ( ! status.outstandingChange(instance.name()).isEmpty())
toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
Cursor deploymentJobsArray = object.setArray("deploymentJobs");
for (JobStatus job : jobStatus) {
Cursor jobObject = deploymentJobsArray.addObject();
jobObject.setString("type", job.id().type().jobName());
jobObject.setBool("success", job.isSuccess());
job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered")));
job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted")));
job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing")));
job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess")));
}
// Time windows during which version and/or revision changes are blocked.
Cursor changeBlockers = object.setArray("changeBlockers");
deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
Cursor changeBlockerObject = changeBlockers.addObject();
changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
Cursor days = changeBlockerObject.setArray("days");
changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
Cursor hours = changeBlockerObject.setArray("hours");
changeBlocker.window().hours().forEach(hours::addLong);
}));
}
globalEndpointsToSlime(object, instance);
// Deployments sorted per the spec's declared order when the instance is declared; raw order otherwise.
List<Deployment> deployments = deploymentSpec.instance(instance.name())
.map(spec -> new DeploymentSteps(spec, controller::system))
.map(steps -> steps.sortedDeployments(instance.deployments().values()))
.orElse(List.copyOf(instance.deployments().values()));
Cursor deploymentsArray = object.setArray("deployments");
for (Deployment deployment : deployments) {
Cursor deploymentObject = deploymentsArray.addObject();
if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
// Either inline the full deployment (recursive request) or emit a link to it.
if (recurseOverDeployments(request))
toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
else {
deploymentObject.setString("environment", deployment.zone().environment().value());
deploymentObject.setString("region", deployment.zone().region().value());
deploymentObject.setString("url", withPath(request.getUri().getPath() +
"/instance/" + instance.name().value() +
"/environment/" + deployment.zone().environment().value() +
"/region/" + deployment.zone().region().value(),
request.getUri()).toString());
}
}
}
/**
 * Adds global endpoint info to the object: the URLs of non-legacy, rotation-backed endpoints
 * ("globalRotations") and the first assigned rotation id ("rotationId"), if any.
 */
private void globalEndpointsToSlime(Cursor object, Instance instance) {
// LinkedHashSet: de-duplicate URLs while keeping the endpoint order stable.
var globalEndpointUrls = new LinkedHashSet<String>();
controller.routing().endpointsOf(instance.id())
.requiresRotation()
.not().legacy()
.asList().stream()
.map(Endpoint::url)
.map(URI::toString)
.forEach(globalEndpointUrls::add);
var globalRotationsArray = object.setArray("globalRotations");
globalEndpointUrls.forEach(globalRotationsArray::addString);
instance.rotations().stream()
.map(AssignedRotation::rotationId)
.findFirst()
.ifPresent(rotation -> object.setString("rotationId", rotation.asString()));
}
/**
 * Serializes a single instance as a full response: identity, links, source info, job statuses,
 * change blockers, endpoints, deployments (including declared-but-missing production zones),
 * deploy keys, metrics, activity and ownership info.
 * Field insertion order is part of the wire format — do not reorder the statements below.
 */
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
Application application = status.application();
object.setString("tenant", instance.id().tenant().value());
object.setString("application", instance.id().application().value());
object.setString("instance", instance.id().instance().value());
object.setString("deployments", withPath("/application/v4" +
"/tenant/" + instance.id().tenant().value() +
"/application/" + instance.id().application().value() +
"/instance/" + instance.id().instance().value() + "/job/",
request.getUri()).toString());
// Source revision / URL / commit of the latest submitted version, when known.
application.latestVersion().ifPresent(version -> {
sourceRevisionToSlime(version.source(), object.setObject("source"));
version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
version.commit().ifPresent(commit -> object.setString("commit", commit));
});
application.projectId().ifPresent(id -> object.setLong("projectId", id));
// Job statuses and change blockers, only for instances declared in the deployment spec.
if (application.deploymentSpec().instance(instance.name()).isPresent()) {
List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
.steps(application.deploymentSpec().requireInstance(instance.name()))
.sortedJobs(status.instanceJobs(instance.name()).values());
if ( ! instance.change().isEmpty())
toSlime(object.setObject("deploying"), instance.change());
if ( ! status.outstandingChange(instance.name()).isEmpty())
toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
Cursor deploymentsArray = object.setArray("deploymentJobs");
for (JobStatus job : jobStatus) {
Cursor jobObject = deploymentsArray.addObject();
jobObject.setString("type", job.id().type().jobName());
jobObject.setBool("success", job.isSuccess());
job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered")));
job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted")));
job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing")));
job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess")));
}
Cursor changeBlockers = object.setArray("changeBlockers");
application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
Cursor changeBlockerObject = changeBlockers.addObject();
changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
Cursor days = changeBlockerObject.setArray("days");
changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
Cursor hours = changeBlockerObject.setArray("hours");
changeBlocker.window().hours().forEach(hours::addLong);
}));
}
object.setString("compileVersion", compileVersion(application.id()).toFullString());
application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
globalEndpointsToSlime(object, instance);
// Deployments sorted per the spec's declared order when the instance is declared; raw order otherwise.
List<Deployment> deployments =
application.deploymentSpec().instance(instance.name())
.map(spec -> new DeploymentSteps(spec, controller::system))
.map(steps -> steps.sortedDeployments(instance.deployments().values()))
.orElse(List.copyOf(instance.deployments().values()));
// NOTE(review): the array is named "instances" but holds deployments — presumably a legacy wire name; confirm.
Cursor instancesArray = object.setArray("instances");
for (Deployment deployment : deployments) {
Cursor deploymentObject = instancesArray.addObject();
if (deployment.zone().environment() == Environment.prod) {
if (instance.rotations().size() == 1) {
toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment),
deploymentObject);
}
if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
}
}
// Either inline the full deployment (recursive request) or emit a link to it.
if (recurseOverDeployments(request))
toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
else {
deploymentObject.setString("environment", deployment.zone().environment().value());
deploymentObject.setString("region", deployment.zone().region().value());
deploymentObject.setString("instance", instance.id().instance().value());
deploymentObject.setString("url", withPath(request.getUri().getPath() +
"/environment/" + deployment.zone().environment().value() +
"/region/" + deployment.zone().region().value(),
request.getUri()).toString());
}
}
// Production deployment zones declared in the job steps but not yet deployed to: listed with zone only.
status.jobSteps().keySet().stream()
.filter(job -> job.application().instance().equals(instance.name()))
.filter(job -> job.type().isProduction() && job.type().isDeployment())
.map(job -> job.type().zone(controller.system()))
.filter(zone -> ! instance.deployments().containsKey(zone))
.forEach(zone -> {
Cursor deploymentObject = instancesArray.addObject();
deploymentObject.setString("environment", zone.environment().value());
deploymentObject.setString("region", zone.region().value());
});
// Singular "pemDeployKey" retained alongside the plural list — presumably for older clients; confirm.
application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
Cursor metricsObject = object.setObject("metrics");
metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
Cursor activity = object.setObject("activity");
application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/**
 * Returns information about a single deployment of an instance in the given zone.
 * Responds 404 (NotExistsException) when the instance or the deployment does not exist.
 */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().getInstance(applicationId)
                                  .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
    ZoneId zone = ZoneId.from(environment, region);
    DeploymentId deploymentId = new DeploymentId(instance.id(), zone);
    Deployment deployment = instance.deployments().get(deploymentId.zoneId());
    if (deployment == null)
        throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId());
    Slime slime = new Slime();
    toSlime(slime.setObject(), deploymentId, deployment, request);
    return new SlimeJsonResponse(slime);
}
/**
 * Serializes an in-progress change: the target platform version and/or application revision.
 * Field insertion order determines JSON output order; do not reorder writes.
 */
private void toSlime(Cursor object, Change change) {
change.platform().ifPresent(version -> object.setString("version", version.toString()));
// Unknown application versions are omitted from the response entirely.
change.application()
.filter(version -> !version.isUnknown())
.ifPresent(version -> toSlime(version, object.setObject("revision")));
}
/**
 * Serializes one endpoint: the cluster it points at, whether it is TLS, its URL,
 * its scope and its routing method. Field order is part of the wire format.
 */
private void toSlime(Endpoint endpoint, String cluster, Cursor object) {
object.setString("cluster", cluster);
object.setBool("tls", endpoint.tls());
object.setString("url", endpoint.url().toString());
object.setString("scope", endpointScopeString(endpoint.scope()));
object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
}
/**
 * Serializes a full deployment view: identity, endpoints, links to nodes/monitoring,
 * versions, job status, rotation status, activity and metrics.
 * Field insertion order determines JSON output order; do not reorder writes.
 */
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
response.setString("tenant", deploymentId.applicationId().tenant().value());
response.setString("application", deploymentId.applicationId().application().value());
response.setString("instance", deploymentId.applicationId().instance().value());
response.setString("environment", deploymentId.zoneId().environment().value());
response.setString("region", deploymentId.zoneId().region().value());
var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));
// May be null if the instance was removed while the deployment lingers; guarded below.
var instance = application.instances().get(deploymentId.applicationId().instance());
// Zone-scoped endpoints; shared-routing ones are also exposed under the legacy "serviceUrls".
var endpointArray = response.setArray("endpoints");
var serviceUrls = new ArrayList<URI>();
for (var endpoint : controller.routing().endpointsOf(deploymentId)) {
toSlime(endpoint, endpoint.name(), endpointArray.addObject());
if (endpoint.routingMethod() == RoutingMethod.shared) {
serviceUrls.add(endpoint.url());
}
}
// Global endpoints are only meaningful for production deployments.
if (deploymentId.zoneId().environment().isProduction()) {
for (var endpoint : controller.routing().endpointsOf(instance).not().legacy()) {
toSlime(endpoint, "", endpointArray.addObject());
}
}
Cursor serviceUrlArray = response.setArray("serviceUrls");
serviceUrls.forEach(url -> serviceUrlArray.addString(url.toString()));
response.setString("nodes", withPath("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/?&recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
response.setString("version", deployment.version().toFullString());
response.setString("revision", deployment.applicationVersion().id());
response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
// Expiry is only set for zones with a configured deployment TTL (e.g. dev/perf).
controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
.ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));
DeploymentStatus status = controller.jobController().deploymentStatus(application);
application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
sourceRevisionToSlime(deployment.applicationVersion().source(), response);
if (instance != null) {
// Rotation (BCP) status is only reported for production deployments of rotated instances.
if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);
// Derive a coarse job status for this zone: complete, pending (waiting to become ready), or running.
JobType.from(controller.system(), deployment.zone())
.map(type -> new JobId(instance.id(), type))
.map(status.jobSteps()::get)
.ifPresent(stepStatus -> {
JobControllerApiHandlerHelper.applicationVersionToSlime(
response.setObject("applicationVersion"), deployment.applicationVersion());
if (!status.jobsToRun().containsKey(stepStatus.job().get()))
response.setString("status", "complete");
else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(false))
response.setString("status", "pending");
else response.setString("status", "running");
});
}
// Query/write activity; each field is omitted when no activity has been recorded.
Cursor activity = response.setObject("activity");
deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
instant.toEpochMilli()));
deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
instant.toEpochMilli()));
deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
DeploymentMetrics metrics = deployment.metrics();
Cursor metricsObject = response.setObject("metrics");
metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
metricsObject.setDouble("documentCount", metrics.documentCount());
metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}
/**
 * Serializes a known application version: build number, id hash, source revision,
 * and optional source URL and commit. Unknown versions produce no output.
 */
private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
if ( ! applicationVersion.isUnknown()) {
// Assumes a non-unknown version always carries a build number — getAsLong throws otherwise; TODO confirm.
object.setLong("buildNumber", applicationVersion.buildNumber().getAsLong());
object.setString("hash", applicationVersion.id());
sourceRevisionToSlime(applicationVersion.source(), object.setObject("source"));
applicationVersion.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
applicationVersion.commit().ifPresent(commit -> object.setString("commit", commit));
}
}
/** Serializes a source revision (repository, branch, commit) if present; writes nothing otherwise. */
private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
if (revision.isEmpty()) return;
object.setString("gitRepository", revision.get().repository());
object.setString("gitBranch", revision.get().branch());
object.setString("gitCommit", revision.get().commit());
}
/** Serializes a single rotation state under a "bcpStatus" object. */
private void toSlime(RotationState state, Cursor object) {
Cursor bcpStatus = object.setObject("bcpStatus");
bcpStatus.setString("rotationStatus", rotationStateString(state));
}
/**
 * Serializes the endpoint (rotation) status for each assigned rotation of a deployment:
 * endpoint/rotation/cluster ids, the deployment's status in that rotation, and last update time.
 */
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
var array = object.setArray("endpointStatus");
for (var rotation : rotations) {
var statusObject = array.addObject();
// Targets across all zones for this rotation; used below only for the lastUpdated timestamp.
var targets = status.of(rotation.rotationId());
statusObject.setString("endpointId", rotation.endpointId().id());
statusObject.setString("rotationId", rotation.rotationId().asString());
statusObject.setString("clusterId", rotation.clusterId().value());
// Status of this particular deployment within the rotation.
statusObject.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment)));
statusObject.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
}
}
/** Returns the monitoring dashboard URI for the given deployment, as configured in the zone registry. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}
/**
 * Sets the given instance's deployment in or out of service in its global rotation and endpoints.
 *
 * NOTE(review): the Javadoc previously attached here ("Returns a non-broken, released version at
 * least as old as the oldest platform the given application is on ...") described version selection
 * and did not match this method; it appears left over from a method that was moved or removed.
 */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().requireInstance(applicationId);
    ZoneId zone = ZoneId.from(environment, region);
    if ( ! instance.deployments().containsKey(zone))
        throw new NotExistsException(instance + " has no deployment in " + zone);
    DeploymentId deploymentId = new DeploymentId(instance.id(), zone);
    // Update both the rotation-backed and the routing-policy-backed endpoint status.
    setGlobalRotationStatus(deploymentId, inService, request);
    setGlobalEndpointStatus(deploymentId, inService, request);
    String direction = inService ? "in" : "out of";
    return new MessageResponse(String.format("Successfully set %s in %s %s service",
                                             instance.id().toShortString(), zone, direction));
}
/** Set the global endpoint status for given deployment. This only applies to global endpoints backed by a cloud service */
private void setGlobalEndpointStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
    GlobalRouting.Agent agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant;
    GlobalRouting.Status status = inService ? GlobalRouting.Status.in : GlobalRouting.Status.out;
    controller.routing().policies().setGlobalRoutingStatus(deployment, status, agent);
}
/** Set the global rotation status for given deployment. This only applies to global endpoints backed by a rotation */
private void setGlobalRotationStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
    Inspector requestData = toSlime(request.getData()).get();
    String reason = mandatory("reason", requestData).asString();
    // Operators and tenants are recorded as different agents in the endpoint status.
    String agent = (isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant).name();
    long timestamp = controller.clock().instant().getEpochSecond();
    EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
    controller.routing().setGlobalRotationStatus(deployment, new EndpointStatus(status, reason, agent, timestamp));
}
/**
 * Lists the in/out-of-service override status for each global rotation endpoint of a deployment.
 *
 * NOTE(review): the response array alternates a plain string (the endpoint's upstream id) with a
 * status object per endpoint — presumably a legacy wire format; confirm before restructuring.
 */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
ZoneId.from(environment, region));
Slime slime = new Slime();
Cursor array = slime.setObject().setArray("globalrotationoverride");
controller.routing().globalRotationStatus(deploymentId)
.forEach((endpoint, status) -> {
array.addString(endpoint.upstreamIdOf(deploymentId));
Cursor statusObject = array.addObject();
statusObject.setString("status", status.getStatus().name());
// Reason and agent may be null on older records; serialized as empty strings.
statusObject.setString("reason", status.getReason() == null ? "" : status.getReason());
statusObject.setString("agent", status.getAgent() == null ? "" : status.getAgent());
statusObject.setLong("timestamp", status.getEpoch());
});
return new SlimeJsonResponse(slime);
}
/**
 * Returns the rotation (BCP) status for a deployment of an instance in the given zone,
 * optionally limited to a named endpoint. Responds 404 when the deployment does not exist.
 */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().requireInstance(applicationId);
    ZoneId zone = ZoneId.from(environment, region);
    RotationId rotation = findRotationId(instance, endpointId);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " has no deployment in " + zone);
    Slime slime = new Slime();
    toSlime(instance.rotationStatus().of(rotation, deployment), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/**
 * Serializes resource metering for an application: the current consumption rate,
 * this and last month's totals, and a per-instance time series for cpu, mem and disk.
 */
private HttpResponse metering(String tenant, String application, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    MeteringData meteringData = controller.serviceRegistry()
                                          .meteringService()
                                          .getMeteringData(TenantName.from(tenant), ApplicationName.from(application));
    // The three aggregates all share the same { cpu, mem, disk } shape.
    writeResourceAllocation(root.setObject("currentrate"), meteringData.getCurrentSnapshot());
    writeResourceAllocation(root.setObject("thismonth"), meteringData.getThisMonth());
    writeResourceAllocation(root.setObject("lastmonth"), meteringData.getLastMonth());
    // Per-instance history, split by resource type so each series can be plotted directly.
    Cursor details = root.setObject("details");
    Cursor detailsCpu = details.setObject("cpu");
    Cursor detailsMem = details.setObject("mem");
    Cursor detailsDisk = details.setObject("disk");
    meteringData.getSnapshotHistory().forEach((instanceId, snapshots) -> {
        String instanceName = instanceId.instance().value();
        Cursor cpuData = detailsCpu.setObject(instanceName).setArray("data");
        Cursor memData = detailsMem.setObject(instanceName).setArray("data");
        Cursor diskData = detailsDisk.setObject(instanceName).setArray("data");
        for (ResourceSnapshot snapshot : snapshots) {
            long unixMillis = snapshot.getTimestamp().toEpochMilli();
            Cursor cpu = cpuData.addObject();
            cpu.setLong("unixms", unixMillis);
            cpu.setDouble("value", snapshot.getCpuCores());
            Cursor mem = memData.addObject();
            mem.setLong("unixms", unixMillis);
            mem.setDouble("value", snapshot.getMemoryGb());
            Cursor disk = diskData.addObject();
            disk.setLong("unixms", unixMillis);
            disk.setDouble("value", snapshot.getDiskGb());
        }
    });
    return new SlimeJsonResponse(slime);
}

/** Writes the cpu/mem/disk fields of an allocation to the given object. */
private static void writeResourceAllocation(Cursor object, ResourceAllocation allocation) {
    object.setDouble("cpu", allocation.getCpuCores());
    object.setDouble("mem", allocation.getMemoryGb());
    object.setDouble("disk", allocation.getDiskGb());
}
/** Returns the change (platform and/or application version, and pin status) currently rolling out to an instance, if any. */
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    Change change = instance.change();
    if ( ! change.isEmpty()) {
        change.platform().ifPresent(version -> root.setString("platform", version.toString()));
        change.application().ifPresent(applicationVersion -> root.setString("application", applicationVersion.id()));
        root.setBool("pinned", change.isPinned());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns whether the given deployment is currently suspended (out of service in the config server). */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 ZoneId.from(environment, region));
    Slime slime = new Slime();
    slime.setObject().setBool("suspended", controller.applications().isSuspended(deploymentId));
    return new SlimeJsonResponse(slime);
}
/** Returns the service view (clusters and their services) of a deployment, as reported by the config server. */
private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
    ZoneId zone = ZoneId.from(environment, region);
    ApplicationId id = new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build();
    ServiceApiResponse response = new ServiceApiResponse(zone,
                                                         id,
                                                         controller.zoneRegistry().getConfigServerApiUris(zone),
                                                         request.getUri());
    response.setResponse(applicationView);
    return response;
}
/**
 * Proxies a service-specific status request for a deployment to the config server.
 * Cluster-controller status pages come back as HTML; everything else as JSON.
 */
private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), ZoneId.from(environment, region));
    boolean clusterControllerStatusPage = "container-clustercontroller".equals(serviceName) && restPath.contains("/status/");
    if (clusterControllerStatusPage)
        return new HtmlResponse(controller.serviceRegistry().configServer().getClusterControllerStatus(deploymentId, restPath));
    Map<?, ?> result = controller.serviceRegistry().configServer().getServiceApiResponse(deploymentId, serviceName, restPath);
    ServiceApiResponse response = new ServiceApiResponse(deploymentId.zoneId(),
                                                         deploymentId.applicationId(),
                                                         controller.zoneRegistry().getConfigServerApiUris(deploymentId.zoneId()),
                                                         request.getUri());
    response.setResponse(result, serviceName, restPath);
    return response;
}
/** Updates an existing tenant from the request body's specification and credentials. */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName); // 404 if the tenant does not exist
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    var specification = accessControlRequests.specification(tenant, requestObject);
    var credentials = accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest());
    controller.tenants().update(specification, credentials);
    return tenant(controller.tenants().require(tenant), request);
}
/** Creates a tenant from the request body's specification and credentials, and returns its serialized form. */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    var specification = accessControlRequests.specification(tenant, requestObject);
    var credentials = accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest());
    controller.tenants().create(specification, credentials);
    return tenant(controller.tenants().require(tenant), request);
}
/** Creates an application (without instances) under the given tenant, and returns its serialized form. */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    controller.applications().createApplication(id, credentials);
    Slime slime = new Slime();
    toSlime(id, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Creates an instance, implicitly creating the application first if this is its first instance. */
private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    if ( ! controller.applications().getApplication(applicationId).isPresent())
        createApplication(tenantName, applicationName, request);
    ApplicationId instanceId = applicationId.instance(instanceName);
    controller.applications().createInstance(instanceId);
    Slime slime = new Slime();
    toSlime(instanceId, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
    request = controller.auditLogger().log(request);
    String versionString = readToString(request.getData());
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        // An empty version means "deploy the current system version".
        Version version = Version.fromString(versionString);
        if (version.equals(Version.emptyVersion))
            version = controller.systemVersion();
        if ( ! systemHasVersion(version)) {
            String activeVersions = controller.versionStatus().versions()
                                              .stream()
                                              .map(VespaVersion::versionNumber)
                                              .map(Version::toString)
                                              .collect(joining(", "));
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + activeVersions);
        }
        Change change = Change.of(version);
        if (pin)
            change = change.withPin();
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/**
 * Trigger deployment to the last known application package for the given application.
 * Responds with a client error if no application package has ever been submitted.
 */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    controller.auditLogger().log(request);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        // Previously an unchecked Optional.get() here caused a 500 when no package was known;
        // fail with IllegalArgumentException (client error) and an explanatory message instead.
        ApplicationVersion latestVersion = application.get().latestVersion()
                .orElseThrow(() -> new IllegalArgumentException("No known application package for " + id));
        Change change = Change.of(latestVersion);
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Change change = application.get().require(id.instance()).change();
        if (change.isEmpty()) {
            response.append("No deployment in progress for ").append(id).append(" at this time");
            return;
        }
        controller.applications().deploymentTrigger().cancelChange(id, ChangesToCancel.valueOf(choice.toUpperCase()));
        response.append("Changed deployment from '").append(change).append("' to '")
                .append(controller.applications().requireInstance(id).change()).append("' for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                       ZoneId.from(environment, region));
    // The optional "hostname" property limits the restart to a single host.
    Optional<Hostname> host = Optional.ofNullable(request.getProperty("hostname")).map(Hostname::new);
    controller.applications().restart(id, host);
    return new MessageResponse("Requested restart of " + id);
}
/**
 * Starts a deployment run for the given job directly from an uploaded application package.
 * Only manually deployed environments are open to non-operators.
 */
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
    if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");
    Map<String, byte[]> dataParts = parseDataParts(request);
    // Use the same key constant for the presence check and the lookup: the previous code checked
    // the literal "applicationZip" but read EnvironmentResource.APPLICATION_ZIP, which would NPE
    // in the ApplicationPackage constructor if the two ever diverged.
    if ( ! dataParts.containsKey(EnvironmentResource.APPLICATION_ZIP))
        throw new IllegalArgumentException("Missing required form part 'applicationZip'");
    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                     Optional.of(id.instance()),
                                                                     Optional.of(type.zone(controller.system())),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));
    // An explicit Vespa version may be given in the optional "deployOptions" JSON part.
    Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions"))
                                        .map(json -> SlimeUtils.jsonToSlime(json).get())
                                        .flatMap(options -> optional("vespaVersion", options))
                                        .map(Version::fromString);
    controller.jobController().deploy(id, type, version, applicationPackage);
    RunId runId = controller.jobController().last(id, type).get().id();
    Slime slime = new Slime();
    Cursor rootObject = slime.setObject();
    rootObject.setString("message", "Deployment started in " + runId +
                                    ". This may take about 15 minutes the first time.");
    rootObject.setLong("run", runId.number());
    return new SlimeJsonResponse(slime);
}
/**
 * Deploys an application to the given zone, from a multipart request containing mandatory
 * "deployOptions" and optionally an "applicationZip" or a source revision reference.
 * Handles three cases: the system proxy application, a redeploy of the current deployment,
 * and a normal deploy of a provided or fetched application package.
 */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = ZoneId.from(environment, region);
Map<String, byte[]> dataParts = parseDataParts(request);
if ( ! dataParts.containsKey("deployOptions"))
return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
/*
* Special handling of the proxy application (the only system application with an application package)
* Setting any other deployOptions here is not supported for now (e.g. specifying version), but
* this might be handy later to handle emergency downgrades.
*/
boolean isZoneApplication = SystemApplication.proxy.id().equals(applicationId);
if (isZoneApplication) {
String versionStr = deployOptions.field("vespaVersion").asString();
boolean versionPresent = !versionStr.isEmpty() && !versionStr.equals("null");
if (versionPresent) {
// NOTE(review): raw RuntimeException yields a server error here, unlike the
// IllegalArgumentExceptions below — possibly intentional; confirm before changing.
throw new RuntimeException("Version not supported for system applications");
}
if (controller.versionStatus().isUpgrading()) {
throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
}
Optional<VespaVersion> systemVersion = controller.versionStatus().systemVersion();
if (systemVersion.isEmpty()) {
throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
}
ActivateResult result = controller.applications()
.deploySystemApplicationPackage(SystemApplication.proxy, zone, systemVersion.get().versionNumber());
return new SlimeJsonResponse(toSlime(result));
}
/*
* Normal applications from here
*/
Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip"))
.map(ApplicationPackage::new);
Optional<Application> application = controller.applications().getApplication(TenantAndApplicationId.from(applicationId));
// A source revision plus build number identifies a previously submitted package to fetch.
Inspector sourceRevision = deployOptions.field("sourceRevision");
Inspector buildNumber = deployOptions.field("buildNumber");
if (sourceRevision.valid() != buildNumber.valid())
throw new IllegalArgumentException("Source revision and build number must both be provided, or not");
Optional<ApplicationVersion> applicationVersion = Optional.empty();
if (sourceRevision.valid()) {
if (applicationPackage.isPresent())
throw new IllegalArgumentException("Application version and application package can't both be provided.");
applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision),
buildNumber.asLong()));
applicationPackage = Optional.of(controller.applications().getApplicationPackage(applicationId,
applicationVersion.get()));
}
// Redeploy: no package, version or revision given — reuse what is currently deployed in the zone.
boolean deployDirectly = deployOptions.field("deployDirectly").asBool();
Optional<Version> vespaVersion = optional("vespaVersion", deployOptions).map(Version::new);
if (deployDirectly && applicationPackage.isEmpty() && applicationVersion.isEmpty() && vespaVersion.isEmpty()) {
Optional<Deployment> deployment = controller.applications().getInstance(applicationId)
.map(Instance::deployments)
.flatMap(deployments -> Optional.ofNullable(deployments.get(zone)));
if(deployment.isEmpty())
throw new IllegalArgumentException("Can't redeploy application, no deployment currently exist");
ApplicationVersion version = deployment.get().applicationVersion();
if(version.isUnknown())
throw new IllegalArgumentException("Can't redeploy application, application version is unknown");
applicationVersion = Optional.of(version);
vespaVersion = Optional.of(deployment.get().version());
applicationPackage = Optional.of(controller.applications().getApplicationPackage(applicationId,
applicationVersion.get()));
}
DeployOptions deployOptionsJsonClass = new DeployOptions(deployDirectly,
vespaVersion,
deployOptions.field("ignoreValidationErrors").asBool(),
deployOptions.field("deployCurrentVersion").asBool());
// Verify identity configuration before handing the package to the config server.
applicationPackage.ifPresent(aPackage -> controller.applications().verifyApplicationIdentityConfiguration(applicationId.tenant(),
Optional.of(applicationId.instance()),
Optional.of(zone),
aPackage,
Optional.of(requireUserPrincipal(request))));
ActivateResult result = controller.applications().deploy(applicationId,
zone,
applicationPackage,
applicationVersion,
deployOptionsJsonClass);
return new SlimeJsonResponse(toSlime(result));
}
/** Deletes a tenant if it exists and the supplied credentials allow it. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if ( ! tenant.isPresent())
        return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");
    Credentials credentials = accessControlRequests.credentials(tenant.get().name(),
                                                                toSlime(request.getData()).get(),
                                                                request.getJDiscRequest());
    controller.tenants().delete(tenant.get().name(), credentials);
    return tenant(tenant.get(), request);
}
/** Deletes an application, with all its instances, if the supplied credentials allow it. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Inspector requestObject = toSlime(request.getData()).get();
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    controller.applications().deleteApplication(id, credentials);
    return new MessageResponse("Deleted application " + id);
}
/** Deletes an instance; deleting the last instance also deletes the application itself. */
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    ApplicationId instanceId = id.instance(instanceName);
    controller.applications().deleteInstance(instanceId);
    if (controller.applications().requireApplication(id).instances().isEmpty()) {
        Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
        controller.applications().deleteApplication(id, credentials);
    }
    return new MessageResponse("Deleted instance " + instanceId.toFullString());
}
/** Deactivates (removes) the deployment of an instance in the given zone. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId id = new DeploymentId(applicationId, ZoneId.from(environment, region));
    controller.applications().deactivate(id.applicationId(), id.zoneId());
    return new MessageResponse("Deactivated " + id);
}
/** Returns test config for indicated job, with production deployments of the default instance. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
    ApplicationId defaultInstanceId = TenantAndApplicationId.from(id).defaultInstance();
    HashSet<DeploymentId> deployments = new HashSet<>();
    controller.applications().getInstance(defaultInstanceId)
              .ifPresent(defaultInstance -> defaultInstance.productionDeployments().keySet()
                                                           .forEach(zone -> deployments.add(new DeploymentId(defaultInstanceId, zone))));
    // Non-production jobs also include the zone under test itself.
    if ( ! type.isProduction())
        deployments.add(new DeploymentId(id, type.zone(controller.system())));
    return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
                                                                  type,
                                                                  false,
                                                                  controller.routing().zoneEndpointsOf(deployments),
                                                                  controller.applications().contentClustersByZone(deployments)));
}
/** Parses a source revision from JSON; all of "repository", "branch" and "commit" are required. */
private static SourceRevision toSourceRevision(Inspector object) {
    Inspector repository = object.field("repository");
    Inspector branch = object.field("branch");
    Inspector commit = object.field("commit");
    if ( ! (repository.valid() && branch.valid() && commit.valid()))
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    return new SourceRevision(repository.asString(), branch.asString(), commit.asString());
}
/** Returns the tenant with the given name, or throws NotExistsException (404). */
private Tenant getTenantOrThrow(String tenantName) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty())
        throw new NotExistsException(new TenantId(tenantName));
    return tenant.get();
}
/**
 * Serializes a full view of the given tenant into the given object: type-specific metadata,
 * plus its applications (recursively expanded when the request asks for recursion).
 */
private void toSlime(Cursor object, Tenant tenant, HttpRequest request) {
object.setString("tenant", tenant.name().value());
object.setString("type", tenantType(tenant));
List<Application> applications = controller.applications().asList(tenant.name());
// Type-specific metadata: Athenz tenants carry domain/property/contact info; cloud tenants carry developer keys.
switch (tenant.type()) {
case athenz:
AthenzTenant athenzTenant = (AthenzTenant) tenant;
object.setString("athensDomain", athenzTenant.domain().getName());
object.setString("property", athenzTenant.property().id());
athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
athenzTenant.contact().ifPresent(c -> {
object.setString("propertyUrl", c.propertyUrl().toString());
object.setString("contactsUrl", c.url().toString());
object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
Cursor contactsArray = object.setArray("contacts");
// Each contact entry is itself an array of person names.
c.persons().forEach(persons -> {
Cursor personArray = contactsArray.addArray();
persons.forEach(personArray::addString);
});
});
break;
case cloud: {
CloudTenant cloudTenant = (CloudTenant) tenant;
Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
cloudTenant.developerKeys().forEach((key, user) -> {
Cursor keyObject = pemDeveloperKeysArray.addObject();
keyObject.setString("key", KeyUtils.toPem(key));
keyObject.setString("user", user.getName());
});
break;
}
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
// Applications: either expanded inline (recursive) or as links, optionally restricted to production instances.
Cursor applicationArray = object.setArray("applications");
for (Application application : applications) {
DeploymentStatus status = controller.jobController().deploymentStatus(application);
for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
: application.instances().values())
if (recurseOverApplications(request))
toSlime(applicationArray.addObject(), instance, status, request);
else
toSlime(instance.id(), applicationArray.addObject(), request);
}
}
/** Serializes a compact view of the given tenant, for use in the tenant list. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
    object.setString("tenant", tenant.name().value());
    Cursor metaData = object.setObject("metaData");
    metaData.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz:
            AthenzTenant asAthenz = (AthenzTenant) tenant;
            metaData.setString("athensDomain", asAthenz.domain().getName());
            metaData.setString("property", asAthenz.property().id());
            break;
        case cloud:
            break; // no extra metadata for cloud tenants
        default:
            throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
private URI withPath(String newPath, URI uri) {
    try {
        // Keep scheme, user info, host and port; replace the path; drop query and fragment.
        return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath,
                       /* query */ null, /* fragment */ null);
    }
    catch (URISyntaxException e) {
        throw new RuntimeException("Will not happen", e);
    }
}
/** Parses the given value as a long, returning the given default when the value is null. */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null)
        return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    }
    catch (NumberFormatException ignored) {
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'");
    }
}
/** Serializes a compact view of the given job run: run number, target versions, reason and timestamp. */
private void toSlime(Run run, Cursor object) {
object.setLong("id", run.id().number());
object.setString("version", run.versions().targetPlatform().toFullString());
if ( ! run.versions().targetApplication().isUnknown())
toSlime(run.versions().targetApplication(), object.setObject("revision"));
// NOTE(review): the reason is a hard-coded placeholder — presumably the real trigger reason is not
// available here; confirm before relying on this field.
object.setString("reason", "unknown reason");
// Use the end time when the run has finished, otherwise its start time.
object.setLong("at", run.end().orElse(run.start()).toEpochMilli());
}
/**
 * Reads the given stream (at most 1 MB) and parses its contents as JSON into a Slime tree.
 *
 * @throws RuntimeException (with the I/O cause attached) if the stream cannot be read
 */
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        // The original threw a bare RuntimeException, discarding both the message and the cause;
        // keep the cause so request failures are diagnosable.
        throw new RuntimeException("Failed reading request body", e);
    }
}
/** Returns the user principal of the given request, or throws if none is set. */
private static Principal requireUserPrincipal(HttpRequest request) {
    return Optional.ofNullable(request.getJDiscRequest().getUserPrincipal())
                   .orElseThrow(() -> new InternalServerErrorException("Expected a user principal"));
}
/** Returns the named field of the given object, or throws IllegalArgumentException if it is missing. */
private Inspector mandatory(String key, Inspector object) {
    Inspector field = object.field(key);
    if ( ! field.valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return field;
}
/** Returns the named field of the given object as a string, if present. */
private Optional<String> optional(String key, Inspector object) {
    Inspector field = object.field(key);
    return SlimeUtils.optionalString(field);
}
/** Joins the string representations of the given elements with "/", for building request paths. */
private static String path(Object... elements) {
return Joiner.on("/").join(elements);
}
/** Writes the identifiers and API url of the given application, without instance. */
private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    String apiPath = path("/application/v4",
                          "tenant", id.tenant().value(),
                          "application", id.application().value());
    object.setString("url", withPath(apiPath, request.getUri()).toString());
}
/** Writes the identifiers and API url of the given application instance. */
private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    object.setString("instance", id.instance().value());
    String apiPath = path("/application/v4",
                          "tenant", id.tenant().value(),
                          "application", id.application().value(),
                          "instance", id.instance().value());
    object.setString("url", withPath(apiPath, request.getUri()).toString());
}
/**
 * Serializes the result of an application activation: revision, package size, prepare log,
 * and the config change actions (restarts and refeeds) required by the new package.
 */
private Slime toSlime(ActivateResult result) {
    Slime slime = new Slime();
    Cursor object = slime.setObject();
    object.setString("revisionId", result.revisionId().id());
    object.setLong("applicationZipSize", result.applicationZipSizeBytes());
    prepareLogToSlime(result, object.setArray("prepareMessages"));
    Cursor changeObject = object.setObject("configChangeActions");
    restartActionsToSlime(result.prepareResponse().configChangeActions.restartActions, changeObject.setArray("restart"));
    refeedActionsToSlime(result.prepareResponse().configChangeActions.refeedActions, changeObject.setArray("refeed"));
    return slime;
}

/** Adds the prepare log messages, if any, to the given array. */
private void prepareLogToSlime(ActivateResult result, Cursor logArray) {
    if (result.prepareResponse().log == null) return;
    for (Log logMessage : result.prepareResponse().log) {
        Cursor logObject = logArray.addObject();
        logObject.setLong("time", logMessage.time);
        logObject.setString("level", logMessage.level);
        logObject.setString("message", logMessage.message);
    }
}

/** Adds the cluster restart actions required by the new config to the given array. */
private void restartActionsToSlime(List<RestartAction> restartActions, Cursor array) {
    for (RestartAction restartAction : restartActions) {
        Cursor restartActionObject = array.addObject();
        restartActionObject.setString("clusterName", restartAction.clusterName);
        restartActionObject.setString("clusterType", restartAction.clusterType);
        restartActionObject.setString("serviceType", restartAction.serviceType);
        serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
        stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
    }
}

/** Adds the document refeed actions required by the new config to the given array. */
private void refeedActionsToSlime(List<RefeedAction> refeedActions, Cursor array) {
    for (RefeedAction refeedAction : refeedActions) {
        Cursor refeedActionObject = array.addObject();
        refeedActionObject.setString("name", refeedAction.name);
        refeedActionObject.setBool("allowed", refeedAction.allowed);
        refeedActionObject.setString("documentType", refeedAction.documentType);
        refeedActionObject.setString("clusterName", refeedAction.clusterName);
        serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
        stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
    }
}
/** Serializes each of the given service infos into the given array. */
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
    serviceInfoList.forEach(serviceInfo -> {
        Cursor entry = array.addObject();
        entry.setString("serviceName", serviceInfo.serviceName);
        entry.setString("serviceType", serviceInfo.serviceType);
        entry.setString("configId", serviceInfo.configId);
        entry.setString("hostName", serviceInfo.hostName);
    });
}
/** Adds each of the given strings to the given array. */
private void stringsToSlime(List<String> strings, Cursor array) {
    strings.forEach(array::addString);
}
/**
 * Reads the entire stream as a single UTF-8 string.
 * The stream is fully consumed and closed.
 *
 * @return the stream's contents, or null if the stream is empty
 */
private String readToString(InputStream stream) {
    // "\\A" matches only the start of input, so next() yields the whole remaining stream.
    // The original leaked the Scanner and used the platform default charset; request bodies
    // here are UTF-8, so decode explicitly and close via try-with-resources.
    try (Scanner scanner = new Scanner(stream, "UTF-8").useDelimiter("\\A")) {
        return scanner.hasNext() ? scanner.next() : null;
    }
}
/** Returns whether the given version is among the versions known to this system. */
private boolean systemHasVersion(Version version) {
    for (var knownVersion : controller.versionStatus().versions())
        if (knownVersion.versionNumber().equals(version))
            return true;
    return false;
}
/** Returns whether the "recursive" query property asks for expansion at the tenant level or deeper. */
private static boolean recurseOverTenants(HttpRequest request) {
return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive"));
}
/** Returns whether the "recursive" query property asks for expansion at the application level or deeper. */
private static boolean recurseOverApplications(HttpRequest request) {
return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive"));
}
/** Returns whether the "recursive" query property asks for expansion down to deployments. */
private static boolean recurseOverDeployments(HttpRequest request) {
return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive"));
}
/** Returns whether the request asks to list only instances with production deployments. */
private static boolean showOnlyProductionInstances(HttpRequest request) {
return "true".equals(request.getProperty("production"));
}
/** Returns the API string for the given tenant's type. */
private static String tenantType(Tenant tenant) {
    switch (tenant.type()) {
        case athenz: return "ATHENS";
        case cloud: return "CLOUD";
        // Report the actual type value, consistent with the other tenant-type switches in this class;
        // the class' simple name need not identify the type.
        default: throw new IllegalArgumentException("Unknown tenant type: " + tenant.type());
    }
}
/** Extracts the application id from the "tenant", "application" and "instance" path segments. */
private static ApplicationId appIdFromPath(Path path) {
return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
}
/** Extracts the job type from the "jobtype" path segment. */
private static JobType jobTypeFromPath(Path path) {
return JobType.fromJobName(path.get("jobtype"));
}
/** Extracts a run id from the application, job type and "number" path segments. */
private static RunId runIdFromPath(Path path) {
long number = Long.parseLong(path.get("number"));
return new RunId(appIdFromPath(path), jobTypeFromPath(path), number);
}
/**
 * Handles submission of a new application revision: parses the multipart request,
 * verifies identity configuration of the package, and hands off to the job controller.
 */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
Map<String, byte[]> dataParts = parseDataParts(request);
Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
// Project ids below 1 are normalized to 1.
long projectId = Math.max(1, submitOptions.field("projectId").asLong());
Optional<String> repository = optional("repository", submitOptions);
Optional<String> branch = optional("branch", submitOptions);
Optional<String> commit = optional("commit", submitOptions);
// A source revision is only recorded when all three of repository, branch and commit are given.
Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
: Optional.empty();
Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
Optional<String> authorEmail = optional("authorEmail", submitOptions);
sourceUrl.map(URI::create).ifPresent(url -> {
if (url.getHost() == null || url.getScheme() == null)
throw new IllegalArgumentException("Source URL must include scheme and host");
});
ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
// Throws if the package's declared identity is not valid for this tenant and user.
controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
Optional.empty(),
Optional.empty(),
applicationPackage,
Optional.of(requireUserPrincipal(request)));
return JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
tenant,
application,
sourceRevision,
authorEmail,
sourceUrl,
commit,
projectId,
applicationPackage,
dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP));
}
/**
 * Parses the multipart data of the given request.
 * When an "x-Content-Hash" header is present, the SHA-256 digest of the body must match its
 * base64-decoded value, to guard against corrupted or tampered uploads.
 *
 * @throws IllegalArgumentException if the declared content hash does not match the body
 */
private static Map<String, byte[]> parseDataParts(HttpRequest request) {
    String contentHash = request.getHeader("x-Content-Hash");
    if (contentHash == null)
        return new MultipartParser().parse(request);
    DigestInputStream digester = Signatures.sha256Digester(request.getData());
    var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
    // Compare digests in constant time, as recommended for any hash/MAC verification.
    if ( ! java.security.MessageDigest.isEqual(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
        throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
    return dataParts;
}
/**
 * Returns the rotation to operate on: the one with the given endpoint id when specified,
 * otherwise the instance's sole rotation.
 *
 * @throws NotExistsException if the instance has no rotations, or none with the given endpoint id
 * @throws IllegalArgumentException if no endpoint id is given and the instance has several rotations
 */
private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
    List<AssignedRotation> rotations = instance.rotations();
    if (rotations.isEmpty())
        throw new NotExistsException("global rotation does not exist for " + instance);
    if (endpointId.isPresent()) {
        for (AssignedRotation rotation : rotations)
            if (rotation.endpointId().id().equals(endpointId.get()))
                return rotation.rotationId();
        throw new NotExistsException("endpoint " + endpointId.get() + " does not exist for " + instance);
    }
    if (rotations.size() > 1)
        throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
    return rotations.get(0).rotationId();
}
/** Returns the API string for the given rotation state, or "UNKNOWN" for unrecognized states. */
private static String rotationStateString(RotationState state) {
    if (state == RotationState.in) return "IN";
    if (state == RotationState.out) return "OUT";
    return "UNKNOWN";
}
/** Returns the API string for the given endpoint scope. */
private static String endpointScopeString(Endpoint.Scope scope) {
    if (scope == Endpoint.Scope.global) return "global";
    if (scope == Endpoint.Scope.zone) return "zone";
    throw new IllegalArgumentException("Unknown endpoint scope " + scope);
}
/** Returns the API string for the given routing method. */
private static String routingMethodString(RoutingMethod method) {
    if (method == RoutingMethod.exclusive) return "exclusive";
    if (method == RoutingMethod.shared) return "shared";
    if (method == RoutingMethod.sharedLayer4) return "sharedLayer4";
    throw new IllegalArgumentException("Unknown routing method " + method);
}
/** Returns the request context attribute with the given name, cast to cls, or throws if absent or of another type. */
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
    Object value = request.getJDiscRequest().context().get(attributeName);
    if ( ! cls.isInstance(value)) // also covers value == null
        throw new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request");
    return cls.cast(value);
}
/** Returns whether given request is by an operator */
private static boolean isOperator(HttpRequest request) {
    var securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
    for (Role role : securityContext.roles())
        if (role.definition() == RoleDefinition.hostedOperator)
            return true;
    return false;
}
} | class ApplicationApiHandler extends LoggingRequestHandler {
// Optional path prefix which is stripped before route matching (see handle()).
private static final String OPTIONAL_PREFIX = "/api";
private final Controller controller;
private final AccessControlRequests accessControlRequests;
// Serializes test configuration for job test-config responses.
private final TestConfigSerializer testConfigSerializer;
/** Creates this handler; injected by the container with the controller and access-control request parser. */
@Inject
public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx,
Controller controller,
AccessControlRequests accessControlRequests) {
super(parentCtx);
this.controller = controller;
this.accessControlRequests = accessControlRequests;
this.testConfigSerializer = new TestConfigSerializer(controller.system());
}
/** Long timeout: some operations here, such as deployment, may take many minutes. */
@Override
public Duration getTimeout() {
return Duration.ofMinutes(20);
}
/**
 * Entry point: dispatches on HTTP method, and maps exceptions to appropriate error responses.
 * Catch order matters — more specific exception types are handled before RuntimeException.
 */
@Override
public HttpResponse handle(HttpRequest request) {
try {
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
switch (request.getMethod()) {
case GET: return handleGET(path, request);
case PUT: return handlePUT(path, request);
case POST: return handlePOST(path, request);
case PATCH: return handlePATCH(path, request);
case DELETE: return handleDELETE(path, request);
case OPTIONS: return handleOPTIONS();
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (ForbiddenException e) {
return ErrorResponse.forbidden(Exceptions.toMessageString(e));
}
catch (NotAuthorizedException e) {
return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
}
catch (NotExistsException e) {
return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (ConfigServerException e) {
// Map config server error codes onto the corresponding HTTP statuses.
switch (e.getErrorCode()) {
case NOT_FOUND:
return new ErrorResponse(NOT_FOUND, e.getErrorCode().name(), Exceptions.toMessageString(e));
case ACTIVATION_CONFLICT:
return new ErrorResponse(CONFLICT, e.getErrorCode().name(), Exceptions.toMessageString(e));
case INTERNAL_SERVER_ERROR:
return new ErrorResponse(INTERNAL_SERVER_ERROR, e.getErrorCode().name(), Exceptions.toMessageString(e));
default:
return new ErrorResponse(BAD_REQUEST, e.getErrorCode().name(), Exceptions.toMessageString(e));
}
}
catch (RuntimeException e) {
// Unexpected errors are logged with stack trace before being reported to the client.
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
/** Routes GET requests. The first matching path wins. */
private HttpResponse handleGET(Path path, HttpRequest request) {
if (path.matches("/application/v4/")) return root(request);
if (path.matches("/application/v4/user")) return authenticatedUser(request);
if (path.matches("/application/v4/tenant")) return tenants(request);
if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/cost")) return tenantCost(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/cost/{month}")) return tenantCost(path.get("tenant"), path.get("month"), request);
if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/metering")) return metering(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
// Legacy path order with the instance segment last; kept for backwards compatibility.
// (A second, byte-identical copy of the first line below was removed — it was unreachable.)
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes PUT requests. The first matching path wins. */
private HttpResponse handlePUT(Path path, HttpRequest request) {
if (path.matches("/application/v4/user")) return new EmptyResponse();
if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes POST requests. The first matching path wins. */
private HttpResponse handlePOST(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes PATCH requests. Instance-level patches apply to the whole application. */
private HttpResponse handlePATCH(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
// Routes DELETE requests under /application/v4 to their handlers.
// Both path orderings ".../instance/{instance}/environment/{environment}/region/{region}" and
// ".../environment/{environment}/region/{region}/instance/{instance}" are supported for the
// deployment-scoped operations.
private HttpResponse handleDELETE(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
    // Deployment cancellation on the application resource targets the "default" instance.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
    // DELETE of the "pause" sub-resource resumes the job.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
// Answers OPTIONS with the set of HTTP methods this handler supports.
private HttpResponse handleOPTIONS() {
    EmptyResponse optionsResponse = new EmptyResponse();
    optionsResponse.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
    return optionsResponse;
}
// Serializes every tenant in full, as an array.
private HttpResponse recursiveRoot(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantArray = slime.setArray();
    controller.tenants().asList()
              .forEach(tenant -> toSlime(tenantArray.addObject(), tenant, request));
    return new SlimeJsonResponse(slime);
}
// Root resource: a full recursive tenant listing when requested, otherwise just links.
private HttpResponse root(HttpRequest request) {
    if (recurseOverTenants(request))
        return recursiveRoot(request);
    return new ResourceResponse(request, "user", "tenant");
}
// Returns the authenticated user's name and the tenants that user can access.
private HttpResponse authenticatedUser(HttpRequest request) {
    Principal user = requireUserPrincipal(request);
    String userName;
    if (user instanceof AthenzPrincipal)
        userName = ((AthenzPrincipal) user).getIdentity().getName();
    else
        userName = user.getName();
    List<Tenant> tenants = controller.tenants().asList(new Credentials(user));
    Slime slime = new Slime();
    Cursor response = slime.setObject();
    response.setString("user", userName);
    Cursor tenantsArray = response.setArray("tenants");
    for (Tenant tenant : tenants)
        tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject());
    response.setBool("tenantExists", true); // always true — kept for response compatibility
    return new SlimeJsonResponse(slime);
}
// Lists all tenants in short form.
private HttpResponse tenants(HttpRequest request) {
    Slime slime = new Slime();
    Cursor response = slime.setArray();
    controller.tenants().asList()
              .forEach(tenant -> tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject()));
    return new SlimeJsonResponse(slime);
}
// Serializes the named tenant, or 404 if it does not exist.
private HttpResponse tenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    return tenant(tenant.get(), request);
}
// Serializes a single tenant.
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
    Slime root = new Slime();
    toSlime(root.setObject(), tenant, request);
    return new SlimeJsonResponse(root);
}
// Lists the months with metering data for the named tenant, or 404 if unknown.
private HttpResponse tenantCost(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    return tenantCost(tenant.get(), request);
}
// Serializes the months for which this tenant has metering data, as { "months": [...] }.
private HttpResponse tenantCost(Tenant tenant, HttpRequest request) {
    Slime slime = new Slime();
    Cursor monthsArray = slime.setObject().setArray("months");
    for (YearMonth month : controller.serviceRegistry().tenantCost().monthsWithMetering(tenant.name()))
        monthsArray.addString(month.toString());
    return new SlimeJsonResponse(slime);
}
// Serializes the cost report for the named tenant for the given year-month, or 404 if unknown.
private HttpResponse tenantCost(String tenantName, String dateString, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    return tenantCost(tenant.get(), tenantCostParseDate(dateString), request);
}
/**
 * Parses a year-month string (e.g. "2020-02") into a {@link YearMonth}.
 *
 * @throws IllegalArgumentException if the string is not a valid year-month; the original
 *         parse failure is preserved as the cause for easier debugging
 */
private YearMonth tenantCostParseDate(String dateString) {
    try {
        return YearMonth.parse(dateString);
    } catch (DateTimeParseException e) {
        // Keep the parse exception as cause instead of discarding it.
        throw new IllegalArgumentException("Could not parse year-month '" + dateString + "'", e);
    }
}
// Serializes the cost items for the given tenant in the given month:
// one entry per (application, zone), each with cpu/memory/disk usage and charge.
private HttpResponse tenantCost(Tenant tenant, YearMonth month, HttpRequest request) {
    var slime = new Slime();
    Cursor cursor = slime.setObject();
    cursor.setString("month", month.toString());
    List<CostInfo> costInfos = controller.serviceRegistry().tenantCost()
            .getTenantCostOfMonth(tenant.name(), month);
    Cursor array = cursor.setArray("items");
    costInfos.forEach(costInfo -> {
        Cursor costObject = array.addObject();
        costObject.setString("applicationId", costInfo.getApplicationId().serializedForm());
        costObject.setString("zoneId", costInfo.getZoneId().value());
        // Usage hours are rounded to one decimal; charges are integral amounts.
        Cursor cpu = costObject.setObject("cpu");
        cpu.setDouble("usage", costInfo.getCpuHours().setScale(1, RoundingMode.HALF_UP).doubleValue());
        cpu.setLong("charge", costInfo.getCpuCost());
        Cursor memory = costObject.setObject("memory");
        memory.setDouble("usage", costInfo.getMemoryHours().setScale(1, RoundingMode.HALF_UP).doubleValue());
        memory.setLong("charge", costInfo.getMemoryCost());
        Cursor disk = costObject.setObject("disk");
        disk.setDouble("usage", costInfo.getDiskHours().setScale(1, RoundingMode.HALF_UP).doubleValue());
        disk.setLong("charge", costInfo.getDiskCost());
    });
    return new SlimeJsonResponse(slime);
}
// Lists all applications of a tenant, optionally filtered to a single application name.
// Each entry carries its URL and an "instances" array; with the "production only" flag set
// on the request, only production instances are listed.
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().get(tenantName).isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    Slime slime = new Slime();
    Cursor applicationArray = slime.setArray();
    for (Application application : controller.applications().asList(tenant)) {
        // Empty filter means "all applications".
        if (applicationName.map(application.id().application().value()::equals).orElse(true)) {
            Cursor applicationObject = applicationArray.addObject();
            applicationObject.setString("tenant", application.id().tenant().value());
            applicationObject.setString("application", application.id().application().value());
            applicationObject.setString("url", withPath("/application/v4" +
                                                        "/tenant/" + application.id().tenant().value() +
                                                        "/application/" + application.id().application().value(),
                                                        request.getUri()).toString());
            Cursor instanceArray = applicationObject.setArray("instances");
            for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
                                                                              : application.instances().keySet()) {
                Cursor instanceObject = instanceArray.addObject();
                instanceObject.setString("instance", instance.value());
                instanceObject.setString("url", withPath("/application/v4" +
                                                         "/tenant/" + application.id().tenant().value() +
                                                         "/application/" + application.id().application().value() +
                                                         "/instance/" + instance.value(),
                                                         request.getUri()).toString());
            }
        }
    }
    return new SlimeJsonResponse(slime);
}
// Serves the application package last deployed to a manually deployed (dev/perf) zone.
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
    if ( ! type.environment().isManuallyDeployed())
        throw new IllegalArgumentException("Only manually deployed zones have dev packages");
    ZoneId zone = type.zone(controller.system());
    byte[] packageBytes = controller.applications().applicationStore().getDev(id, zone);
    String filename = id.toFullString() + "." + zone.value() + ".zip";
    return new ZipResponse(filename, packageBytes);
}
// Serves a stored application package as a zip: the build given by the "build" query
// parameter, or the latest submitted build when the parameter is absent.
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
    var tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
    var applicationId = ApplicationId.from(tenantName, applicationName, InstanceName.defaultName().value());
    long buildNumber;
    var requestedBuild = Optional.ofNullable(request.getProperty("build")).map(build -> {
        try {
            return Long.parseLong(build);
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("Invalid build number", e);
        }
    });
    if (requestedBuild.isEmpty()) {
        // No explicit build requested: fall back to the latest submitted version.
        var application = controller.applications().requireApplication(tenantAndApplication);
        var latestBuild = application.latestVersion().map(ApplicationVersion::buildNumber).orElse(OptionalLong.empty());
        if (latestBuild.isEmpty()) {
            throw new NotExistsException("No application package has been submitted for '" + tenantAndApplication + "'");
        }
        buildNumber = latestBuild.getAsLong();
    } else {
        buildNumber = requestedBuild.get();
    }
    var applicationPackage = controller.applications().applicationStore().find(tenantAndApplication.tenant(), tenantAndApplication.application(), buildNumber);
    var filename = tenantAndApplication + "-build" + buildNumber + ".zip";
    if (applicationPackage.isEmpty()) {
        throw new NotExistsException("No application package found for '" +
                                     tenantAndApplication +
                                     "' with build number " + buildNumber);
    }
    return new ZipResponse(filename, applicationPackage.get());
}
// Serializes a single application, or throws NotExistsException if it is unknown.
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
    Slime root = new Slime();
    toSlime(root.setObject(), getApplication(tenantName, applicationName), request);
    return new SlimeJsonResponse(root);
}
// Returns { "compileVersion": "<full version>" } for the given application.
private HttpResponse compileVersion(String tenantName, String applicationName) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setString("compileVersion",
                   compileVersion(TenantAndApplicationId.from(tenantName, applicationName)).toFullString());
    return new SlimeJsonResponse(slime);
}
// Serializes a single instance together with the deployment status of its application.
private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    // Resolve the instance before the application, so a missing instance reports as such.
    Instance instance = getInstance(tenantName, applicationName, instanceName);
    DeploymentStatus status = controller.jobController().deploymentStatus(getApplication(tenantName, applicationName));
    toSlime(root, instance, status, request);
    return new SlimeJsonResponse(slime);
}
// Registers the PEM public key in the request body as a developer key for the calling
// user, on a cloud tenant. Responds with the tenant's full key list after the update.
private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    Principal user = request.getJDiscRequest().getUserPrincipal();
    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    Slime root = new Slime();
    // The response is built inside the lock so it reflects the stored state.
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withDeveloperKey(developerKey, user);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}
/**
 * Removes the PEM public key in the request body from a cloud tenant's developer keys.
 * Responds with the tenant's remaining key list after the update.
 */
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    Slime root = new Slime();
    // The response is built inside the lock so it reflects the stored state.
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withoutDeveloperKey(developerKey);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}
// Serializes each (public key, owning user) pair as { "key": <PEM>, "user": <name> }.
private void toSlime(Cursor keysArray, Map<PublicKey, Principal> keys) {
    for (Map.Entry<PublicKey, Principal> entry : keys.entrySet()) {
        Cursor keyObject = keysArray.addObject();
        keyObject.setString("key", KeyUtils.toPem(entry.getKey()));
        keyObject.setString("user", entry.getValue().getName());
    }
}
// Adds the PEM public key in the request body as a deploy key for the application.
// Responds with the application's full deploy-key list after the update.
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
    String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
    Slime root = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withDeployKey(deployKey);
        application.get().deployKeys().stream()
                   .map(KeyUtils::toPem)
                   .forEach(root.setObject().setArray("keys")::addString);
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}
// Removes the PEM public key in the request body from the application's deploy keys.
// Responds with the application's remaining deploy-key list after the update.
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
    String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
    Slime root = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withoutDeployKey(deployKey);
        application.get().deployKeys().stream()
                   .map(KeyUtils::toPem)
                   .forEach(root.setObject().setArray("keys")::addString);
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}
// Applies a partial update to an application. Supported fields:
//   "majorVersion" — pinned major version; 0 clears the pin.
//   "pemDeployKey" — an additional deploy key to register.
// Responds with a message describing what was changed.
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        Inspector majorVersionField = requestObject.field("majorVersion");
        if (majorVersionField.valid()) {
            // 0 is the sentinel for "clear the major version pin".
            Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
            application = application.withMajorVersion(majorVersion);
            messageBuilder.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
        }
        Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
        if (pemDeployKeyField.valid()) {
            String pemDeployKey = pemDeployKeyField.asString();
            PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
            application = application.withDeployKey(deployKey);
            messageBuilder.add("Added deploy key " + pemDeployKey);
        }
        controller.applications().store(application);
    });
    return new MessageResponse(messageBuilder.toString());
}
// Looks up the application, throwing NotExistsException if absent.
private Application getApplication(String tenantName, String applicationName) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    return controller.applications().getApplication(id)
                     .orElseThrow(() -> new NotExistsException(id + " not found"));
}
// Looks up the instance, throwing NotExistsException if absent.
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    return controller.applications().getInstance(id)
                     .orElseThrow(() -> new NotExistsException(id + " not found"));
}
// Lists the nodes allocated to a deployment, with state, orchestration status,
// version, flavor, resources and cluster membership for each.
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = ZoneId.from(environment, region);
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, id);
    Slime slime = new Slime();
    Cursor nodesArray = slime.setObject().setArray("nodes");
    for (Node node : nodes) {
        Cursor nodeObject = nodesArray.addObject();
        nodeObject.setString("hostname", node.hostname().value());
        nodeObject.setString("state", valueOf(node.state()));
        node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
        nodeObject.setString("orchestration", valueOf(node.serviceState()));
        nodeObject.setString("version", node.currentVersion().toString());
        nodeObject.setString("flavor", node.flavor());
        nodeObject.setDouble("vcpu", node.resources().vcpu());
        nodeObject.setDouble("memoryGb", node.resources().memoryGb());
        nodeObject.setDouble("diskGb", node.resources().diskGb());
        nodeObject.setDouble("bandwidthGbps", node.resources().bandwidthGbps());
        nodeObject.setString("diskSpeed", valueOf(node.resources().diskSpeed()));
        nodeObject.setString("storageType", valueOf(node.resources().storageType()));
        // "fastDisk" duplicates diskSpeed as a boolean — kept for response compatibility.
        nodeObject.setBool("fastDisk", node.resources().diskSpeed() == NodeResources.DiskSpeed.fast);
        nodeObject.setString("clusterId", node.clusterId());
        nodeObject.setString("clusterType", valueOf(node.clusterType()));
    }
    return new SlimeJsonResponse(slime);
}
// Wire name for a node state; states not listed here are rejected.
private static String valueOf(Node.State state) {
    switch (state) {
        case failed:      return "failed";
        case parked:      return "parked";
        case dirty:       return "dirty";
        case ready:       return "ready";
        case active:      return "active";
        case inactive:    return "inactive";
        case reserved:    return "reserved";
        case provisioned: return "provisioned";
    }
    throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
}
// Wire name for a node's orchestration (service) state.
private static String valueOf(Node.ServiceState state) {
    switch (state) {
        case expectedUp:     return "expectedUp";
        case allowedDown:    return "allowedDown";
        case unorchestrated: return "unorchestrated";
    }
    throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
}
// Wire name for a node's cluster type.
private static String valueOf(Node.ClusterType type) {
    switch (type) {
        case admin:     return "admin";
        case content:   return "content";
        case container: return "container";
        case combined:  return "combined";
    }
    throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
}
// Wire name for a disk speed.
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
    switch (diskSpeed) {
        case fast: return "fast";
        case slow: return "slow";
        case any:  return "any";
    }
    throw new IllegalArgumentException("Unknown disk speed '" + diskSpeed.name() + "'");
}
// Wire name for a storage type.
private static String valueOf(NodeResources.StorageType storageType) {
    switch (storageType) {
        case remote: return "remote";
        case local:  return "local";
        case any:    return "any";
    }
    throw new IllegalArgumentException("Unknown storage type '" + storageType.name() + "'");
}
/**
 * Streams Vespa logs for the given deployment to the client.
 * Query parameters are forwarded verbatim to the config server.
 */
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = ZoneId.from(environment, region);
    DeploymentId deployment = new DeploymentId(application, zone);
    InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
    return new HttpResponse(200) {
        @Override
        public void render(OutputStream outputStream) throws IOException {
            // Close the upstream stream when rendering completes or fails —
            // the original left it open, leaking the connection/handle.
            try (InputStream logs = logStream) {
                logs.transferTo(outputStream);
            }
        }
    };
}
// Triggers a deployment job. Body fields:
//   "skipTests" — when true, the job is triggered without requiring tests.
//   "reTrigger" — when true, re-triggers the last run instead of force-triggering.
// Responds with a message naming the triggered job(s), or stating nothing was triggered.
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    boolean requireTests = ! requestObject.field("skipTests").asBool();
    boolean reTrigger = requestObject.field("reTrigger").asBool();
    String triggered = reTrigger
                       ? controller.applications().deploymentTrigger()
                                   .reTrigger(id, type).type().jobName()
                       : controller.applications().deploymentTrigger()
                                   .forceTrigger(id, type, request.getJDiscRequest().getUserPrincipal().getName(), requireTests)
                                   .stream().map(job -> job.type().jobName()).collect(joining(", "));
    return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
                                                   : "Triggered " + triggered + " for " + id);
}
// Pauses the given job for the maximum allowed duration.
private HttpResponse pause(ApplicationId id, JobType type) {
    Instant pauseUntil = controller.clock().instant().plus(DeploymentTrigger.maxPause);
    controller.applications().deploymentTrigger().pauseJob(id, type, pauseUntil);
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
// Resumes a previously paused job.
private HttpResponse resume(ApplicationId id, JobType type) {
    controller.applications().deploymentTrigger().resumeJob(id, type);
    String message = type.jobName() + " for " + id + " resumed";
    return new MessageResponse(message);
}
// Serializes an application: identity, links, deployment/outstanding changes, versions,
// per-instance details, deploy keys, metrics, activity and issue references.
// Note: field order matters — responses are compared against golden JSON in tests.
private void toSlime(Cursor object, Application application, HttpRequest request) {
    object.setString("tenant", application.id().tenant().value());
    object.setString("application", application.id().application().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + application.id().tenant().value() +
                                             "/application/" + application.id().application().value() +
                                             "/job/",
                                             request.getUri()).toString());
    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.latestVersion().ifPresent(version -> toSlime(version, object.setObject("latestVersion")));
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    // Change info is taken from the first instance only.
    application.instances().values().stream().findFirst().ifPresent(instance -> {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
    });
    object.setString("compileVersion", compileVersion(application.id()).toFullString());
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    Cursor instancesArray = object.setArray("instances");
    for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                  : application.instances().values())
        toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
// Serializes one instance within an application listing: job status and change blockers
// (only when the deployment spec declares the instance), global endpoints, and deployments.
// Note: field order matters — responses are compared against golden JSON in tests.
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
    object.setString("instance", instance.name().value());
    if (deploymentSpec.instance(instance.name()).isPresent()) {
        List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
                                              .steps(deploymentSpec.requireInstance(instance.name()))
                                              .sortedJobs(status.instanceJobs(instance.name()).values());
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
        Cursor deploymentJobsArray = object.setArray("deploymentJobs");
        for (JobStatus job : jobStatus) {
            Cursor jobObject = deploymentJobsArray.addObject();
            jobObject.setString("type", job.id().type().jobName());
            jobObject.setBool("success", job.isSuccess());
            job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered")));
            job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted")));
            job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing")));
            job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess")));
        }
        Cursor changeBlockers = object.setArray("changeBlockers");
        // instance presence was already checked above; ifPresent kept for safety.
        deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    globalEndpointsToSlime(object, instance);
    // Deployments are ordered by the deployment spec when the instance is declared in it.
    List<Deployment> deployments = deploymentSpec.instance(instance.name())
                                                 .map(spec -> new DeploymentSteps(spec, controller::system))
                                                 .map(steps -> steps.sortedDeployments(instance.deployments().values()))
                                                 .orElse(List.copyOf(instance.deployments().values()));
    Cursor deploymentsArray = object.setArray("deployments");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = deploymentsArray.addObject();
        if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
        if (recurseOverDeployments(request))
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/instance/" + instance.name().value() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
}
// Adds the instance's non-legacy rotation endpoints as "globalRotations", and the first
// assigned rotation id (when present) as "rotationId".
private void globalEndpointsToSlime(Cursor object, Instance instance) {
    Cursor globalRotationsArray = object.setArray("globalRotations");
    controller.routing().endpointsOf(instance.id())
              .requiresRotation()
              .not().legacy()
              .asList().stream()
              .map(endpoint -> endpoint.url().toString())
              .distinct()                       // same dedup-in-order as a LinkedHashSet
              .forEach(globalRotationsArray::addString);
    instance.rotations().stream()
            .map(AssignedRotation::rotationId)
            .findFirst()
            .ifPresent(rotation -> object.setString("rotationId", rotation.asString()));
}
// Serializes a single instance as a top-level resource: identity, source/version info,
// job status and change blockers (when declared in the deployment spec), endpoints,
// deployments (named "instances" here for legacy response compatibility), planned-but-not-
// yet-created production zones, deploy keys, metrics, activity and issue references.
// Note: field order matters — responses are compared against golden JSON in tests.
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
    Application application = status.application();
    object.setString("tenant", instance.id().tenant().value());
    object.setString("application", instance.id().application().value());
    object.setString("instance", instance.id().instance().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + instance.id().tenant().value() +
                                             "/application/" + instance.id().application().value() +
                                             "/instance/" + instance.id().instance().value() + "/job/",
                                             request.getUri()).toString());
    application.latestVersion().ifPresent(version -> {
        sourceRevisionToSlime(version.source(), object.setObject("source"));
        version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
        version.commit().ifPresent(commit -> object.setString("commit", commit));
    });
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    if (application.deploymentSpec().instance(instance.name()).isPresent()) {
        List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
                                              .steps(application.deploymentSpec().requireInstance(instance.name()))
                                              .sortedJobs(status.instanceJobs(instance.name()).values());
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
        Cursor deploymentsArray = object.setArray("deploymentJobs");
        for (JobStatus job : jobStatus) {
            Cursor jobObject = deploymentsArray.addObject();
            jobObject.setString("type", job.id().type().jobName());
            jobObject.setBool("success", job.isSuccess());
            job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered")));
            job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted")));
            job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing")));
            job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess")));
        }
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    object.setString("compileVersion", compileVersion(application.id()).toFullString());
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    globalEndpointsToSlime(object, instance);
    List<Deployment> deployments =
            application.deploymentSpec().instance(instance.name())
                       .map(spec -> new DeploymentSteps(spec, controller::system))
                       .map(steps -> steps.sortedDeployments(instance.deployments().values()))
                       .orElse(List.copyOf(instance.deployments().values()));
    Cursor instancesArray = object.setArray("instances");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = instancesArray.addObject();
        if (deployment.zone().environment() == Environment.prod) {
            if (instance.rotations().size() == 1) {
                toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment),
                        deploymentObject);
            }
            if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
            }
        }
        if (recurseOverDeployments(request))
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", instance.id().instance().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
    // Production deployment zones declared in the spec but not yet deployed to.
    status.jobSteps().keySet().stream()
          .filter(job -> job.application().instance().equals(instance.name()))
          .filter(job -> job.type().isProduction() && job.type().isDeployment())
          .map(job -> job.type().zone(controller.system()))
          .filter(zone -> ! instance.deployments().containsKey(zone))
          .forEach(zone -> {
              Cursor deploymentObject = instancesArray.addObject();
              deploymentObject.setString("environment", zone.environment().value());
              deploymentObject.setString("region", zone.region().value());
          });
    // Singular "pemDeployKey" is kept alongside the plural list for response compatibility.
    application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/**
 * Returns the deployment of the given instance in the given zone, serialized as JSON.
 *
 * @throws NotExistsException if the instance does not exist, or has no deployment in the zone
 */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().getInstance(applicationId)
                                  .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
    ZoneId zone = ZoneId.from(environment, region);
    DeploymentId deploymentId = new DeploymentId(instance.id(), zone);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " is not deployed in " + zone);

    Slime slime = new Slime();
    toSlime(slime.setObject(), deploymentId, deployment, request);
    return new SlimeJsonResponse(slime);
}
/** Writes the platform version and (known) application revision of the given change to the given cursor. */
private void toSlime(Cursor object, Change change) {
    if (change.platform().isPresent())
        object.setString("version", change.platform().get().toString());
    change.application().ifPresent(version -> {
        // Unknown revisions carry no useful information, so they are omitted.
        if ( ! version.isUnknown())
            toSlime(version, object.setObject("revision"));
    });
}
/** Writes the given endpoint, labeled with the given cluster name, to the given cursor. */
private void toSlime(Endpoint endpoint, String cluster, Cursor object) {
object.setString("cluster", cluster);
object.setBool("tls", endpoint.tls());
object.setString("url", endpoint.url().toString());
// Scope and routing method are rendered through the handler's string-mapping helpers.
object.setString("scope", endpointScopeString(endpoint.scope()));
object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
}
/**
 * Serializes a single deployment to JSON: identity, endpoints, node/monitoring links,
 * versions, rotation status, job status and activity/metrics.
 */
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
response.setString("tenant", deploymentId.applicationId().tenant().value());
response.setString("application", deploymentId.applicationId().application().value());
response.setString("instance", deploymentId.applicationId().instance().value());
response.setString("environment", deploymentId.zoneId().environment().value());
response.setString("region", deploymentId.zoneId().region().value());
var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));
// NOTE(review): may be null when the instance is gone but the application remains — guarded further down,
// but endpointsOf(instance) below is called unguarded in the production branch; confirm instance exists there.
var instance = application.instances().get(deploymentId.applicationId().instance());
var endpointArray = response.setArray("endpoints");
var serviceUrls = new ArrayList<URI>();
for (var endpoint : controller.routing().endpointsOf(deploymentId)) {
toSlime(endpoint, endpoint.name(), endpointArray.addObject());
// Only shared-routing endpoints are also exposed through the legacy "serviceUrls" array.
if (endpoint.routingMethod() == RoutingMethod.shared) {
serviceUrls.add(endpoint.url());
}
}
// Instance-level (global) endpoints are only shown for production deployments.
if (deploymentId.zoneId().environment().isProduction()) {
for (var endpoint : controller.routing().endpointsOf(instance).not().legacy()) {
toSlime(endpoint, "", endpointArray.addObject());
}
}
Cursor serviceUrlArray = response.setArray("serviceUrls");
serviceUrls.forEach(url -> serviceUrlArray.addString(url.toString()));
response.setString("nodes", withPath("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/?&recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
response.setString("version", deployment.version().toFullString());
response.setString("revision", deployment.applicationVersion().id());
response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
// Expiry is only present for zones with a deployment time-to-live (typically dev/perf).
controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
.ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));
DeploymentStatus status = controller.jobController().deploymentStatus(application);
application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
sourceRevisionToSlime(deployment.applicationVersion().source(), response);
if (instance != null) {
if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);
// Map this deployment's zone to its job, if any, and report its status:
// complete (nothing left to run), pending (not yet ready), or running.
JobType.from(controller.system(), deployment.zone())
.map(type -> new JobId(instance.id(), type))
.map(status.jobSteps()::get)
.ifPresent(stepStatus -> {
JobControllerApiHandlerHelper.applicationVersionToSlime(
response.setObject("applicationVersion"), deployment.applicationVersion());
if (!status.jobsToRun().containsKey(stepStatus.job().get()))
response.setString("status", "complete");
else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(false))
response.setString("status", "pending");
else response.setString("status", "running");
});
}
Cursor activity = response.setObject("activity");
deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
instant.toEpochMilli()));
deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
instant.toEpochMilli()));
deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
DeploymentMetrics metrics = deployment.metrics();
Cursor metricsObject = response.setObject("metrics");
metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
metricsObject.setDouble("documentCount", metrics.documentCount());
metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}
/** Writes build number, hash and source information of a known application version; no-op for unknown versions. */
private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
    if (applicationVersion.isUnknown())
        return;

    object.setLong("buildNumber", applicationVersion.buildNumber().getAsLong());
    object.setString("hash", applicationVersion.id());
    sourceRevisionToSlime(applicationVersion.source(), object.setObject("source"));
    applicationVersion.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
    applicationVersion.commit().ifPresent(commit -> object.setString("commit", commit));
}
/** Writes repository, branch and commit of the given source revision, if present, to the given cursor. */
private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
    revision.ifPresent(source -> {
        object.setString("gitRepository", source.repository());
        object.setString("gitBranch", source.branch());
        object.setString("gitCommit", source.commit());
    });
}
/** Writes the given rotation state as a "bcpStatus" object on the given cursor. */
private void toSlime(RotationState state, Cursor object) {
    object.setObject("bcpStatus").setString("rotationStatus", rotationStateString(state));
}
/** Writes per-rotation endpoint status for the given deployment to an "endpointStatus" array. */
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
    Cursor endpointStatusArray = object.setArray("endpointStatus");
    for (AssignedRotation assignedRotation : rotations) {
        Cursor entry = endpointStatusArray.addObject();
        var targets = status.of(assignedRotation.rotationId());
        entry.setString("endpointId", assignedRotation.endpointId().id());
        entry.setString("rotationId", assignedRotation.rotationId().asString());
        entry.setString("clusterId", assignedRotation.clusterId().value());
        entry.setString("status", rotationStateString(status.of(assignedRotation.rotationId(), deployment)));
        entry.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
    }
}
/** Returns the monitoring-system (dashboard) URI for the given deployment, from the zone registry. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}
/**
 * Sets the global routing override for the given deployment, taking its global endpoints in or out of service.
 *
 * Both rotation-backed endpoints and endpoints backed by routing policies are updated.
 *
 * @throws NotExistsException if the instance has no deployment in the given zone
 */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = ZoneId.from(environment, region);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null) {
        throw new NotExistsException(instance + " has no deployment in " + zone);
    }
    var deploymentId = new DeploymentId(instance.id(), zone);
    setGlobalRotationStatus(deploymentId, inService, request);
    setGlobalEndpointStatus(deploymentId, inService, request);
    return new MessageResponse(String.format("Successfully set %s in %s %s service",
                                             instance.id().toShortString(), zone, inService ? "in" : "out of"));
}
/** Sets the global routing status for the given deployment. This only applies to global endpoints backed by a cloud service. */
private void setGlobalEndpointStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
    GlobalRouting.Status status = inService ? GlobalRouting.Status.in : GlobalRouting.Status.out;
    GlobalRouting.Agent agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant;
    controller.routing().policies().setGlobalRoutingStatus(deployment, status, agent);
}
/** Sets the global rotation status for the given deployment. This only applies to global endpoints backed by a rotation. */
private void setGlobalRotationStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
    // "reason" is a required part of the request body for audit purposes.
    String reason = mandatory("reason", toSlime(request.getData()).get()).asString();
    GlobalRouting.Agent agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant;
    EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
    long timestamp = controller.clock().instant().getEpochSecond();
    controller.routing().setGlobalRotationStatus(deployment, new EndpointStatus(status, reason, agent.name(), timestamp));
}
/** Returns the rotation-backed global routing status for the given deployment. */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
ZoneId.from(environment, region));
Slime slime = new Slime();
Cursor array = slime.setObject().setArray("globalrotationoverride");
controller.routing().globalRotationStatus(deploymentId)
.forEach((endpoint, status) -> {
// NOTE(review): each endpoint adds TWO array elements — the upstream-id string, then a status
// object. This flat layout looks accidental but is the established wire format; do not change.
array.addString(endpoint.upstreamIdOf(deploymentId));
Cursor statusObject = array.addObject();
statusObject.setString("status", status.getStatus().name());
// Reason and agent may be null; they are rendered as empty strings.
statusObject.setString("reason", status.getReason() == null ? "" : status.getReason());
statusObject.setString("agent", status.getAgent() == null ? "" : status.getAgent());
statusObject.setLong("timestamp", status.getEpoch());
});
return new SlimeJsonResponse(slime);
}
/**
 * Returns the BCP status of the rotation identified by the given endpoint id (or the instance's
 * single rotation when absent) for the given deployment.
 *
 * @throws NotExistsException if the instance has no deployment in the given zone
 */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().requireInstance(applicationId);
    ZoneId zone = ZoneId.from(environment, region);
    RotationId rotation = findRotationId(instance, endpointId);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " has no deployment in " + zone);

    Slime slime = new Slime();
    toSlime(instance.rotationStatus().of(rotation, deployment), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/**
 * Returns metering data for the given application: the current resource usage rate,
 * aggregates for this and last month, and a per-instance snapshot history per resource type.
 */
private HttpResponse metering(String tenant, String application, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    MeteringData meteringData = controller.serviceRegistry()
                                          .meteringService()
                                          .getMeteringData(TenantName.from(tenant), ApplicationName.from(application));
    setResources(root.setObject("currentrate"), meteringData.getCurrentSnapshot());
    setResources(root.setObject("thismonth"), meteringData.getThisMonth());
    setResources(root.setObject("lastmonth"), meteringData.getLastMonth());

    // Per-instance time series, split by resource type: details.{cpu,mem,disk}.<instance>.data[].
    Cursor details = root.setObject("details");
    Cursor detailsCpu = details.setObject("cpu");
    Cursor detailsMem = details.setObject("mem");
    Cursor detailsDisk = details.setObject("disk");
    meteringData.getSnapshotHistory().forEach((applicationId, snapshots) -> {
        String instanceName = applicationId.instance().value();
        Cursor detailsCpuData = detailsCpu.setObject(instanceName).setArray("data");
        Cursor detailsMemData = detailsMem.setObject(instanceName).setArray("data");
        Cursor detailsDiskData = detailsDisk.setObject(instanceName).setArray("data");
        for (ResourceSnapshot snapshot : snapshots) {
            long unixMillis = snapshot.getTimestamp().toEpochMilli();
            addMeasurement(detailsCpuData, unixMillis, snapshot.getCpuCores());
            addMeasurement(detailsMemData, unixMillis, snapshot.getMemoryGb());
            addMeasurement(detailsDiskData, unixMillis, snapshot.getDiskGb());
        }
    });
    return new SlimeJsonResponse(slime);
}

/** Writes the cpu/mem/disk fields of the given resource allocation to the given cursor. */
private static void setResources(Cursor object, ResourceAllocation allocation) {
    object.setDouble("cpu", allocation.getCpuCores());
    object.setDouble("mem", allocation.getMemoryGb());
    object.setDouble("disk", allocation.getDiskGb());
}

/** Adds a { unixms, value } data point to the given array. */
private static void addMeasurement(Cursor dataArray, long unixMillis, double value) {
    Cursor point = dataArray.addObject();
    point.setLong("unixms", unixMillis);
    point.setDouble("value", value);
}
/** Returns the change (platform and/or application revision) currently rolling out for the given instance, if any. */
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    Change change = instance.change();
    if ( ! change.isEmpty()) {
        change.platform().ifPresent(platform -> root.setString("platform", platform.toString()));
        change.application().ifPresent(version -> root.setString("application", version.id()));
        root.setBool("pinned", change.isPinned());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns whether the given deployment is currently suspended. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 ZoneId.from(environment, region));
    Slime slime = new Slime();
    slime.setObject().setBool("suspended", controller.applications().isSuspended(deploymentId));
    return new SlimeJsonResponse(slime);
}
/** Returns the service view (clusters and services) of the given deployment. */
private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
    ZoneId zone = ZoneId.from(environment, region);
    ApplicationId applicationId = new ApplicationId.Builder().tenant(tenantName)
                                                             .applicationName(applicationName)
                                                             .instanceName(instanceName)
                                                             .build();
    ServiceApiResponse response = new ServiceApiResponse(zone,
                                                         applicationId,
                                                         controller.zoneRegistry().getConfigServerApiUris(zone),
                                                         request.getUri());
    response.setResponse(applicationView);
    return response;
}
/** Proxies a service API request for the named service in the given deployment. */
private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), ZoneId.from(environment, region));
    // Cluster controller status pages are returned as HTML, not JSON.
    if ("container-clustercontroller".equals(serviceName) && restPath.contains("/status/")) {
        return new HtmlResponse(controller.serviceRegistry().configServer().getClusterControllerStatus(deploymentId, restPath));
    }
    Map<?, ?> result = controller.serviceRegistry().configServer().getServiceApiResponse(deploymentId, serviceName, restPath);
    ServiceApiResponse response = new ServiceApiResponse(deploymentId.zoneId(),
                                                         deploymentId.applicationId(),
                                                         controller.zoneRegistry().getConfigServerApiUris(deploymentId.zoneId()),
                                                         request.getUri());
    response.setResponse(result, serviceName, restPath);
    return response;
}
/**
 * Updates the given tenant with the specification and credentials in the request,
 * and returns the updated tenant.
 *
 * @throws NotExistsException if the tenant does not exist
 */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName); // 404 rather than creating on update.
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    // Reuse the already-parsed tenant name instead of re-parsing it.
    return tenant(controller.tenants().require(tenant), request);
}
/** Creates a tenant from the specification and credentials in the request, and returns it. */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().create(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    // Reuse the already-parsed tenant name instead of re-parsing it.
    return tenant(controller.tenants().require(tenant), request);
}
/** Creates an application (without instances) under the given tenant, and returns its id as JSON. */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    // The created Application is not needed here; the response only contains the id.
    controller.applications().createApplication(id, credentials);
    Slime slime = new Slime();
    toSlime(id, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Creates an instance of the given application, creating the application first if it does not exist. */
private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    if (controller.applications().getApplication(applicationId).isEmpty())
        createApplication(tenantName, applicationName, request);

    ApplicationId instanceId = applicationId.instance(instanceName);
    controller.applications().createInstance(instanceId);
    Slime slime = new Slime();
    toSlime(instanceId, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/**
 * Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9".
 * An empty version means the current system version. Rejects versions not active in this system.
 */
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
request = controller.auditLogger().log(request);
String versionString = readToString(request.getData());
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
StringBuilder response = new StringBuilder();
// The response message is built inside the lock callback, so state is read consistently.
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
Version version = Version.fromString(versionString);
// An empty version is a request for the current system version.
if (version.equals(Version.emptyVersion))
version = controller.systemVersion();
if ( ! systemHasVersion(version))
throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
"Version is not active in this system. " +
"Active versions: " + controller.versionStatus().versions()
.stream()
.map(VespaVersion::versionNumber)
.map(Version::toString)
.collect(joining(", ")));
Change change = Change.of(version);
// Pinning prevents the upgrader from changing this version later.
if (pin)
change = change.withPin();
controller.applications().deploymentTrigger().forceChange(id, change);
response.append("Triggered ").append(change).append(" for ").append(id);
});
return new MessageResponse(response.toString());
}
/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    controller.auditLogger().log(request);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        // Fail with a helpful message, rather than an unchecked Optional.get(), when nothing was ever submitted.
        Change change = Change.of(application.get().latestVersion()
                                             .orElseThrow(() -> new IllegalArgumentException(
                                                     "No application package has been submitted for " + id)));
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder message = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Change change = application.get().require(id.instance()).change();
        if (change.isEmpty()) {
            message.append("No deployment in progress for ").append(id).append(" at this time");
            return;
        }
        // Throws IllegalArgumentException (→ 400) if the choice is not a known ChangesToCancel value.
        ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase());
        controller.applications().deploymentTrigger().cancelChange(id, cancel);
        message.append("Changed deployment from '").append(change)
               .append("' to '").append(controller.applications().requireInstance(id).change())
               .append("' for ").append(id);
    });
    return new MessageResponse(message.toString());
}
/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               ZoneId.from(environment, region));
    // An absent "hostname" property means restart all hosts of the deployment.
    String hostnameProperty = request.getProperty("hostname");
    Optional<Hostname> hostname = hostnameProperty == null ? Optional.empty()
                                                           : Optional.of(new Hostname(hostnameProperty));
    controller.applications().restart(deployment, hostname);
    return new MessageResponse("Requested restart of " + deployment);
}
/**
 * Starts a direct job deployment of the given application package to the given job's zone.
 * Only manually deployed environments are allowed, unless the caller is an operator.
 */
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
    if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");

    Map<String, byte[]> dataParts = parseDataParts(request);
    // Use the shared constant for the form-part name consistently, instead of mixing it with a literal.
    if ( ! dataParts.containsKey(EnvironmentResource.APPLICATION_ZIP))
        throw new IllegalArgumentException("Missing required form part '" + EnvironmentResource.APPLICATION_ZIP + "'");

    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                     Optional.of(id.instance()),
                                                                     Optional.of(type.zone(controller.system())),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));

    // An explicit platform version may be given in the optional "deployOptions" part.
    Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions"))
                                        .map(json -> SlimeUtils.jsonToSlime(json).get())
                                        .flatMap(options -> optional("vespaVersion", options))
                                        .map(Version::fromString);

    controller.jobController().deploy(id, type, version, applicationPackage);
    RunId runId = controller.jobController().last(id, type).get().id();
    Slime slime = new Slime();
    Cursor rootObject = slime.setObject();
    rootObject.setString("message", "Deployment started in " + runId +
                                    ". This may take about 15 minutes the first time.");
    rootObject.setLong("run", runId.number());
    return new SlimeJsonResponse(slime);
}
/**
 * Deploys an application to the given zone, from the request's multipart form data.
 * Handles three cases: the system proxy application, a redeployment of an existing deployment,
 * and a normal deployment from an application package or a source revision.
 */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = ZoneId.from(environment, region);
Map<String, byte[]> dataParts = parseDataParts(request);
if ( ! dataParts.containsKey("deployOptions"))
return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
/*
* Special handling of the proxy application (the only system application with an application package)
* Setting any other deployOptions here is not supported for now (e.g. specifying version), but
* this might be handy later to handle emergency downgrades.
*/
boolean isZoneApplication = SystemApplication.proxy.id().equals(applicationId);
if (isZoneApplication) {
// The proxy application always deploys the determined system version; an explicit version is rejected.
String versionStr = deployOptions.field("vespaVersion").asString();
boolean versionPresent = !versionStr.isEmpty() && !versionStr.equals("null");
if (versionPresent) {
throw new RuntimeException("Version not supported for system applications");
}
if (controller.versionStatus().isUpgrading()) {
throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
}
Optional<VespaVersion> systemVersion = controller.versionStatus().systemVersion();
if (systemVersion.isEmpty()) {
throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
}
ActivateResult result = controller.applications()
.deploySystemApplicationPackage(SystemApplication.proxy, zone, systemVersion.get().versionNumber());
return new SlimeJsonResponse(toSlime(result));
}
/*
* Normal applications from here
*/
Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip"))
.map(ApplicationPackage::new);
Optional<Application> application = controller.applications().getApplication(TenantAndApplicationId.from(applicationId));
// A source revision plus build number identifies a previously built package instead of an uploaded one.
Inspector sourceRevision = deployOptions.field("sourceRevision");
Inspector buildNumber = deployOptions.field("buildNumber");
if (sourceRevision.valid() != buildNumber.valid())
throw new IllegalArgumentException("Source revision and build number must both be provided, or not");
Optional<ApplicationVersion> applicationVersion = Optional.empty();
if (sourceRevision.valid()) {
if (applicationPackage.isPresent())
throw new IllegalArgumentException("Application version and application package can't both be provided.");
applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision),
buildNumber.asLong()));
applicationPackage = Optional.of(controller.applications().getApplicationPackage(applicationId,
applicationVersion.get()));
}
// A direct deploy with no package, version, or platform given redeploys what is already in the zone.
boolean deployDirectly = deployOptions.field("deployDirectly").asBool();
Optional<Version> vespaVersion = optional("vespaVersion", deployOptions).map(Version::new);
if (deployDirectly && applicationPackage.isEmpty() && applicationVersion.isEmpty() && vespaVersion.isEmpty()) {
Optional<Deployment> deployment = controller.applications().getInstance(applicationId)
.map(Instance::deployments)
.flatMap(deployments -> Optional.ofNullable(deployments.get(zone)));
if(deployment.isEmpty())
throw new IllegalArgumentException("Can't redeploy application, no deployment currently exist");
ApplicationVersion version = deployment.get().applicationVersion();
if(version.isUnknown())
throw new IllegalArgumentException("Can't redeploy application, application version is unknown");
applicationVersion = Optional.of(version);
vespaVersion = Optional.of(deployment.get().version());
applicationPackage = Optional.of(controller.applications().getApplicationPackage(applicationId,
applicationVersion.get()));
}
DeployOptions deployOptionsJsonClass = new DeployOptions(deployDirectly,
vespaVersion,
deployOptions.field("ignoreValidationErrors").asBool(),
deployOptions.field("deployCurrentVersion").asBool());
// Verify identity configuration of the package (if any) before handing it to the deployer.
applicationPackage.ifPresent(aPackage -> controller.applications().verifyApplicationIdentityConfiguration(applicationId.tenant(),
Optional.of(applicationId.instance()),
Optional.of(zone),
aPackage,
Optional.of(requireUserPrincipal(request))));
ActivateResult result = controller.applications().deploy(applicationId,
zone,
applicationPackage,
applicationVersion,
deployOptionsJsonClass);
return new SlimeJsonResponse(toSlime(result));
}
/** Deletes the given tenant, using credentials from the request; 404 when the tenant does not exist. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");

    TenantName name = tenant.get().name();
    Credentials credentials = accessControlRequests.credentials(name,
                                                                toSlime(request.getData()).get(),
                                                                request.getJDiscRequest());
    controller.tenants().delete(name, credentials);
    return tenant(tenant.get(), request);
}
/** Deletes the given application, using credentials from the request. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Inspector requestObject = toSlime(request.getData()).get();
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    controller.applications().deleteApplication(id, credentials);
    return new MessageResponse("Deleted application " + id);
}
/** Deletes the given instance, and the application itself when this was its last instance. */
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    ApplicationId instanceId = id.instance(instanceName);
    controller.applications().deleteInstance(instanceId);
    if (controller.applications().requireApplication(id).instances().isEmpty()) {
        Credentials credentials = accessControlRequests.credentials(id.tenant(),
                                                                    toSlime(request.getData()).get(),
                                                                    request.getJDiscRequest());
        controller.applications().deleteApplication(id, credentials);
    }
    return new MessageResponse("Deleted instance " + instanceId.toFullString());
}
/** Deactivates the deployment of the given instance in the given zone. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = ZoneId.from(environment, region);
    DeploymentId id = new DeploymentId(applicationId, zone);
    controller.applications().deactivate(applicationId, zone);
    return new MessageResponse("Deactivated " + id);
}
/** Returns test config for indicated job, with production deployments of the default instance. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
    ApplicationId defaultInstanceId = TenantAndApplicationId.from(id).defaultInstance();
    HashSet<DeploymentId> deployments = new HashSet<>();
    controller.applications().getInstance(defaultInstanceId)
              .ifPresent(instance -> instance.productionDeployments().keySet()
                                             .forEach(zone -> deployments.add(new DeploymentId(defaultInstanceId, zone))));
    // For non-production jobs, the tested zone itself is also included.
    var testedZone = type.zone(controller.system());
    if ( ! type.isProduction())
        deployments.add(new DeploymentId(id, testedZone));

    return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
                                                                  type,
                                                                  false,
                                                                  controller.routing().zoneEndpointsOf(deployments),
                                                                  controller.applications().contentClustersByZone(deployments)));
}
/**
 * Parses a source revision from the given JSON object.
 *
 * @throws IllegalArgumentException unless "repository", "branch" and "commit" are all present
 */
private static SourceRevision toSourceRevision(Inspector object) {
    Inspector repository = object.field("repository");
    Inspector branch = object.field("branch");
    Inspector commit = object.field("commit");
    if ( ! (repository.valid() && branch.valid() && commit.valid())) {
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    }
    return new SourceRevision(repository.asString(), branch.asString(), commit.asString());
}
/** Returns the tenant with the given name, or throws a NotExistsException. */
private Tenant getTenantOrThrow(String tenantName) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty())
        throw new NotExistsException(new TenantId(tenantName));

    return tenant.get();
}
private void toSlime(Cursor object, Tenant tenant, HttpRequest request) {
object.setString("tenant", tenant.name().value());
object.setString("type", tenantType(tenant));
List<Application> applications = controller.applications().asList(tenant.name());
switch (tenant.type()) {
case athenz:
AthenzTenant athenzTenant = (AthenzTenant) tenant;
object.setString("athensDomain", athenzTenant.domain().getName());
object.setString("property", athenzTenant.property().id());
athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
athenzTenant.contact().ifPresent(c -> {
object.setString("propertyUrl", c.propertyUrl().toString());
object.setString("contactsUrl", c.url().toString());
object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
Cursor contactsArray = object.setArray("contacts");
c.persons().forEach(persons -> {
Cursor personArray = contactsArray.addArray();
persons.forEach(personArray::addString);
});
});
break;
case cloud: {
CloudTenant cloudTenant = (CloudTenant) tenant;
Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
cloudTenant.developerKeys().forEach((key, user) -> {
Cursor keyObject = pemDeveloperKeysArray.addObject();
keyObject.setString("key", KeyUtils.toPem(key));
keyObject.setString("user", user.getName());
});
break;
}
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
Cursor applicationArray = object.setArray("applications");
for (Application application : applications) {
DeploymentStatus status = controller.jobController().deploymentStatus(application);
for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
: application.instances().values())
if (recurseOverApplications(request))
toSlime(applicationArray.addObject(), instance, status, request);
else
toSlime(instance.id(), applicationArray.addObject(), request);
}
}
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
object.setString("tenant", tenant.name().value());
Cursor metaData = object.setObject("metaData");
metaData.setString("type", tenantType(tenant));
switch (tenant.type()) {
case athenz:
AthenzTenant athenzTenant = (AthenzTenant) tenant;
metaData.setString("athensDomain", athenzTenant.domain().getName());
metaData.setString("property", athenzTenant.property().id());
break;
case cloud: break;
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
private URI withPath(String newPath, URI uri) {
try {
return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null);
}
catch (URISyntaxException e) {
throw new RuntimeException("Will not happen", e);
}
}
private long asLong(String valueOrNull, long defaultWhenNull) {
if (valueOrNull == null) return defaultWhenNull;
try {
return Long.parseLong(valueOrNull);
}
catch (NumberFormatException e) {
throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'");
}
}
private void toSlime(Run run, Cursor object) {
object.setLong("id", run.id().number());
object.setString("version", run.versions().targetPlatform().toFullString());
if ( ! run.versions().targetApplication().isUnknown())
toSlime(run.versions().targetApplication(), object.setObject("revision"));
object.setString("reason", "unknown reason");
object.setLong("at", run.end().orElse(run.start()).toEpochMilli());
}
private Slime toSlime(InputStream jsonStream) {
try {
byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
return SlimeUtils.jsonToSlime(jsonBytes);
} catch (IOException e) {
throw new RuntimeException();
}
}
private static Principal requireUserPrincipal(HttpRequest request) {
Principal principal = request.getJDiscRequest().getUserPrincipal();
if (principal == null) throw new InternalServerErrorException("Expected a user principal");
return principal;
}
private Inspector mandatory(String key, Inspector object) {
if ( ! object.field(key).valid())
throw new IllegalArgumentException("'" + key + "' is missing");
return object.field(key);
}
private Optional<String> optional(String key, Inspector object) {
return SlimeUtils.optionalString(object.field(key));
}
private static String path(Object... elements) {
return Joiner.on("/").join(elements);
}
private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
object.setString("tenant", id.tenant().value());
object.setString("application", id.application().value());
object.setString("url", withPath("/application/v4" +
"/tenant/" + id.tenant().value() +
"/application/" + id.application().value(),
request.getUri()).toString());
}
private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
object.setString("tenant", id.tenant().value());
object.setString("application", id.application().value());
object.setString("instance", id.instance().value());
object.setString("url", withPath("/application/v4" +
"/tenant/" + id.tenant().value() +
"/application/" + id.application().value() +
"/instance/" + id.instance().value(),
request.getUri()).toString());
}
private Slime toSlime(ActivateResult result) {
Slime slime = new Slime();
Cursor object = slime.setObject();
object.setString("revisionId", result.revisionId().id());
object.setLong("applicationZipSize", result.applicationZipSizeBytes());
Cursor logArray = object.setArray("prepareMessages");
if (result.prepareResponse().log != null) {
for (Log logMessage : result.prepareResponse().log) {
Cursor logObject = logArray.addObject();
logObject.setLong("time", logMessage.time);
logObject.setString("level", logMessage.level);
logObject.setString("message", logMessage.message);
}
}
Cursor changeObject = object.setObject("configChangeActions");
Cursor restartActionsArray = changeObject.setArray("restart");
for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
Cursor restartActionObject = restartActionsArray.addObject();
restartActionObject.setString("clusterName", restartAction.clusterName);
restartActionObject.setString("clusterType", restartAction.clusterType);
restartActionObject.setString("serviceType", restartAction.serviceType);
serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
}
Cursor refeedActionsArray = changeObject.setArray("refeed");
for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
Cursor refeedActionObject = refeedActionsArray.addObject();
refeedActionObject.setString("name", refeedAction.name);
refeedActionObject.setBool("allowed", refeedAction.allowed);
refeedActionObject.setString("documentType", refeedAction.documentType);
refeedActionObject.setString("clusterName", refeedAction.clusterName);
serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
}
return slime;
}
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
for (ServiceInfo serviceInfo : serviceInfoList) {
Cursor serviceInfoObject = array.addObject();
serviceInfoObject.setString("serviceName", serviceInfo.serviceName);
serviceInfoObject.setString("serviceType", serviceInfo.serviceType);
serviceInfoObject.setString("configId", serviceInfo.configId);
serviceInfoObject.setString("hostName", serviceInfo.hostName);
}
}
private void stringsToSlime(List<String> strings, Cursor array) {
for (String string : strings)
array.addString(string);
}
private String readToString(InputStream stream) {
Scanner scanner = new Scanner(stream).useDelimiter("\\A");
if ( ! scanner.hasNext()) return null;
return scanner.next();
}
private boolean systemHasVersion(Version version) {
return controller.versionStatus().versions().stream().anyMatch(v -> v.versionNumber().equals(version));
}
private static boolean recurseOverTenants(HttpRequest request) {
return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive"));
}
private static boolean recurseOverApplications(HttpRequest request) {
return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive"));
}
private static boolean recurseOverDeployments(HttpRequest request) {
return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive"));
}
private static boolean showOnlyProductionInstances(HttpRequest request) {
return "true".equals(request.getProperty("production"));
}
private static String tenantType(Tenant tenant) {
switch (tenant.type()) {
case athenz: return "ATHENS";
case cloud: return "CLOUD";
default: throw new IllegalArgumentException("Unknown tenant type: " + tenant.getClass().getSimpleName());
}
}
private static ApplicationId appIdFromPath(Path path) {
return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
}
private static JobType jobTypeFromPath(Path path) {
return JobType.fromJobName(path.get("jobtype"));
}
private static RunId runIdFromPath(Path path) {
long number = Long.parseLong(path.get("number"));
return new RunId(appIdFromPath(path), jobTypeFromPath(path), number);
}
private HttpResponse submit(String tenant, String application, HttpRequest request) {
Map<String, byte[]> dataParts = parseDataParts(request);
Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
long projectId = Math.max(1, submitOptions.field("projectId").asLong());
Optional<String> repository = optional("repository", submitOptions);
Optional<String> branch = optional("branch", submitOptions);
Optional<String> commit = optional("commit", submitOptions);
Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
: Optional.empty();
Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
Optional<String> authorEmail = optional("authorEmail", submitOptions);
sourceUrl.map(URI::create).ifPresent(url -> {
if (url.getHost() == null || url.getScheme() == null)
throw new IllegalArgumentException("Source URL must include scheme and host");
});
ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
Optional.empty(),
Optional.empty(),
applicationPackage,
Optional.of(requireUserPrincipal(request)));
return JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
tenant,
application,
sourceRevision,
authorEmail,
sourceUrl,
commit,
projectId,
applicationPackage,
dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP));
}
private static Map<String, byte[]> parseDataParts(HttpRequest request) {
String contentHash = request.getHeader("x-Content-Hash");
if (contentHash == null)
return new MultipartParser().parse(request);
DigestInputStream digester = Signatures.sha256Digester(request.getData());
var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
return dataParts;
}
private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
if (instance.rotations().isEmpty()) {
throw new NotExistsException("global rotation does not exist for " + instance);
}
if (endpointId.isPresent()) {
return instance.rotations().stream()
.filter(r -> r.endpointId().id().equals(endpointId.get()))
.map(AssignedRotation::rotationId)
.findFirst()
.orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() +
" does not exist for " + instance));
} else if (instance.rotations().size() > 1) {
throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
}
return instance.rotations().get(0).rotationId();
}
private static String rotationStateString(RotationState state) {
switch (state) {
case in: return "IN";
case out: return "OUT";
}
return "UNKNOWN";
}
private static String endpointScopeString(Endpoint.Scope scope) {
switch (scope) {
case global: return "global";
case zone: return "zone";
}
throw new IllegalArgumentException("Unknown endpoint scope " + scope);
}
private static String routingMethodString(RoutingMethod method) {
switch (method) {
case exclusive: return "exclusive";
case shared: return "shared";
case sharedLayer4: return "sharedLayer4";
}
throw new IllegalArgumentException("Unknown routing method " + method);
}
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName))
.filter(cls::isInstance)
.map(cls::cast)
.orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request"));
}
/** Returns whether given request is by an operator */
private static boolean isOperator(HttpRequest request) {
var securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
return securityContext.roles().stream()
.map(Role::definition)
.anyMatch(definition -> definition == RoleDefinition.hostedOperator);
}
} |
😆 | private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
logger.log(INFO, "Deployment expired before installation was successful.");
return Optional.of(installationFailed);
}
Versions versions = controller.jobController().run(id).get().versions();
Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform();
Run run = controller.jobController().run(id).get();
Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(id.application(), id.type().zone(controller.system())),
Optional.of(platform));
if (services.isEmpty()) {
logger.log("Config status not currently available -- will retry.");
return Optional.empty();
}
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
id.application(),
ImmutableSet.of(active, reserved));
List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
NodeList nodeList = NodeList.of(nodes, parents, services.get());
boolean firstTick = run.convergenceSummary().isEmpty();
if (firstTick) {
logger.log("
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
}
ConvergenceSummary summary = nodeList.summary();
if (summary.converged()) {
controller.jobController().locked(id, lockedRun -> lockedRun.withSummary(null));
if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) {
if (containersAreUp(id.application(), id.type().zone(controller.system()), logger)) {
logger.log("Installation succeeded!");
return Optional.of(running);
}
}
else if (timedOut(id, deployment.get(), endpointTimeout)) {
logger.log(WARNING, "Endpoints failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
return Optional.of(error);
}
}
String failureReason = null;
NodeList suspendedTooLong = nodeList.suspendedSince(controller.clock().instant().minus(nodesDownTimeout));
if ( ! suspendedTooLong.isEmpty()) {
failureReason = "Some nodes have been suspended for more than " + nodesDownTimeout.toMinutes() + " minutes:\n" +
suspendedTooLong.asList().stream().map(node -> node.node().hostname().value()).collect(joining("\n"));
}
if (run.noNodesDownSince()
.map(since -> since.isBefore(controller.clock().instant().minus(noNodesDownTimeout)))
.orElse(false)) {
if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0)
failureReason = "No nodes allowed to suspend to progress installation for " + noNodesDownTimeout.toMinutes() + " minutes.";
else
failureReason = "Nodes not able to start with new application package.";
}
Duration timeout = JobRunner.jobTimeout.minusHours(1);
if (timedOut(id, deployment.get(), timeout)) {
failureReason = "Installation failed to complete within " + timeout.toHours() + "hours!";
}
if (failureReason != null) {
logger.log("
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
logger.log("
logger.log(nodeList.not().in(nodeList.not().needsNewConfig()
.not().needsPlatformUpgrade()
.not().needsReboot()
.not().needsRestart()
.not().needsFirmwareUpgrade()
.not().needsOsUpgrade())
.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
logger.log(INFO, failureReason);
return Optional.of(installationFailed);
}
if ( ! firstTick)
logger.log(nodeList.expectedDown().concat(nodeList.needsNewConfig()).asList().stream()
.distinct()
.flatMap(node -> nodeDetails(node, false))
.collect(toList()));
controller.jobController().locked(id, lockedRun -> {
Instant noNodesDownSince = nodeList.allowedDown().size() == 0 ? lockedRun.noNodesDownSince().orElse(controller.clock().instant()) : null;
return lockedRun.noNodesDownSince(noNodesDownSince).withSummary(summary);
});
return Optional.empty();
} | .not().needsOsUpgrade()) | private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
logger.log(INFO, "Deployment expired before installation was successful.");
return Optional.of(installationFailed);
}
Versions versions = controller.jobController().run(id).get().versions();
Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform();
Run run = controller.jobController().run(id).get();
Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(id.application(), id.type().zone(controller.system())),
Optional.of(platform));
if (services.isEmpty()) {
logger.log("Config status not currently available -- will retry.");
return Optional.empty();
}
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
id.application(),
ImmutableSet.of(active, reserved));
List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
NodeList nodeList = NodeList.of(nodes, parents, services.get());
boolean firstTick = run.convergenceSummary().isEmpty();
if (firstTick) {
logger.log("
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
}
ConvergenceSummary summary = nodeList.summary();
if (summary.converged()) {
controller.jobController().locked(id, lockedRun -> lockedRun.withSummary(null));
if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) {
if (containersAreUp(id.application(), id.type().zone(controller.system()), logger)) {
logger.log("Installation succeeded!");
return Optional.of(running);
}
}
else if (timedOut(id, deployment.get(), endpointTimeout)) {
logger.log(WARNING, "Endpoints failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
return Optional.of(error);
}
}
String failureReason = null;
NodeList suspendedTooLong = nodeList.suspendedSince(controller.clock().instant().minus(nodesDownTimeout));
if ( ! suspendedTooLong.isEmpty()) {
failureReason = "Some nodes have been suspended for more than " + nodesDownTimeout.toMinutes() + " minutes:\n" +
suspendedTooLong.asList().stream().map(node -> node.node().hostname().value()).collect(joining("\n"));
}
if (run.noNodesDownSince()
.map(since -> since.isBefore(controller.clock().instant().minus(noNodesDownTimeout)))
.orElse(false)) {
if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0)
failureReason = "No nodes allowed to suspend to progress installation for " + noNodesDownTimeout.toMinutes() + " minutes.";
else
failureReason = "Nodes not able to start with new application package.";
}
Duration timeout = JobRunner.jobTimeout.minusHours(1);
if (timedOut(id, deployment.get(), timeout)) {
failureReason = "Installation failed to complete within " + timeout.toHours() + "hours!";
}
if (failureReason != null) {
logger.log("
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
logger.log("
logger.log(nodeList.not().in(nodeList.not().needsNewConfig()
.not().needsPlatformUpgrade()
.not().needsReboot()
.not().needsRestart()
.not().needsFirmwareUpgrade()
.not().needsOsUpgrade())
.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
logger.log(INFO, failureReason);
return Optional.of(installationFailed);
}
if ( ! firstTick)
logger.log(nodeList.expectedDown().concat(nodeList.needsNewConfig()).asList().stream()
.distinct()
.flatMap(node -> nodeDetails(node, false))
.collect(toList()));
controller.jobController().locked(id, lockedRun -> {
Instant noNodesDownSince = nodeList.allowedDown().size() == 0 ? lockedRun.noNodesDownSince().orElse(controller.clock().instant()) : null;
return lockedRun.noNodesDownSince(noNodesDownSince).withSummary(summary);
});
return Optional.empty();
} | class InternalStepRunner implements StepRunner {
private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName());
static final NodeResources DEFAULT_TESTER_RESOURCES =
new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any);
static final NodeResources DEFAULT_TESTER_RESOURCES_AWS =
new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any);
static final Duration capacityTimeout = Duration.ofMinutes(5);
static final Duration endpointTimeout = Duration.ofMinutes(15);
static final Duration endpointCertificateTimeout = Duration.ofMinutes(15);
static final Duration testerTimeout = Duration.ofMinutes(30);
static final Duration nodesDownTimeout = Duration.ofMinutes(60);
static final Duration noNodesDownTimeout = Duration.ofMinutes(120);
static final Duration certificateTimeout = Duration.ofMinutes(300);
private final Controller controller;
private final TestConfigSerializer testConfigSerializer;
private final DeploymentFailureMails mails;
public InternalStepRunner(Controller controller) {
this.controller = controller;
this.testConfigSerializer = new TestConfigSerializer(controller.system());
this.mails = new DeploymentFailureMails(controller.zoneRegistry());
}
@Override
public Optional<RunStatus> run(LockedStep step, RunId id) {
DualLogger logger = new DualLogger(id, step.get());
try {
switch (step.get()) {
case deployTester: return deployTester(id, logger);
case deployInitialReal: return deployInitialReal(id, logger);
case installInitialReal: return installInitialReal(id, logger);
case deployReal: return deployReal(id, logger);
case installTester: return installTester(id, logger);
case installReal: return installReal(id, logger);
case startStagingSetup: return startTests(id, true, logger);
case endStagingSetup:
case endTests: return endTests(id, logger);
case startTests: return startTests(id, false, logger);
case copyVespaLogs: return copyVespaLogs(id, logger);
case deactivateReal: return deactivateReal(id, logger);
case deactivateTester: return deactivateTester(id, logger);
case report: return report(id, logger);
default: throw new AssertionError("Unknown step '" + step + "'!");
}
}
catch (UncheckedIOException e) {
logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e);
return Optional.empty();
}
catch (RuntimeException e) {
logger.log(WARNING, "Unexpected exception running " + id, e);
if (step.get().alwaysRun()) {
logger.log("Will keep trying, as this is a cleanup step.");
return Optional.empty();
}
return Optional.of(error);
}
}
private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) {
Versions versions = controller.jobController().run(id).get().versions();
logger.log("Deploying platform version " +
versions.sourcePlatform().orElse(versions.targetPlatform()) +
" and application version " +
versions.sourceApplication().orElse(versions.targetApplication()).id() + " ...");
return deployReal(id, true, logger);
}
private Optional<RunStatus> deployReal(RunId id, DualLogger logger) {
Versions versions = controller.jobController().run(id).get().versions();
logger.log("Deploying platform version " + versions.targetPlatform() +
" and application version " + versions.targetApplication().id() + " ...");
return deployReal(id, false, logger);
}
private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, DualLogger logger) {
return deploy(id.application(),
id.type(),
() -> controller.applications().deploy2(id.job(), setTheStage),
controller.jobController().run(id).get()
.stepInfo(setTheStage ? deployInitialReal : deployReal).get()
.startTime().get(),
logger);
}
private Optional<RunStatus> deployTester(RunId id, DualLogger logger) {
Version platform = controller.systemVersion();
logger.log("Deploying the tester container on platform " + platform + " ...");
return deploy(id.tester().id(),
id.type(),
() -> controller.applications().deployTester(id.tester(),
testerPackage(id),
id.type().zone(controller.system()),
platform),
controller.jobController().run(id).get()
.stepInfo(deployTester).get()
.startTime().get(),
logger);
}
private Optional<RunStatus> deploy(ApplicationId id, JobType type, Supplier<ActivateResult> deployment,
Instant startTime, DualLogger logger) {
try {
PrepareResponse prepareResponse = deployment.get().prepareResponse();
if (prepareResponse.log != null)
logger.logAll(prepareResponse.log.stream()
.map(entry -> new LogEntry(0,
Instant.ofEpochMilli(entry.time),
LogEntry.typeOf(LogLevel.parse(entry.level)),
entry.message))
.collect(toList()));
if ( ! prepareResponse.configChangeActions.refeedActions.stream().allMatch(action -> action.allowed)) {
List<String> messages = new ArrayList<>();
messages.add("Deploy failed due to non-compatible changes that require re-feed.");
messages.add("Your options are:");
messages.add("1. Revert the incompatible changes.");
messages.add("2. If you think it is safe in your case, you can override this validation, see");
messages.add(" http:
messages.add("3. Deploy as a new application under a different name.");
messages.add("Illegal actions:");
prepareResponse.configChangeActions.refeedActions.stream()
.filter(action -> ! action.allowed)
.flatMap(action -> action.messages.stream())
.forEach(messages::add);
logger.log(messages);
return Optional.of(deploymentFailed);
}
if (prepareResponse.configChangeActions.restartActions.isEmpty())
logger.log("No services requiring restart.");
else
prepareResponse.configChangeActions.restartActions.stream()
.flatMap(action -> action.services.stream())
.map(service -> service.hostName)
.sorted().distinct()
.map(Hostname::new)
.forEach(hostname -> {
controller.applications().restart(new DeploymentId(id, type.zone(controller.system())), Optional.of(hostname));
logger.log("Schedule service restart on host " + hostname.id() + ".");
});
logger.log("Deployment successful.");
if (prepareResponse.message != null)
logger.log(prepareResponse.message);
return Optional.of(running);
}
catch (ConfigServerException e) {
Optional<RunStatus> result = startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(deploymentFailed) : Optional.empty();
switch (e.getErrorCode()) {
case CERTIFICATE_NOT_READY:
if (startTime.plus(endpointCertificateTimeout).isBefore(controller.clock().instant())) {
logger.log("Deployment failed to find provisioned endpoint certificate after " + endpointCertificateTimeout);
return Optional.of(RunStatus.endpointCertificateTimeout);
}
return result;
case ACTIVATION_CONFLICT:
case APPLICATION_LOCK_FAILURE:
logger.log("Deployment failed with possibly transient error " + e.getErrorCode() +
", will retry: " + e.getMessage());
return result;
case LOAD_BALANCER_NOT_READY:
case PARENT_HOST_NOT_READY:
logger.log(e.getServerMessage());
return result;
case OUT_OF_CAPACITY:
logger.log(e.getServerMessage());
return controller.system().isCd() && startTime.plus(capacityTimeout).isAfter(controller.clock().instant())
? Optional.empty()
: Optional.of(outOfCapacity);
case INVALID_APPLICATION_PACKAGE:
case BAD_REQUEST:
logger.log(e.getMessage());
return Optional.of(deploymentFailed);
}
throw e;
}
catch (EndpointCertificateException e) {
switch (e.type()) {
case CERT_NOT_AVAILABLE:
if (startTime.plus(endpointCertificateTimeout).isBefore(controller.clock().instant())) {
logger.log("Deployment failed to find provisioned endpoint certificate after " + endpointCertificateTimeout);
return Optional.of(RunStatus.endpointCertificateTimeout);
}
return Optional.empty();
default:
throw e;
}
}
}
private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) {
return installReal(id, true, logger);
}
private Optional<RunStatus> installReal(RunId id, DualLogger logger) {
return installReal(id, false, logger);
}
private Optional<RunStatus> installTester(RunId id, DualLogger logger) {
Run run = controller.jobController().run(id).get();
Version platform = controller.systemVersion();
ZoneId zone = id.type().zone(controller.system());
ApplicationId testerId = id.tester().id();
Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(testerId, zone),
Optional.of(platform));
if (services.isEmpty()) {
logger.log("Config status not currently available -- will retry.");
return run.stepInfo(installTester).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
? Optional.of(error)
: Optional.empty();
}
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone,
testerId,
ImmutableSet.of(active, reserved));
List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(zone,
nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
NodeList nodeList = NodeList.of(nodes, parents, services.get());
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, false))
.collect(toList()));
if (nodeList.summary().converged() && testerContainersAreUp(testerId, zone, logger)) {
logger.log("Tester container successfully installed!");
return Optional.of(running);
}
if (run.stepInfo(installTester).get().startTime().get().plus(testerTimeout).isBefore(controller.clock().instant())) {
logger.log(WARNING, "Installation of tester failed to complete within " + testerTimeout.toMinutes() + " minutes!");
return Optional.of(error);
}
return Optional.empty();
}
/** Returns true iff all containers in the deployment give 100 consecutive 200 OK responses on /status.html. */
private boolean containersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
var endpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zoneId)));
if ( ! endpoints.containsKey(zoneId))
return false;
for (var endpoint : endpoints.get(zoneId)) {
boolean ready = controller.jobController().cloud().ready(endpoint.url());
if ( ! ready) {
logger.log("Failed to get 100 consecutive OKs from " + endpoint);
return false;
}
}
return true;
}
/** Returns true iff the tester container of the deployment gives 100 consecutive 200 OK responses on /status.html. */
private boolean testerContainersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
    DeploymentId deploymentId = new DeploymentId(id, zoneId);
    boolean ready = controller.jobController().cloud().testerReady(deploymentId);
    if ( ! ready)
        logger.log("Failed to get 100 consecutive OKs from tester container for " + deploymentId);
    return ready;
}
/** Returns true iff all zone endpoints of the deployment resolve in DNS and, for exclusively routed endpoints, point at the expected load balancer. */
private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) {
    var endpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zone)));
    if ( ! endpoints.containsKey(zone)) {
        logger.log("Endpoints not yet ready.");
        return false;
    }
    var policies = controller.routing().policies().get(new DeploymentId(id, zone));
    for (var endpoint : endpoints.get(zone)) {
        HostName endpointName = HostName.from(endpoint.dnsName());
        var ipAddress = controller.jobController().cloud().resolveHostName(endpointName);
        if (ipAddress.isEmpty()) {
            logger.log(INFO, "DNS lookup yielded no IP address for '" + endpointName + "'.");
            return false;
        }
        if (endpoint.routingMethod() == RoutingMethod.exclusive) {
            // Exclusively routed endpoints must have a routing policy, whose load balancer the CNAME should target.
            var policy = policies.get(new RoutingPolicyId(id, ClusterSpec.Id.from(endpoint.name()), zone));
            if (policy == null)
                throw new IllegalStateException(endpoint + " has no matching policy in " + policies);
            // The endpoint's CNAME must point at the canonical name of the policy's load balancer ...
            var cNameValue = controller.jobController().cloud().resolveCname(endpointName);
            if ( ! cNameValue.map(policy.canonicalName()::equals).orElse(false)) {
                logger.log(INFO, "CNAME '" + endpointName + "' points at " +
                                 cNameValue.map(name -> "'" + name + "'").orElse("nothing") +
                                 " but should point at load balancer '" + policy.canonicalName() + "'");
                return false;
            }
            // ... and both names must resolve to the same IP address.
            var loadBalancerAddress = controller.jobController().cloud().resolveHostName(policy.canonicalName());
            if ( ! loadBalancerAddress.equals(ipAddress)) {
                logger.log(INFO, "IP address of CNAME '" + endpointName + "' (" + ipAddress.get() + ") and load balancer '" +
                                 policy.canonicalName() + "' (" + loadBalancerAddress.orElse("empty") + ") are not equal");
                return false;
            }
        }
    }
    logEndpoints(endpoints, logger);
    return true;
}
/** Logs the discovered endpoints, grouped per zone, as a single multi-line log entry. */
private void logEndpoints(Map<ZoneId, List<Endpoint>> zoneEndpoints, DualLogger logger) {
    List<String> lines = new ArrayList<>();
    lines.add("Found endpoints:");
    for (Map.Entry<ZoneId, List<Endpoint>> zoneAndEndpoints : zoneEndpoints.entrySet()) {
        lines.add("- " + zoneAndEndpoints.getKey());
        zoneAndEndpoints.getValue().forEach(endpoint ->
                lines.add(" |-- " + endpoint.url() + " (cluster '" + endpoint.name() + "')"));
    }
    logger.log(lines);
}
/**
 * Renders a node's convergence status as log lines: one line for the node and its platform
 * (with pending upgrades, restarts and reboots), followed by one line per service which is
 * lagging behind the wanted config generation (or all services, when {@code printAllServices}).
 */
private Stream<String> nodeDetails(NodeWithServices node, boolean printAllServices) {
    return Stream.concat(Stream.of(node.node().hostname() + ": " + humanize(node.node().serviceState()) + (node.node().suspendedSince().map(since -> " since " + since).orElse("")),
                                   "--- platform " + node.node().wantedVersion() + (node.needsPlatformUpgrade()
                                           ? " <-- " + (node.node().currentVersion().isEmpty() ? "not booted" : node.node().currentVersion())
                                           : "") +
                                   (node.needsOsUpgrade() && node.isAllowedDown()
                                           ? ", upgrading OS (" + node.node().wantedOsVersion() + " <-- " + node.node().currentOsVersion() + ")"
                                           : "") +
                                   (node.needsFirmwareUpgrade() && node.isAllowedDown()
                                           ? ", upgrading firmware"
                                           : "") +
                                   (node.needsRestart()
                                           ? ", restart pending (" + node.node().wantedRestartGeneration() + " <-- " + node.node().restartGeneration() + ")"
                                           : "") +
                                   (node.needsReboot()
                                           ? ", reboot pending (" + node.node().wantedRebootGeneration() + " <-- " + node.node().rebootGeneration() + ")"
                                           : "")),
                         node.services().stream()
                             .filter(service -> printAllServices || node.needsNewConfig())
                             .map(service -> "--- " + service.type() + " on port " + service.port() + (service.currentGeneration() == -1
                                     ? " has not started "
                                     : " has config generation " + service.currentGeneration() + ", wanted is " + node.wantedConfigGeneration())));
}
/** Translates an orchestration service state to a human-readable phrase; unknown states fall back to the enum name. */
private String humanize(Node.ServiceState state) {
    if (state == Node.ServiceState.allowedDown) return "allowed to be DOWN";
    if (state == Node.ServiceState.expectedUp) return "expected to be UP";
    if (state == Node.ServiceState.unorchestrated) return "unorchestrated";
    return state.name();
}
/**
 * Tells the tester to start its test suite (the staging setup suite when {@code isSetup})
 * against the deployment under test, handing it the endpoints of all production deployments
 * plus the zone under test.
 */
private Optional<RunStatus> startTests(RunId id, boolean isSetup, DualLogger logger) {
    Optional<Deployment> deployment = deployment(id.application(), id.type());
    if (deployment.isEmpty()) {
        logger.log(INFO, "Deployment expired before tests could start.");
        return Optional.of(error);
    }
    var deployments = controller.applications().requireInstance(id.application())
                                .productionDeployments().keySet().stream()
                                .map(zone -> new DeploymentId(id.application(), zone))
                                .collect(Collectors.toSet());
    ZoneId zoneId = id.type().zone(controller.system());
    deployments.add(new DeploymentId(id.application(), zoneId));
    logger.log("Attempting to find endpoints ...");
    var endpoints = controller.routing().zoneEndpointsOf(deployments);
    if ( ! endpoints.containsKey(zoneId)) {
        logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!");
        return Optional.of(error);
    }
    logEndpoints(endpoints, logger);
    if (!controller.jobController().cloud().testerReady(getTesterDeploymentId(id))) {
        logger.log(WARNING, "Tester container went bad!");
        return Optional.of(error);
    }
    logger.log("Starting tests ...");
    TesterCloud.Suite suite = TesterCloud.Suite.of(id.type(), isSetup);
    byte[] config = testConfigSerializer.configJson(id.application(),
                                                    id.type(),
                                                    true,
                                                    endpoints,
                                                    controller.applications().contentClustersByZone(deployments));
    controller.jobController().cloud().startTests(getTesterDeploymentId(id), suite, config);
    return Optional.of(running);
}

/** Polls the tester for test progress, and maps the tester status to a run status once tests are done. */
private Optional<RunStatus> endTests(RunId id, DualLogger logger) {
    if (deployment(id.application(), id.type()).isEmpty()) {
        logger.log(INFO, "Deployment expired before tests could complete.");
        return Optional.of(aborted);
    }
    // A run whose tester certificate has expired can no longer produce trustworthy results.
    Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate();
    if (testerCertificate.isPresent()) {
        try {
            testerCertificate.get().checkValidity(Date.from(controller.clock().instant()));
        }
        catch (CertificateExpiredException | CertificateNotYetValidException e) {
            logger.log(INFO, "Tester certificate expired before tests could complete.");
            return Optional.of(aborted);
        }
    }
    controller.jobController().updateTestLog(id);
    TesterCloud.Status testStatus = controller.jobController().cloud().getStatus(getTesterDeploymentId(id));
    switch (testStatus) {
        case NOT_STARTED:
            throw new IllegalStateException("Tester reports tests not started, even though they should have!");
        case RUNNING:
            return Optional.empty(); // Not done yet -- poll again later.
        case FAILURE:
            logger.log("Tests failed.");
            return Optional.of(testFailure);
        case ERROR:
            logger.log(INFO, "Tester failed running its tests!");
            return Optional.of(error);
        case SUCCESS:
            logger.log("Tests completed successfully.");
            return Optional.of(running);
        default:
            throw new IllegalStateException("Unknown status '" + testStatus + "'!");
    }
}
/**
 * Copies the Vespa log from the deployment to the run's stored log, if the deployment still exists.
 *
 * Reports {@code running} so the remaining cleanup steps proceed, except when fetching the log
 * fails, which ends the run with {@code error}. The original attached an unbraced {@code try}
 * to an unbraced {@code if}; braces are added for clarity -- behavior is unchanged.
 */
private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) {
    if (deployment(id.application(), id.type()).isPresent()) {
        try {
            controller.jobController().updateVespaLog(id);
        }
        catch (Exception e) {
            logger.log(INFO, "Failure getting vespa logs for " + id, e);
            return Optional.of(error);
        }
    }
    return Optional.of(running);
}
/** Deactivates the real deployment of this run; see {@link #deactivate}. */
private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) {
    return deactivate(id, deactivateReal,
                      "Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ...",
                      () -> controller.applications().deactivate(id.application(), id.type().zone(controller.system())),
                      "Failed deleting application " + id.application(),
                      logger);
}

/** Deactivates the tester deployment of this run; see {@link #deactivate}. */
private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) {
    return deactivate(id, deactivateTester,
                      "Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ...",
                      () -> controller.jobController().deactivateTester(id.tester(), id.type()),
                      "Failed deleting tester of " + id.application(),
                      logger);
}

/**
 * Shared retry logic for the two deactivation steps above (previously duplicated).
 *
 * @param step         the deactivation step of the run, used to look up the step's start time
 * @param deactivation the actual deactivation to perform
 * @return {@code running} on success, empty to retry, or {@code error} once an hour has passed
 *         since the step started
 */
private Optional<RunStatus> deactivate(RunId id, Step step, String startMessage, Runnable deactivation,
                                       String failureMessage, DualLogger logger) {
    try {
        logger.log(startMessage);
        deactivation.run();
        return Optional.of(running);
    }
    catch (RuntimeException e) {
        logger.log(WARNING, failureMessage, e);
        Instant startTime = controller.jobController().run(id).get().stepInfo(step).get().startTime().get();
        return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
                ? Optional.of(error)
                : Optional.empty();
    }
}
/** Reports the outcome of the run: sends a failure notification if the run has failed. */
private Optional<RunStatus> report(RunId id, DualLogger logger) {
    try {
        controller.jobController().active(id)
                  .filter(Run::hasFailed)
                  .ifPresent(failedRun -> sendNotification(failedRun, logger));
    }
    catch (IllegalStateException e) {
        logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e);
        return Optional.of(error);
    }
    return Optional.of(running);
}
/** Sends a mail with a notification of a failed run, if one should be sent. */
private void sendNotification(Run run, DualLogger logger) {
    Application application = controller.applications().requireApplication(TenantAndApplicationId.from(run.id().application()));
    Notifications notifications = application.deploymentSpec().requireInstance(run.id().application().instance()).notifications();
    // The run tests a new commit iff the instance's current application change matches the run's target version.
    boolean newCommit = application.require(run.id().application().instance()).change().application()
                                   .map(run.versions().targetApplication()::equals)
                                   .orElse(false);
    When when = newCommit ? failingCommit : failing;
    List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when));
    if (notifications.emailRolesFor(when).contains(author))
        run.versions().targetApplication().authorEmail().ifPresent(recipients::add);
    if (recipients.isEmpty())
        return;
    try {
        mailOf(run, recipients).ifPresent(controller.serviceRegistry().mailer()::send);
    }
    catch (RuntimeException e) {
        // Sending mail is best-effort: failures are logged, never propagated.
        logger.log(INFO, "Exception trying to send mail for " + run.id(), e);
    }
}

/** Returns the failure notification mail matching the run's status, or empty when no mail applies. */
private Optional<Mail> mailOf(Run run, List<String> recipients) {
    switch (run.status()) {
        case running:
        case aborted:
        case success:
            return Optional.empty(); // Not failures -- nothing to send.
        case outOfCapacity:
            // Out of capacity is only mail-worthy for production jobs.
            return run.id().type().isProduction() ? Optional.of(mails.outOfCapacity(run.id(), recipients)) : Optional.empty();
        case deploymentFailed:
            return Optional.of(mails.deploymentFailure(run.id(), recipients));
        case installationFailed:
            return Optional.of(mails.installationFailure(run.id(), recipients));
        case testFailure:
            return Optional.of(mails.testFailure(run.id(), recipients));
        case error:
        case endpointCertificateTimeout:
            return Optional.of(mails.systemError(run.id(), recipients));
        default:
            // Unknown statuses are treated as system errors, but logged so the mapping can be extended.
            logger.log(WARNING, "Don't know what mail to send for run status '" + run.status() + "'");
            return Optional.of(mails.systemError(run.id(), recipients));
    }
}
/** Returns the deployment of the real application in the zone of the given job, if it exists. */
private Optional<Deployment> deployment(ApplicationId id, JobType type) {
    ZoneId zone = type.zone(controller.system());
    return Optional.ofNullable(application(id).deployments().get(zone));
}
/** Returns the real application with the given id. */
private Instance application(ApplicationId id) {
    // NOTE(review): takes and immediately releases the application lock with a no-op before
    // reading -- presumably to serialize with concurrent writers; confirm intent before changing.
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { });
    return controller.applications().requireInstance(id);
}

/**
 * Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout.
 *
 * We time out the job before the deployment expires, for zones where deployments are not persistent,
 * to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry,
 * and the given default installation timeout, minus one minute, is used as a timeout threshold.
 */
private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) {
    Run run = controller.jobController().run(id).get();
    // Outside CD, a deployment made after this run started is never considered timed out.
    if ( ! controller.system().isCd() && run.start().isAfter(deployment.at()))
        return false;
    Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone())
                                 .filter(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0)
                                 .orElse(defaultTimeout);
    return deployment.at().isBefore(controller.clock().instant().minus(timeout.minus(Duration.ofMinutes(1))));
}
/** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */
private ApplicationPackage testerPackage(RunId id) {
    ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication();
    DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec();
    ZoneId zone = id.type().zone(controller.system());
    // Tester certificates are only generated for test jobs in public systems.
    boolean useTesterCertificate = controller.system().isPublic() && id.type().environment().isTest();
    byte[] servicesXml = servicesXml(! controller.system().isPublic(),
                                     useTesterCertificate,
                                     testerResourcesFor(zone, spec.requireInstance(id.application().instance())));
    byte[] testPackage = controller.applications().applicationStore().getTester(id.application().tenant(), id.application().application(), version);
    byte[] deploymentXml = deploymentXml(id.tester(),
                                         spec.athenzDomain(),
                                         spec.requireInstance(id.application().instance()).athenzService(zone.environment(), zone.region()));
    try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) {
        zipBuilder.add(testPackage);
        zipBuilder.add("services.xml", servicesXml);
        zipBuilder.add("deployment.xml", deploymentXml);
        if (useTesterCertificate)
            appendAndStoreCertificate(zipBuilder, id);
        zipBuilder.close();
        return new ApplicationPackage(zipBuilder.toByteArray());
    }
}

/** Generates a self-signed certificate for the tester, stores it on the run, and adds key and cert to the package. */
private void appendAndStoreCertificate(ZipBuilder zipBuilder, RunId id) {
    KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048);
    X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "." + id.number());
    // Certificate is valid for certificateTimeout from now.
    X509Certificate certificate = X509CertificateBuilder.fromKeypair(keyPair,
                                                                     subject,
                                                                     controller.clock().instant(),
                                                                     controller.clock().instant().plus(certificateTimeout),
                                                                     SignatureAlgorithm.SHA512_WITH_RSA,
                                                                     BigInteger.valueOf(1))
                                                        .build();
    controller.jobController().storeTesterCertificate(id, certificate);
    zipBuilder.add("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8));
    zipBuilder.add("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8));
}
/** Returns the deployment id of the tester application for the given run. */
private DeploymentId getTesterDeploymentId(RunId runId) {
    return new DeploymentId(runId.tester().id(), runId.type().zone(controller.system()));
}
/**
 * Returns the node resources for the tester: the tester flavor declared by the first deployment
 * spec step concerning the zone's environment, or a default (larger in "aws-" regions).
 */
static NodeResources testerResourcesFor(ZoneId zone, DeploymentInstanceSpec spec) {
    return spec.steps().stream()
               .filter(step -> step.concerns(zone.environment()))
               .findFirst()
               .flatMap(step -> step.zones().get(0).testerFlavor())
               .map(NodeResources::fromLegacyName)
               .orElse(zone.region().value().contains("aws-") ?
                       DEFAULT_TESTER_RESOURCES_AWS : DEFAULT_TESTER_RESOURCES);
}
/** Returns the generated services.xml content for the tester application. */
static byte[] servicesXml(boolean systemUsesAthenz, boolean useTesterCertificate, NodeResources resources) {
    int jdiscMemoryGb = 2; // Memory reserved for the jdisc container itself.
    int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb());
    // Surefire gets half of the memory not reserved for jdisc.
    int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2);
    String resourceString = String.format(Locale.ENGLISH,
                                          "<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>",
                                          resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name());
    String servicesXml =
            "<?xml version='1.0' encoding='UTF-8'?>\n" +
            "<services xmlns:deploy='vespa' version='1.0'>\n" +
            " <container version='1.0' id='tester'>\n" +
            "\n" +
            " <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" +
            " <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" +
            " <artifactsPath>artifacts</artifactsPath>\n" +
            " <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" +
            " <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
            " <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" +
            " </config>\n" +
            " </component>\n" +
            "\n" +
            " <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" +
            // NOTE(review): the next line appears truncated in this copy of the file (unterminated
            // string literal after "http:") -- restore the full binding URL from version control.
            " <binding>http:
            " </handler>\n" +
            "\n" +
            " <nodes count=\"1\" allocated-memory=\"" + jdiscMemoryPct + "%\">\n" +
            " " + resourceString + "\n" +
            " </nodes>\n" +
            " </container>\n" +
            "</services>\n";
    return servicesXml.getBytes(UTF_8);
}

/** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */
private static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) {
    String deploymentSpec =
            "<?xml version='1.0' encoding='UTF-8'?>\n" +
            "<deployment version=\"1.0\" " +
            athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") +
            athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("") + ">" +
            " <instance id=\"" + id.id().instance().value() + "\" />" +
            "</deployment>";
    return deploymentSpec.getBytes(UTF_8);
}
/** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */
private class DualLogger {

    private final RunId id;
    private final Step step;

    private DualLogger(RunId id, Step step) {
        this.id = id;
        this.step = step;
    }

    // Logs the given messages to the job controller, at INFO level.
    private void log(String... messages) {
        log(List.of(messages));
    }

    // Logs pre-built log entries to the job controller only.
    private void logAll(List<LogEntry> messages) {
        controller.jobController().log(id, step, messages);
    }

    private void log(List<String> messages) {
        controller.jobController().log(id, step, INFO, messages);
    }

    private void log(Level level, String message) {
        log(level, message, null);
    }

    // Logs the stack trace to the outer logger only -- used for exceptions whose trace should
    // not end up in the job's stored log (e.g. transient IO errors; see run()).
    private void logWithInternalException(Level level, String message, Throwable thrown) {
        logger.log(level, id + " at " + step + ": " + message, thrown);
        controller.jobController().log(id, step, level, message);
    }

    // Logs to both loggers; the stack trace of thrown, when present, is appended to the stored message.
    private void log(Level level, String message, Throwable thrown) {
        logger.log(level, id + " at " + step + ": " + message, thrown);
        if (thrown != null) {
            ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream();
            thrown.printStackTrace(new PrintStream(traceBuffer));
            message += "\n" + traceBuffer;
        }
        controller.jobController().log(id, step, level, message);
    }

}
} | class InternalStepRunner implements StepRunner {
private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName());

// Default tester node resources; the AWS variant is larger (see testerResourcesFor).
static final NodeResources DEFAULT_TESTER_RESOURCES =
        new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any);
static final NodeResources DEFAULT_TESTER_RESOURCES_AWS =
        new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any);

// How long out-of-capacity deployments are retried in CD systems before failing (see deploy()).
static final Duration capacityTimeout = Duration.ofMinutes(5);
static final Duration endpointTimeout = Duration.ofMinutes(15);
// How long to wait for a provisioned endpoint certificate before failing the deployment (see deploy()).
static final Duration endpointCertificateTimeout = Duration.ofMinutes(15);
// How long the tester deployment may take to converge before the step fails (see installTester()).
static final Duration testerTimeout = Duration.ofMinutes(30);
static final Duration nodesDownTimeout = Duration.ofMinutes(60);
static final Duration noNodesDownTimeout = Duration.ofMinutes(120);
// Validity period of the generated tester certificate (see appendAndStoreCertificate()).
static final Duration certificateTimeout = Duration.ofMinutes(300);

private final Controller controller;
private final TestConfigSerializer testConfigSerializer;
private final DeploymentFailureMails mails;

/** Creates a step runner backed by the given controller. */
public InternalStepRunner(Controller controller) {
    this.controller = controller;
    this.testConfigSerializer = new TestConfigSerializer(controller.system());
    this.mails = new DeploymentFailureMails(controller.zoneRegistry());
}
/** Dispatches the given step of the given run to its handler, mapping exceptions to retry or failure. */
@Override
public Optional<RunStatus> run(LockedStep step, RunId id) {
    DualLogger logger = new DualLogger(id, step.get());
    try {
        switch (step.get()) {
            case deployTester: return deployTester(id, logger);
            case deployInitialReal: return deployInitialReal(id, logger);
            case installInitialReal: return installInitialReal(id, logger);
            case deployReal: return deployReal(id, logger);
            case installTester: return installTester(id, logger);
            case installReal: return installReal(id, logger);
            case startStagingSetup: return startTests(id, true, logger);
            case endStagingSetup:
            case endTests: return endTests(id, logger);
            case startTests: return startTests(id, false, logger);
            case copyVespaLogs: return copyVespaLogs(id, logger);
            case deactivateReal: return deactivateReal(id, logger);
            case deactivateTester: return deactivateTester(id, logger);
            case report: return report(id, logger);
            default: throw new AssertionError("Unknown step '" + step + "'!");
        }
    }
    catch (UncheckedIOException e) {
        // IO exceptions are treated as transient: the step is retried without failing the run.
        logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e);
        return Optional.empty();
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Unexpected exception running " + id, e);
        // Cleanup steps are retried on unexpected failures; other steps end the run with error.
        if (step.get().alwaysRun()) {
            logger.log("Will keep trying, as this is a cleanup step.");
            return Optional.empty();
        }
        return Optional.of(error);
    }
}
/** Deploys the source (or, failing that, target) versions of the real application, for the initial deployment. */
private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) {
    Versions versions = controller.jobController().run(id).get().versions();
    logger.log("Deploying platform version " +
               versions.sourcePlatform().orElse(versions.targetPlatform()) +
               " and application version " +
               versions.sourceApplication().orElse(versions.targetApplication()).id() + " ...");
    return deployReal(id, true, logger);
}

/** Deploys the target versions of the real application. */
private Optional<RunStatus> deployReal(RunId id, DualLogger logger) {
    Versions versions = controller.jobController().run(id).get().versions();
    logger.log("Deploying platform version " + versions.targetPlatform() +
               " and application version " + versions.targetApplication().id() + " ...");
    return deployReal(id, false, logger);
}

// Common path for the two real deployments above; setTheStage selects which step's start time bounds retries.
private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, DualLogger logger) {
    return deploy(id.application(),
                  id.type(),
                  () -> controller.applications().deploy2(id.job(), setTheStage),
                  controller.jobController().run(id).get()
                            .stepInfo(setTheStage ? deployInitialReal : deployReal).get()
                            .startTime().get(),
                  logger);
}

/** Deploys the tester container application, on the current system version. */
private Optional<RunStatus> deployTester(RunId id, DualLogger logger) {
    Version platform = controller.systemVersion();
    logger.log("Deploying the tester container on platform " + platform + " ...");
    return deploy(id.tester().id(),
                  id.type(),
                  () -> controller.applications().deployTester(id.tester(),
                                                               testerPackage(id),
                                                               id.type().zone(controller.system()),
                                                               platform),
                  controller.jobController().run(id).get()
                            .stepInfo(deployTester).get()
                            .startTime().get(),
                  logger);
}
/**
 * Performs the given deployment, logging its prepare output, and maps config server errors
 * to a run status: transient errors are retried (for up to an hour, measured from
 * {@code startTime}), fatal ones end the run.
 */
private Optional<RunStatus> deploy(ApplicationId id, JobType type, Supplier<ActivateResult> deployment,
                                   Instant startTime, DualLogger logger) {
    try {
        PrepareResponse prepareResponse = deployment.get().prepareResponse();
        if (prepareResponse.log != null)
            logger.logAll(prepareResponse.log.stream()
                                             .map(entry -> new LogEntry(0,
                                                                        Instant.ofEpochMilli(entry.time),
                                                                        LogEntry.typeOf(LogLevel.parse(entry.level)),
                                                                        entry.message))
                                             .collect(toList()));
        // Disallowed re-feed actions fail the deployment, with guidance for the user.
        if ( ! prepareResponse.configChangeActions.refeedActions.stream().allMatch(action -> action.allowed)) {
            List<String> messages = new ArrayList<>();
            messages.add("Deploy failed due to non-compatible changes that require re-feed.");
            messages.add("Your options are:");
            messages.add("1. Revert the incompatible changes.");
            messages.add("2. If you think it is safe in your case, you can override this validation, see");
            // NOTE(review): the next line appears truncated in this copy of the file (unterminated
            // string literal after "http:") -- restore the documentation URL from version control.
            messages.add(" http:
            messages.add("3. Deploy as a new application under a different name.");
            messages.add("Illegal actions:");
            prepareResponse.configChangeActions.refeedActions.stream()
                                                             .filter(action -> ! action.allowed)
                                                             .flatMap(action -> action.messages.stream())
                                                             .forEach(messages::add);
            logger.log(messages);
            return Optional.of(deploymentFailed);
        }
        // Required service restarts are scheduled here, once per distinct host.
        if (prepareResponse.configChangeActions.restartActions.isEmpty())
            logger.log("No services requiring restart.");
        else
            prepareResponse.configChangeActions.restartActions.stream()
                                                              .flatMap(action -> action.services.stream())
                                                              .map(service -> service.hostName)
                                                              .sorted().distinct()
                                                              .map(Hostname::new)
                                                              .forEach(hostname -> {
                                                                  controller.applications().restart(new DeploymentId(id, type.zone(controller.system())), Optional.of(hostname));
                                                                  logger.log("Schedule service restart on host " + hostname.id() + ".");
                                                              });
        logger.log("Deployment successful.");
        if (prepareResponse.message != null)
            logger.log(prepareResponse.message);
        return Optional.of(running);
    }
    catch (ConfigServerException e) {
        // Retryable errors become deploymentFailed once an hour has passed since the step started.
        Optional<RunStatus> result = startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
                ? Optional.of(deploymentFailed) : Optional.empty();
        switch (e.getErrorCode()) {
            case CERTIFICATE_NOT_READY:
                if (startTime.plus(endpointCertificateTimeout).isBefore(controller.clock().instant())) {
                    logger.log("Deployment failed to find provisioned endpoint certificate after " + endpointCertificateTimeout);
                    return Optional.of(RunStatus.endpointCertificateTimeout);
                }
                return result;
            case ACTIVATION_CONFLICT:
            case APPLICATION_LOCK_FAILURE:
                logger.log("Deployment failed with possibly transient error " + e.getErrorCode() +
                           ", will retry: " + e.getMessage());
                return result;
            case LOAD_BALANCER_NOT_READY:
            case PARENT_HOST_NOT_READY:
                logger.log(e.getServerMessage());
                return result;
            case OUT_OF_CAPACITY:
                // CD systems retry out-of-capacity for capacityTimeout before giving up.
                logger.log(e.getServerMessage());
                return controller.system().isCd() && startTime.plus(capacityTimeout).isAfter(controller.clock().instant())
                        ? Optional.empty()
                        : Optional.of(outOfCapacity);
            case INVALID_APPLICATION_PACKAGE:
            case BAD_REQUEST:
                logger.log(e.getMessage());
                return Optional.of(deploymentFailed);
        }
        throw e; // Unknown config server error codes propagate.
    }
    catch (EndpointCertificateException e) {
        switch (e.type()) {
            case CERT_NOT_AVAILABLE:
                if (startTime.plus(endpointCertificateTimeout).isBefore(controller.clock().instant())) {
                    logger.log("Deployment failed to find provisioned endpoint certificate after " + endpointCertificateTimeout);
                    return Optional.of(RunStatus.endpointCertificateTimeout);
                }
                return Optional.empty();
            default:
                throw e;
        }
    }
}
// Waits for the initial real deployment (staging setup) to install.
private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) {
    return installReal(id, true, logger);
}

// Waits for the real deployment to install.
private Optional<RunStatus> installReal(RunId id, DualLogger logger) {
    return installReal(id, false, logger);
}

/** Waits for the tester deployment to converge on the system platform, with a hard timeout. */
private Optional<RunStatus> installTester(RunId id, DualLogger logger) {
    Run run = controller.jobController().run(id).get();
    Version platform = controller.systemVersion();
    ZoneId zone = id.type().zone(controller.system());
    ApplicationId testerId = id.tester().id();
    Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(testerId, zone),
                                                                                                           Optional.of(platform));
    if (services.isEmpty()) {
        // No convergence info yet; retry, but give up if none has appeared within five minutes.
        logger.log("Config status not currently available -- will retry.");
        return run.stepInfo(installTester).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
                ? Optional.of(error)
                : Optional.empty();
    }
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone,
                                                                                        testerId,
                                                                                        ImmutableSet.of(active, reserved));
    List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(zone,
                                                                                          nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
    NodeList nodeList = NodeList.of(nodes, parents, services.get());
    logger.log(nodeList.asList().stream()
                       .flatMap(node -> nodeDetails(node, false))
                       .collect(toList()));
    if (nodeList.summary().converged() && testerContainersAreUp(testerId, zone, logger)) {
        logger.log("Tester container successfully installed!");
        return Optional.of(running);
    }
    if (run.stepInfo(installTester).get().startTime().get().plus(testerTimeout).isBefore(controller.clock().instant())) {
        logger.log(WARNING, "Installation of tester failed to complete within " + testerTimeout.toMinutes() + " minutes!");
        return Optional.of(error);
    }
    return Optional.empty();
}
/** Returns true iff all containers in the deployment give 100 consecutive 200 OK responses on /status.html. */
private boolean containersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
    var endpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zoneId)));
    if ( ! endpoints.containsKey(zoneId))
        return false; // No endpoints known for this zone yet.
    for (var endpoint : endpoints.get(zoneId)) {
        boolean ready = controller.jobController().cloud().ready(endpoint.url());
        if ( ! ready) {
            logger.log("Failed to get 100 consecutive OKs from " + endpoint);
            return false;
        }
    }
    return true;
}

/** Returns true iff all containers in the tester deployment give 100 consecutive 200 OK responses on /status.html. */
private boolean testerContainersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
    DeploymentId deploymentId = new DeploymentId(id, zoneId);
    if (controller.jobController().cloud().testerReady(deploymentId)) {
        return true;
    } else {
        logger.log("Failed to get 100 consecutive OKs from tester container for " + deploymentId);
        return false;
    }
}

/** Returns true iff all zone endpoints resolve in DNS and, for exclusive routing, point at the expected load balancer. */
private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) {
    var endpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zone)));
    if ( ! endpoints.containsKey(zone)) {
        logger.log("Endpoints not yet ready.");
        return false;
    }
    var policies = controller.routing().policies().get(new DeploymentId(id, zone));
    for (var endpoint : endpoints.get(zone)) {
        HostName endpointName = HostName.from(endpoint.dnsName());
        var ipAddress = controller.jobController().cloud().resolveHostName(endpointName);
        if (ipAddress.isEmpty()) {
            logger.log(INFO, "DNS lookup yielded no IP address for '" + endpointName + "'.");
            return false;
        }
        if (endpoint.routingMethod() == RoutingMethod.exclusive) {
            // Exclusively routed endpoints must have a policy whose load balancer the CNAME targets.
            var policy = policies.get(new RoutingPolicyId(id, ClusterSpec.Id.from(endpoint.name()), zone));
            if (policy == null)
                throw new IllegalStateException(endpoint + " has no matching policy in " + policies);
            var cNameValue = controller.jobController().cloud().resolveCname(endpointName);
            if ( ! cNameValue.map(policy.canonicalName()::equals).orElse(false)) {
                logger.log(INFO, "CNAME '" + endpointName + "' points at " +
                                 cNameValue.map(name -> "'" + name + "'").orElse("nothing") +
                                 " but should point at load balancer '" + policy.canonicalName() + "'");
                return false;
            }
            // Both the endpoint name and the load balancer name must resolve to the same IP address.
            var loadBalancerAddress = controller.jobController().cloud().resolveHostName(policy.canonicalName());
            if ( ! loadBalancerAddress.equals(ipAddress)) {
                logger.log(INFO, "IP address of CNAME '" + endpointName + "' (" + ipAddress.get() + ") and load balancer '" +
                                 policy.canonicalName() + "' (" + loadBalancerAddress.orElse("empty") + ") are not equal");
                return false;
            }
        }
    }
    logEndpoints(endpoints, logger);
    return true;
}
/** Logs the discovered endpoints, grouped per zone, as a single multi-line log entry. */
private void logEndpoints(Map<ZoneId, List<Endpoint>> zoneEndpoints, DualLogger logger) {
    List<String> messages = new ArrayList<>();
    messages.add("Found endpoints:");
    zoneEndpoints.forEach((zone, endpoints) -> {
        messages.add("- " + zone);
        for (Endpoint endpoint : endpoints)
            messages.add(" |-- " + endpoint.url() + " (cluster '" + endpoint.name() + "')");
    });
    logger.log(messages);
}

/**
 * Renders a node's convergence status as log lines: one line for the node and its platform
 * (with pending upgrades, restarts and reboots), followed by one line per service which is
 * lagging behind the wanted config generation (or all services, when {@code printAllServices}).
 */
private Stream<String> nodeDetails(NodeWithServices node, boolean printAllServices) {
    return Stream.concat(Stream.of(node.node().hostname() + ": " + humanize(node.node().serviceState()) + (node.node().suspendedSince().map(since -> " since " + since).orElse("")),
                                   "--- platform " + node.node().wantedVersion() + (node.needsPlatformUpgrade()
                                           ? " <-- " + (node.node().currentVersion().isEmpty() ? "not booted" : node.node().currentVersion())
                                           : "") +
                                   (node.needsOsUpgrade() && node.isAllowedDown()
                                           ? ", upgrading OS (" + node.node().wantedOsVersion() + " <-- " + node.node().currentOsVersion() + ")"
                                           : "") +
                                   (node.needsFirmwareUpgrade() && node.isAllowedDown()
                                           ? ", upgrading firmware"
                                           : "") +
                                   (node.needsRestart()
                                           ? ", restart pending (" + node.node().wantedRestartGeneration() + " <-- " + node.node().restartGeneration() + ")"
                                           : "") +
                                   (node.needsReboot()
                                           ? ", reboot pending (" + node.node().wantedRebootGeneration() + " <-- " + node.node().rebootGeneration() + ")"
                                           : "")),
                         node.services().stream()
                             .filter(service -> printAllServices || node.needsNewConfig())
                             .map(service -> "--- " + service.type() + " on port " + service.port() + (service.currentGeneration() == -1
                                     ? " has not started "
                                     : " has config generation " + service.currentGeneration() + ", wanted is " + node.wantedConfigGeneration())));
}

/** Translates an orchestration service state to a human-readable phrase; unknown states fall back to the enum name. */
private String humanize(Node.ServiceState state) {
    switch (state) {
        case allowedDown: return "allowed to be DOWN";
        case expectedUp: return "expected to be UP";
        case unorchestrated: return "unorchestrated";
        default: return state.name();
    }
}
private Optional<RunStatus> startTests(RunId id, boolean isSetup, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
logger.log(INFO, "Deployment expired before tests could start.");
return Optional.of(error);
}
var deployments = controller.applications().requireInstance(id.application())
.productionDeployments().keySet().stream()
.map(zone -> new DeploymentId(id.application(), zone))
.collect(Collectors.toSet());
ZoneId zoneId = id.type().zone(controller.system());
deployments.add(new DeploymentId(id.application(), zoneId));
logger.log("Attempting to find endpoints ...");
var endpoints = controller.routing().zoneEndpointsOf(deployments);
if ( ! endpoints.containsKey(zoneId)) {
logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!");
return Optional.of(error);
}
logEndpoints(endpoints, logger);
if (!controller.jobController().cloud().testerReady(getTesterDeploymentId(id))) {
logger.log(WARNING, "Tester container went bad!");
return Optional.of(error);
}
logger.log("Starting tests ...");
TesterCloud.Suite suite = TesterCloud.Suite.of(id.type(), isSetup);
byte[] config = testConfigSerializer.configJson(id.application(),
id.type(),
true,
endpoints,
controller.applications().contentClustersByZone(deployments));
controller.jobController().cloud().startTests(getTesterDeploymentId(id), suite, config);
return Optional.of(running);
}
private Optional<RunStatus> endTests(RunId id, DualLogger logger) {
if (deployment(id.application(), id.type()).isEmpty()) {
logger.log(INFO, "Deployment expired before tests could complete.");
return Optional.of(aborted);
}
Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate();
if (testerCertificate.isPresent()) {
try {
testerCertificate.get().checkValidity(Date.from(controller.clock().instant()));
}
catch (CertificateExpiredException | CertificateNotYetValidException e) {
logger.log(INFO, "Tester certificate expired before tests could complete.");
return Optional.of(aborted);
}
}
controller.jobController().updateTestLog(id);
TesterCloud.Status testStatus = controller.jobController().cloud().getStatus(getTesterDeploymentId(id));
switch (testStatus) {
case NOT_STARTED:
throw new IllegalStateException("Tester reports tests not started, even though they should have!");
case RUNNING:
return Optional.empty();
case FAILURE:
logger.log("Tests failed.");
return Optional.of(testFailure);
case ERROR:
logger.log(INFO, "Tester failed running its tests!");
return Optional.of(error);
case SUCCESS:
logger.log("Tests completed successfully.");
return Optional.of(running);
default:
throw new IllegalStateException("Unknown status '" + testStatus + "'!");
}
}
private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) {
if (deployment(id.application(), id.type()).isPresent())
try {
controller.jobController().updateVespaLog(id);
}
catch (Exception e) {
logger.log(INFO, "Failure getting vespa logs for " + id, e);
return Optional.of(error);
}
return Optional.of(running);
}
private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) {
try {
logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
controller.applications().deactivate(id.application(), id.type().zone(controller.system()));
return Optional.of(running);
}
catch (RuntimeException e) {
logger.log(WARNING, "Failed deleting application " + id.application(), e);
Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateReal).get().startTime().get();
return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(error)
: Optional.empty();
}
}
private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) {
try {
logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
controller.jobController().deactivateTester(id.tester(), id.type());
return Optional.of(running);
}
catch (RuntimeException e) {
logger.log(WARNING, "Failed deleting tester of " + id.application(), e);
Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateTester).get().startTime().get();
return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(error)
: Optional.empty();
}
}
private Optional<RunStatus> report(RunId id, DualLogger logger) {
try {
controller.jobController().active(id).ifPresent(run -> {
if (run.hasFailed())
sendNotification(run, logger);
});
}
catch (IllegalStateException e) {
logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e);
return Optional.of(error);
}
return Optional.of(running);
}
/** Sends a mail with a notification of a failed run, if one should be sent. */
private void sendNotification(Run run, DualLogger logger) {
Application application = controller.applications().requireApplication(TenantAndApplicationId.from(run.id().application()));
Notifications notifications = application.deploymentSpec().requireInstance(run.id().application().instance()).notifications();
boolean newCommit = application.require(run.id().application().instance()).change().application()
.map(run.versions().targetApplication()::equals)
.orElse(false);
When when = newCommit ? failingCommit : failing;
List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when));
if (notifications.emailRolesFor(when).contains(author))
run.versions().targetApplication().authorEmail().ifPresent(recipients::add);
if (recipients.isEmpty())
return;
try {
mailOf(run, recipients).ifPresent(controller.serviceRegistry().mailer()::send);
}
catch (RuntimeException e) {
logger.log(INFO, "Exception trying to send mail for " + run.id(), e);
}
}
private Optional<Mail> mailOf(Run run, List<String> recipients) {
switch (run.status()) {
case running:
case aborted:
case success:
return Optional.empty();
case outOfCapacity:
return run.id().type().isProduction() ? Optional.of(mails.outOfCapacity(run.id(), recipients)) : Optional.empty();
case deploymentFailed:
return Optional.of(mails.deploymentFailure(run.id(), recipients));
case installationFailed:
return Optional.of(mails.installationFailure(run.id(), recipients));
case testFailure:
return Optional.of(mails.testFailure(run.id(), recipients));
case error:
case endpointCertificateTimeout:
return Optional.of(mails.systemError(run.id(), recipients));
default:
logger.log(WARNING, "Don't know what mail to send for run status '" + run.status() + "'");
return Optional.of(mails.systemError(run.id(), recipients));
}
}
/** Returns the deployment of the real application in the zone of the given job, if it exists. */
private Optional<Deployment> deployment(ApplicationId id, JobType type) {
return Optional.ofNullable(application(id).deployments().get(type.zone(controller.system())));
}
/** Returns the real application with the given id. */
private Instance application(ApplicationId id) {
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { });
return controller.applications().requireInstance(id);
}
/**
* Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout.
*
* We time out the job before the deployment expires, for zones where deployments are not persistent,
* to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry,
* and the given default installation timeout, minus one minute, is used as a timeout threshold.
*/
private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) {
Run run = controller.jobController().run(id).get();
if ( ! controller.system().isCd() && run.start().isAfter(deployment.at()))
return false;
Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone())
.filter(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0)
.orElse(defaultTimeout);
return deployment.at().isBefore(controller.clock().instant().minus(timeout.minus(Duration.ofMinutes(1))));
}
/** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */
private ApplicationPackage testerPackage(RunId id) {
ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication();
DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec();
ZoneId zone = id.type().zone(controller.system());
boolean useTesterCertificate = controller.system().isPublic() && id.type().environment().isTest();
byte[] servicesXml = servicesXml(! controller.system().isPublic(),
useTesterCertificate,
testerResourcesFor(zone, spec.requireInstance(id.application().instance())));
byte[] testPackage = controller.applications().applicationStore().getTester(id.application().tenant(), id.application().application(), version);
byte[] deploymentXml = deploymentXml(id.tester(),
spec.athenzDomain(),
spec.requireInstance(id.application().instance()).athenzService(zone.environment(), zone.region()));
try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) {
zipBuilder.add(testPackage);
zipBuilder.add("services.xml", servicesXml);
zipBuilder.add("deployment.xml", deploymentXml);
if (useTesterCertificate)
appendAndStoreCertificate(zipBuilder, id);
zipBuilder.close();
return new ApplicationPackage(zipBuilder.toByteArray());
}
}
private void appendAndStoreCertificate(ZipBuilder zipBuilder, RunId id) {
KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048);
X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "." + id.number());
X509Certificate certificate = X509CertificateBuilder.fromKeypair(keyPair,
subject,
controller.clock().instant(),
controller.clock().instant().plus(certificateTimeout),
SignatureAlgorithm.SHA512_WITH_RSA,
BigInteger.valueOf(1))
.build();
controller.jobController().storeTesterCertificate(id, certificate);
zipBuilder.add("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8));
zipBuilder.add("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8));
}
private DeploymentId getTesterDeploymentId(RunId runId) {
ZoneId zoneId = runId.type().zone(controller.system());
return new DeploymentId(runId.tester().id(), zoneId);
}
static NodeResources testerResourcesFor(ZoneId zone, DeploymentInstanceSpec spec) {
return spec.steps().stream()
.filter(step -> step.concerns(zone.environment()))
.findFirst()
.flatMap(step -> step.zones().get(0).testerFlavor())
.map(NodeResources::fromLegacyName)
.orElse(zone.region().value().contains("aws-") ?
DEFAULT_TESTER_RESOURCES_AWS : DEFAULT_TESTER_RESOURCES);
}
/** Returns the generated services.xml content for the tester application. */
static byte[] servicesXml(boolean systemUsesAthenz, boolean useTesterCertificate, NodeResources resources) {
int jdiscMemoryGb = 2;
int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb());
int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2);
String resourceString = String.format(Locale.ENGLISH,
"<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>",
resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name());
String servicesXml =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<services xmlns:deploy='vespa' version='1.0'>\n" +
" <container version='1.0' id='tester'>\n" +
"\n" +
" <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" +
" <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" +
" <artifactsPath>artifacts</artifactsPath>\n" +
" <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" +
" <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
" <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" +
" </config>\n" +
" </component>\n" +
"\n" +
" <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" +
" <binding>http:
" </handler>\n" +
"\n" +
" <nodes count=\"1\" allocated-memory=\"" + jdiscMemoryPct + "%\">\n" +
" " + resourceString + "\n" +
" </nodes>\n" +
" </container>\n" +
"</services>\n";
return servicesXml.getBytes(UTF_8);
}
/** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */
private static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) {
String deploymentSpec =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<deployment version=\"1.0\" " +
athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") +
athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("") + ">" +
" <instance id=\"" + id.id().instance().value() + "\" />" +
"</deployment>";
return deploymentSpec.getBytes(UTF_8);
}
/** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */
private class DualLogger {
private final RunId id;
private final Step step;
private DualLogger(RunId id, Step step) {
this.id = id;
this.step = step;
}
private void log(String... messages) {
log(List.of(messages));
}
private void logAll(List<LogEntry> messages) {
controller.jobController().log(id, step, messages);
}
private void log(List<String> messages) {
controller.jobController().log(id, step, INFO, messages);
}
private void log(Level level, String message) {
log(level, message, null);
}
private void logWithInternalException(Level level, String message, Throwable thrown) {
logger.log(level, id + " at " + step + ": " + message, thrown);
controller.jobController().log(id, step, level, message);
}
private void log(Level level, String message, Throwable thrown) {
logger.log(level, id + " at " + step + ": " + message, thrown);
if (thrown != null) {
ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream();
thrown.printStackTrace(new PrintStream(traceBuffer));
message += "\n" + traceBuffer;
}
controller.jobController().log(id, step, level, message);
}
}
} |
Nothing quite like set arithmetics :p | private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
logger.log(INFO, "Deployment expired before installation was successful.");
return Optional.of(installationFailed);
}
Versions versions = controller.jobController().run(id).get().versions();
Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform();
Run run = controller.jobController().run(id).get();
Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(id.application(), id.type().zone(controller.system())),
Optional.of(platform));
if (services.isEmpty()) {
logger.log("Config status not currently available -- will retry.");
return Optional.empty();
}
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
id.application(),
ImmutableSet.of(active, reserved));
List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
NodeList nodeList = NodeList.of(nodes, parents, services.get());
boolean firstTick = run.convergenceSummary().isEmpty();
if (firstTick) {
logger.log("
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
}
ConvergenceSummary summary = nodeList.summary();
if (summary.converged()) {
controller.jobController().locked(id, lockedRun -> lockedRun.withSummary(null));
if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) {
if (containersAreUp(id.application(), id.type().zone(controller.system()), logger)) {
logger.log("Installation succeeded!");
return Optional.of(running);
}
}
else if (timedOut(id, deployment.get(), endpointTimeout)) {
logger.log(WARNING, "Endpoints failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
return Optional.of(error);
}
}
String failureReason = null;
NodeList suspendedTooLong = nodeList.suspendedSince(controller.clock().instant().minus(nodesDownTimeout));
if ( ! suspendedTooLong.isEmpty()) {
failureReason = "Some nodes have been suspended for more than " + nodesDownTimeout.toMinutes() + " minutes:\n" +
suspendedTooLong.asList().stream().map(node -> node.node().hostname().value()).collect(joining("\n"));
}
if (run.noNodesDownSince()
.map(since -> since.isBefore(controller.clock().instant().minus(noNodesDownTimeout)))
.orElse(false)) {
if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0)
failureReason = "No nodes allowed to suspend to progress installation for " + noNodesDownTimeout.toMinutes() + " minutes.";
else
failureReason = "Nodes not able to start with new application package.";
}
Duration timeout = JobRunner.jobTimeout.minusHours(1);
if (timedOut(id, deployment.get(), timeout)) {
failureReason = "Installation failed to complete within " + timeout.toHours() + "hours!";
}
if (failureReason != null) {
logger.log("
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
logger.log("
logger.log(nodeList.not().in(nodeList.not().needsNewConfig()
.not().needsPlatformUpgrade()
.not().needsReboot()
.not().needsRestart()
.not().needsFirmwareUpgrade()
.not().needsOsUpgrade())
.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
logger.log(INFO, failureReason);
return Optional.of(installationFailed);
}
if ( ! firstTick)
logger.log(nodeList.expectedDown().concat(nodeList.needsNewConfig()).asList().stream()
.distinct()
.flatMap(node -> nodeDetails(node, false))
.collect(toList()));
controller.jobController().locked(id, lockedRun -> {
Instant noNodesDownSince = nodeList.allowedDown().size() == 0 ? lockedRun.noNodesDownSince().orElse(controller.clock().instant()) : null;
return lockedRun.noNodesDownSince(noNodesDownSince).withSummary(summary);
});
return Optional.empty();
} | .not().needsOsUpgrade()) | private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if (deployment.isEmpty()) {
logger.log(INFO, "Deployment expired before installation was successful.");
return Optional.of(installationFailed);
}
Versions versions = controller.jobController().run(id).get().versions();
Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform();
Run run = controller.jobController().run(id).get();
Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(id.application(), id.type().zone(controller.system())),
Optional.of(platform));
if (services.isEmpty()) {
logger.log("Config status not currently available -- will retry.");
return Optional.empty();
}
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
id.application(),
ImmutableSet.of(active, reserved));
List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()),
nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
NodeList nodeList = NodeList.of(nodes, parents, services.get());
boolean firstTick = run.convergenceSummary().isEmpty();
if (firstTick) {
logger.log("
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
}
ConvergenceSummary summary = nodeList.summary();
if (summary.converged()) {
controller.jobController().locked(id, lockedRun -> lockedRun.withSummary(null));
if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) {
if (containersAreUp(id.application(), id.type().zone(controller.system()), logger)) {
logger.log("Installation succeeded!");
return Optional.of(running);
}
}
else if (timedOut(id, deployment.get(), endpointTimeout)) {
logger.log(WARNING, "Endpoints failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
return Optional.of(error);
}
}
String failureReason = null;
NodeList suspendedTooLong = nodeList.suspendedSince(controller.clock().instant().minus(nodesDownTimeout));
if ( ! suspendedTooLong.isEmpty()) {
failureReason = "Some nodes have been suspended for more than " + nodesDownTimeout.toMinutes() + " minutes:\n" +
suspendedTooLong.asList().stream().map(node -> node.node().hostname().value()).collect(joining("\n"));
}
if (run.noNodesDownSince()
.map(since -> since.isBefore(controller.clock().instant().minus(noNodesDownTimeout)))
.orElse(false)) {
if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0)
failureReason = "No nodes allowed to suspend to progress installation for " + noNodesDownTimeout.toMinutes() + " minutes.";
else
failureReason = "Nodes not able to start with new application package.";
}
Duration timeout = JobRunner.jobTimeout.minusHours(1);
if (timedOut(id, deployment.get(), timeout)) {
failureReason = "Installation failed to complete within " + timeout.toHours() + "hours!";
}
if (failureReason != null) {
logger.log("
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
logger.log("
logger.log(nodeList.not().in(nodeList.not().needsNewConfig()
.not().needsPlatformUpgrade()
.not().needsReboot()
.not().needsRestart()
.not().needsFirmwareUpgrade()
.not().needsOsUpgrade())
.asList().stream()
.flatMap(node -> nodeDetails(node, true))
.collect(toList()));
logger.log(INFO, failureReason);
return Optional.of(installationFailed);
}
if ( ! firstTick)
logger.log(nodeList.expectedDown().concat(nodeList.needsNewConfig()).asList().stream()
.distinct()
.flatMap(node -> nodeDetails(node, false))
.collect(toList()));
controller.jobController().locked(id, lockedRun -> {
Instant noNodesDownSince = nodeList.allowedDown().size() == 0 ? lockedRun.noNodesDownSince().orElse(controller.clock().instant()) : null;
return lockedRun.noNodesDownSince(noNodesDownSince).withSummary(summary);
});
return Optional.empty();
} | class InternalStepRunner implements StepRunner {
private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName());
static final NodeResources DEFAULT_TESTER_RESOURCES =
new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any);
static final NodeResources DEFAULT_TESTER_RESOURCES_AWS =
new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any);
static final Duration capacityTimeout = Duration.ofMinutes(5);
static final Duration endpointTimeout = Duration.ofMinutes(15);
static final Duration endpointCertificateTimeout = Duration.ofMinutes(15);
static final Duration testerTimeout = Duration.ofMinutes(30);
static final Duration nodesDownTimeout = Duration.ofMinutes(60);
static final Duration noNodesDownTimeout = Duration.ofMinutes(120);
static final Duration certificateTimeout = Duration.ofMinutes(300);
private final Controller controller;
private final TestConfigSerializer testConfigSerializer;
private final DeploymentFailureMails mails;
public InternalStepRunner(Controller controller) {
this.controller = controller;
this.testConfigSerializer = new TestConfigSerializer(controller.system());
this.mails = new DeploymentFailureMails(controller.zoneRegistry());
}
@Override
public Optional<RunStatus> run(LockedStep step, RunId id) {
DualLogger logger = new DualLogger(id, step.get());
try {
switch (step.get()) {
case deployTester: return deployTester(id, logger);
case deployInitialReal: return deployInitialReal(id, logger);
case installInitialReal: return installInitialReal(id, logger);
case deployReal: return deployReal(id, logger);
case installTester: return installTester(id, logger);
case installReal: return installReal(id, logger);
case startStagingSetup: return startTests(id, true, logger);
case endStagingSetup:
case endTests: return endTests(id, logger);
case startTests: return startTests(id, false, logger);
case copyVespaLogs: return copyVespaLogs(id, logger);
case deactivateReal: return deactivateReal(id, logger);
case deactivateTester: return deactivateTester(id, logger);
case report: return report(id, logger);
default: throw new AssertionError("Unknown step '" + step + "'!");
}
}
catch (UncheckedIOException e) {
logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e);
return Optional.empty();
}
catch (RuntimeException e) {
logger.log(WARNING, "Unexpected exception running " + id, e);
if (step.get().alwaysRun()) {
logger.log("Will keep trying, as this is a cleanup step.");
return Optional.empty();
}
return Optional.of(error);
}
}
private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) {
Versions versions = controller.jobController().run(id).get().versions();
logger.log("Deploying platform version " +
versions.sourcePlatform().orElse(versions.targetPlatform()) +
" and application version " +
versions.sourceApplication().orElse(versions.targetApplication()).id() + " ...");
return deployReal(id, true, logger);
}
private Optional<RunStatus> deployReal(RunId id, DualLogger logger) {
Versions versions = controller.jobController().run(id).get().versions();
logger.log("Deploying platform version " + versions.targetPlatform() +
" and application version " + versions.targetApplication().id() + " ...");
return deployReal(id, false, logger);
}
private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, DualLogger logger) {
return deploy(id.application(),
id.type(),
() -> controller.applications().deploy2(id.job(), setTheStage),
controller.jobController().run(id).get()
.stepInfo(setTheStage ? deployInitialReal : deployReal).get()
.startTime().get(),
logger);
}
private Optional<RunStatus> deployTester(RunId id, DualLogger logger) {
Version platform = controller.systemVersion();
logger.log("Deploying the tester container on platform " + platform + " ...");
return deploy(id.tester().id(),
id.type(),
() -> controller.applications().deployTester(id.tester(),
testerPackage(id),
id.type().zone(controller.system()),
platform),
controller.jobController().run(id).get()
.stepInfo(deployTester).get()
.startTime().get(),
logger);
}
private Optional<RunStatus> deploy(ApplicationId id, JobType type, Supplier<ActivateResult> deployment,
Instant startTime, DualLogger logger) {
try {
PrepareResponse prepareResponse = deployment.get().prepareResponse();
if (prepareResponse.log != null)
logger.logAll(prepareResponse.log.stream()
.map(entry -> new LogEntry(0,
Instant.ofEpochMilli(entry.time),
LogEntry.typeOf(LogLevel.parse(entry.level)),
entry.message))
.collect(toList()));
if ( ! prepareResponse.configChangeActions.refeedActions.stream().allMatch(action -> action.allowed)) {
List<String> messages = new ArrayList<>();
messages.add("Deploy failed due to non-compatible changes that require re-feed.");
messages.add("Your options are:");
messages.add("1. Revert the incompatible changes.");
messages.add("2. If you think it is safe in your case, you can override this validation, see");
messages.add(" http:
messages.add("3. Deploy as a new application under a different name.");
messages.add("Illegal actions:");
prepareResponse.configChangeActions.refeedActions.stream()
.filter(action -> ! action.allowed)
.flatMap(action -> action.messages.stream())
.forEach(messages::add);
logger.log(messages);
return Optional.of(deploymentFailed);
}
if (prepareResponse.configChangeActions.restartActions.isEmpty())
logger.log("No services requiring restart.");
else
prepareResponse.configChangeActions.restartActions.stream()
.flatMap(action -> action.services.stream())
.map(service -> service.hostName)
.sorted().distinct()
.map(Hostname::new)
.forEach(hostname -> {
controller.applications().restart(new DeploymentId(id, type.zone(controller.system())), Optional.of(hostname));
logger.log("Schedule service restart on host " + hostname.id() + ".");
});
logger.log("Deployment successful.");
if (prepareResponse.message != null)
logger.log(prepareResponse.message);
return Optional.of(running);
}
catch (ConfigServerException e) {
Optional<RunStatus> result = startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(deploymentFailed) : Optional.empty();
switch (e.getErrorCode()) {
case CERTIFICATE_NOT_READY:
if (startTime.plus(endpointCertificateTimeout).isBefore(controller.clock().instant())) {
logger.log("Deployment failed to find provisioned endpoint certificate after " + endpointCertificateTimeout);
return Optional.of(RunStatus.endpointCertificateTimeout);
}
return result;
case ACTIVATION_CONFLICT:
case APPLICATION_LOCK_FAILURE:
logger.log("Deployment failed with possibly transient error " + e.getErrorCode() +
", will retry: " + e.getMessage());
return result;
case LOAD_BALANCER_NOT_READY:
case PARENT_HOST_NOT_READY:
logger.log(e.getServerMessage());
return result;
case OUT_OF_CAPACITY:
logger.log(e.getServerMessage());
return controller.system().isCd() && startTime.plus(capacityTimeout).isAfter(controller.clock().instant())
? Optional.empty()
: Optional.of(outOfCapacity);
case INVALID_APPLICATION_PACKAGE:
case BAD_REQUEST:
logger.log(e.getMessage());
return Optional.of(deploymentFailed);
}
throw e;
}
catch (EndpointCertificateException e) {
switch (e.type()) {
case CERT_NOT_AVAILABLE:
if (startTime.plus(endpointCertificateTimeout).isBefore(controller.clock().instant())) {
logger.log("Deployment failed to find provisioned endpoint certificate after " + endpointCertificateTimeout);
return Optional.of(RunStatus.endpointCertificateTimeout);
}
return Optional.empty();
default:
throw e;
}
}
}
private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) {
return installReal(id, true, logger);
}
private Optional<RunStatus> installReal(RunId id, DualLogger logger) {
return installReal(id, false, logger);
}
private Optional<RunStatus> installTester(RunId id, DualLogger logger) {
Run run = controller.jobController().run(id).get();
Version platform = controller.systemVersion();
ZoneId zone = id.type().zone(controller.system());
ApplicationId testerId = id.tester().id();
Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(testerId, zone),
Optional.of(platform));
if (services.isEmpty()) {
logger.log("Config status not currently available -- will retry.");
return run.stepInfo(installTester).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
? Optional.of(error)
: Optional.empty();
}
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone,
testerId,
ImmutableSet.of(active, reserved));
List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(zone,
nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
NodeList nodeList = NodeList.of(nodes, parents, services.get());
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, false))
.collect(toList()));
if (nodeList.summary().converged() && testerContainersAreUp(testerId, zone, logger)) {
logger.log("Tester container successfully installed!");
return Optional.of(running);
}
if (run.stepInfo(installTester).get().startTime().get().plus(testerTimeout).isBefore(controller.clock().instant())) {
logger.log(WARNING, "Installation of tester failed to complete within " + testerTimeout.toMinutes() + " minutes!");
return Optional.of(error);
}
return Optional.empty();
}
/** Returns true iff all containers in the deployment give 100 consecutive 200 OK responses on /status.html. */
private boolean containersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
    var zoneEndpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zoneId)));
    if ( ! zoneEndpoints.containsKey(zoneId))
        return false;  // No endpoints registered for this zone yet.

    for (var endpoint : zoneEndpoints.get(zoneId)) {
        if ( ! controller.jobController().cloud().ready(endpoint.url())) {
            logger.log("Failed to get 100 consecutive OKs from " + endpoint);
            return false;
        }
    }
    return true;
}
/** Returns true iff all containers in the tester deployment give 100 consecutive 200 OK responses on /status.html. */
private boolean testerContainersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
    DeploymentId deploymentId = new DeploymentId(id, zoneId);
    boolean ready = controller.jobController().cloud().testerReady(deploymentId);
    if ( ! ready)
        logger.log("Failed to get 100 consecutive OKs from tester container for " + deploymentId);
    return ready;
}
/**
 * Returns whether all endpoints for the given deployment are ready for traffic:
 * every endpoint must resolve in DNS, and exclusively-routed endpoints must also have
 * a CNAME pointing at their load balancer, whose address must match the endpoint's.
 */
private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) {
var endpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zone)));
if ( ! endpoints.containsKey(zone)) {
logger.log("Endpoints not yet ready.");
return false;
}
var policies = controller.routing().policies().get(new DeploymentId(id, zone));
for (var endpoint : endpoints.get(zone)) {
HostName endpointName = HostName.from(endpoint.dnsName());
// Every endpoint must at least resolve to an IP address.
var ipAddress = controller.jobController().cloud().resolveHostName(endpointName);
if (ipAddress.isEmpty()) {
logger.log(INFO, "DNS lookup yielded no IP address for '" + endpointName + "'.");
return false;
}
// Exclusively routed endpoints additionally require a CNAME to their load balancer.
if (endpoint.routingMethod() == RoutingMethod.exclusive) {
var policy = policies.get(new RoutingPolicyId(id, ClusterSpec.Id.from(endpoint.name()), zone));
if (policy == null)
throw new IllegalStateException(endpoint + " has no matching policy in " + policies);
// The CNAME must point at the load balancer named by the routing policy ...
var cNameValue = controller.jobController().cloud().resolveCname(endpointName);
if ( ! cNameValue.map(policy.canonicalName()::equals).orElse(false)) {
logger.log(INFO, "CNAME '" + endpointName + "' points at " +
cNameValue.map(name -> "'" + name + "'").orElse("nothing") +
" but should point at load balancer '" + policy.canonicalName() + "'");
return false;
}
// ... and the load balancer must resolve to the same address as the endpoint.
var loadBalancerAddress = controller.jobController().cloud().resolveHostName(policy.canonicalName());
if ( ! loadBalancerAddress.equals(ipAddress)) {
logger.log(INFO, "IP address of CNAME '" + endpointName + "' (" + ipAddress.get() + ") and load balancer '" +
policy.canonicalName() + "' (" + loadBalancerAddress.orElse("empty") + ") are not equal");
return false;
}
}
}
logEndpoints(endpoints, logger);
return true;
}
/** Writes a listing of all found endpoints, grouped by zone, to the job log. */
private void logEndpoints(Map<ZoneId, List<Endpoint>> zoneEndpoints, DualLogger logger) {
    List<String> lines = new ArrayList<>();
    lines.add("Found endpoints:");
    for (var entry : zoneEndpoints.entrySet()) {
        lines.add("- " + entry.getKey());
        entry.getValue().forEach(endpoint ->
                lines.add(" |-- " + endpoint.url() + " (cluster '" + endpoint.name() + "')"));
    }
    logger.log(lines);
}
/**
 * Renders a node's convergence status as log lines: one header line with orchestration
 * state and pending platform/OS/firmware/restart/reboot changes, followed by one line
 * per service (all services when printAllServices, otherwise only when the node still
 * needs new config).
 */
private Stream<String> nodeDetails(NodeWithServices node, boolean printAllServices) {
return Stream.concat(Stream.of(node.node().hostname() + ": " + humanize(node.node().serviceState()) + (node.node().suspendedSince().map(since -> " since " + since).orElse("")),
"--- platform " + node.node().wantedVersion() + (node.needsPlatformUpgrade()
? " <-- " + (node.node().currentVersion().isEmpty() ? "not booted" : node.node().currentVersion())
: "") +
(node.needsOsUpgrade() && node.isAllowedDown()
? ", upgrading OS (" + node.node().wantedOsVersion() + " <-- " + node.node().currentOsVersion() + ")"
: "") +
(node.needsFirmwareUpgrade() && node.isAllowedDown()
? ", upgrading firmware"
: "") +
(node.needsRestart()
? ", restart pending (" + node.node().wantedRestartGeneration() + " <-- " + node.node().restartGeneration() + ")"
: "") +
(node.needsReboot()
? ", reboot pending (" + node.node().wantedRebootGeneration() + " <-- " + node.node().rebootGeneration() + ")"
: "")),
node.services().stream()
.filter(service -> printAllServices || node.needsNewConfig())
.map(service -> "--- " + service.type() + " on port " + service.port() + (service.currentGeneration() == -1
? " has not started "
: " has config generation " + service.currentGeneration() + ", wanted is " + node.wantedConfigGeneration())));
}
/** Renders a node's orchestration state as a human-readable phrase. */
private String humanize(Node.ServiceState state) {
    if (state == Node.ServiceState.allowedDown) return "allowed to be DOWN";
    if (state == Node.ServiceState.expectedUp) return "expected to be UP";
    if (state == Node.ServiceState.unorchestrated) return "unorchestrated";
    return state.name();  // Fall back to the enum constant name.
}
/**
 * Step: tells the tester container to begin running its test suite against the
 * deployment under test, together with all production deployments of the instance.
 *
 * @param isSetup whether this is the staging-setup part of the suite
 */
private Optional<RunStatus> startTests(RunId id, boolean isSetup, DualLogger logger) {
    if (deployment(id.application(), id.type()).isEmpty()) {
        logger.log(INFO, "Deployment expired before tests could start.");
        return Optional.of(error);
    }

    // Tests may reach all production deployments, plus the deployment of this run's zone.
    Set<DeploymentId> deploymentsToTest = controller.applications().requireInstance(id.application())
                                                    .productionDeployments().keySet().stream()
                                                    .map(zone -> new DeploymentId(id.application(), zone))
                                                    .collect(Collectors.toSet());
    ZoneId zoneOfRun = id.type().zone(controller.system());
    deploymentsToTest.add(new DeploymentId(id.application(), zoneOfRun));

    logger.log("Attempting to find endpoints ...");
    var endpointsByZone = controller.routing().zoneEndpointsOf(deploymentsToTest);
    if ( ! endpointsByZone.containsKey(zoneOfRun)) {
        logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!");
        return Optional.of(error);
    }
    logEndpoints(endpointsByZone, logger);

    if ( ! controller.jobController().cloud().testerReady(getTesterDeploymentId(id))) {
        logger.log(WARNING, "Tester container went bad!");
        return Optional.of(error);
    }

    logger.log("Starting tests ...");
    TesterCloud.Suite suite = TesterCloud.Suite.of(id.type(), isSetup);
    byte[] testConfig = testConfigSerializer.configJson(id.application(),
                                                        id.type(),
                                                        true,
                                                        endpointsByZone,
                                                        controller.applications().contentClustersByZone(deploymentsToTest));
    controller.jobController().cloud().startTests(getTesterDeploymentId(id), suite, testConfig);
    return Optional.of(running);
}
/**
 * Step: polls the tester for its test progress, after first verifying that the
 * deployment and any tester certificate are still valid.
 *
 * Returns empty while tests are still running, and a terminal status otherwise.
 * @throws IllegalStateException if the tester claims tests never started
 */
private Optional<RunStatus> endTests(RunId id, DualLogger logger) {
    if (deployment(id.application(), id.type()).isEmpty()) {
        logger.log(INFO, "Deployment expired before tests could complete.");
        return Optional.of(aborted);
    }

    // A tester certificate which has expired mid-run can no longer be trusted.
    Optional<X509Certificate> certificate = controller.jobController().run(id).get().testerCertificate();
    if (certificate.isPresent()) {
        try {
            certificate.get().checkValidity(Date.from(controller.clock().instant()));
        }
        catch (CertificateExpiredException | CertificateNotYetValidException e) {
            logger.log(INFO, "Tester certificate expired before tests could complete.");
            return Optional.of(aborted);
        }
    }

    controller.jobController().updateTestLog(id);

    TesterCloud.Status status = controller.jobController().cloud().getStatus(getTesterDeploymentId(id));
    switch (status) {
        case NOT_STARTED:
            throw new IllegalStateException("Tester reports tests not started, even though they should have!");
        case RUNNING:
            return Optional.empty();  // Still going; poll again later.
        case FAILURE:
            logger.log("Tests failed.");
            return Optional.of(testFailure);
        case ERROR:
            logger.log(INFO, "Tester failed running its tests!");
            return Optional.of(error);
        case SUCCESS:
            logger.log("Tests completed successfully.");
            return Optional.of(running);
        default:
            throw new IllegalStateException("Unknown status '" + status + "'!");
    }
}
/**
 * Step: copies the Vespa log from the deployment into the run's log storage, if the
 * deployment still exists. A missing deployment is not an error for this cleanup step.
 */
private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) {
    if (deployment(id.application(), id.type()).isPresent()) {
        try {
            controller.jobController().updateVespaLog(id);
        }
        catch (Exception e) {  // Best effort: log fetching must not mask the run's real outcome.
            logger.log(INFO, "Failure getting vespa logs for " + id, e);
            return Optional.of(error);
        }
    }
    return Optional.of(running);
}
/**
 * Step: deactivates the real deployment of this run. Failures are retried for up to
 * an hour (measured from the step's start) before the step is marked as error.
 */
private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) {
    try {
        ZoneId zone = id.type().zone(controller.system());
        logger.log("Deactivating deployment of " + id.application() + " in " + zone + " ...");
        controller.applications().deactivate(id.application(), zone);
        return Optional.of(running);
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Failed deleting application " + id.application(), e);
        Instant stepStart = controller.jobController().run(id).get().stepInfo(deactivateReal).get().startTime().get();
        if (stepStart.isBefore(controller.clock().instant().minus(Duration.ofHours(1))))
            return Optional.of(error);  // Give up after an hour of retries.
        return Optional.empty();        // Retry later.
    }
}
/**
 * Step: deactivates the tester deployment of this run. Failures are retried for up to
 * an hour (measured from the step's start) before the step is marked as error.
 */
private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) {
    try {
        logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
        controller.jobController().deactivateTester(id.tester(), id.type());
        return Optional.of(running);
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Failed deleting tester of " + id.application(), e);
        Instant stepStart = controller.jobController().run(id).get().stepInfo(deactivateTester).get().startTime().get();
        if (stepStart.isBefore(controller.clock().instant().minus(Duration.ofHours(1))))
            return Optional.of(error);  // Give up after an hour of retries.
        return Optional.empty();        // Retry later.
    }
}
/**
 * Step: reports the outcome of the run, sending a failure notification when the run
 * is still active and has failed.
 */
private Optional<RunStatus> report(RunId id, DualLogger logger) {
    try {
        Optional<Run> activeRun = controller.jobController().active(id);
        if (activeRun.isPresent() && activeRun.get().hasFailed())
            sendNotification(activeRun.get(), logger);
    }
    catch (IllegalStateException e) {
        logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e);
        return Optional.of(error);
    }
    return Optional.of(running);
}
/** Sends a mail with a notification of a failed run, if one should be sent. */
private void sendNotification(Run run, DualLogger logger) {
    ApplicationId instanceId = run.id().application();
    Application application = controller.applications().requireApplication(TenantAndApplicationId.from(instanceId));
    Notifications notifications = application.deploymentSpec().requireInstance(instanceId.instance()).notifications();

    // A failure on a new commit notifies differently than a repeated failure.
    boolean failureIsOnNewCommit = application.require(instanceId.instance()).change().application()
                                              .map(run.versions().targetApplication()::equals)
                                              .orElse(false);
    When when = failureIsOnNewCommit ? failingCommit : failing;

    List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when));
    if (notifications.emailRolesFor(when).contains(author))
        run.versions().targetApplication().authorEmail().ifPresent(recipients::add);
    if (recipients.isEmpty())
        return;  // No one to notify.

    try {
        mailOf(run, recipients).ifPresent(controller.serviceRegistry().mailer()::send);
    }
    catch (RuntimeException e) {  // Notification failure must not fail the report step.
        logger.log(INFO, "Exception trying to send mail for " + run.id(), e);
    }
}
/** Returns the failure-notification mail to send for the given run, if any. */
private Optional<Mail> mailOf(Run run, List<String> recipients) {
    RunId id = run.id();
    switch (run.status()) {
        case running:
        case aborted:
        case success:
            return Optional.empty();  // Nothing to notify about.
        case outOfCapacity:
            // Capacity problems are only mail-worthy for production jobs.
            return id.type().isProduction() ? Optional.of(mails.outOfCapacity(id, recipients))
                                            : Optional.empty();
        case deploymentFailed:
            return Optional.of(mails.deploymentFailure(id, recipients));
        case installationFailed:
            return Optional.of(mails.installationFailure(id, recipients));
        case testFailure:
            return Optional.of(mails.testFailure(id, recipients));
        case error:
        case endpointCertificateTimeout:
            return Optional.of(mails.systemError(id, recipients));
        default:
            // Unknown status: warn internally, and treat it as a system error towards users.
            logger.log(WARNING, "Don't know what mail to send for run status '" + run.status() + "'");
            return Optional.of(mails.systemError(id, recipients));
    }
}
/** Returns the deployment of the real application in the zone of the given job, if it exists. */
private Optional<Deployment> deployment(ApplicationId id, JobType type) {
    ZoneId zone = type.zone(controller.system());
    return Optional.ofNullable(application(id).deployments().get(zone));
}
/** Returns the real application with the given id. */
private Instance application(ApplicationId id) {
// Take (and immediately release) the application lock before reading.
// NOTE(review): the no-op lambda looks intentional -- presumably this serializes with
// in-flight writers so the read below sees their completed state; confirm with the
// semantics of lockApplicationOrThrow.
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { });
return controller.applications().requireInstance(id);
}
/**
 * Returns whether the time since deployment exceeds the effective timeout: the lower
 * of the zone's deployment time-to-live and the given default, minus one minute — the
 * margin lets the job collect Vespa logs before a non-persistent deployment expires.
 */
private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) {
    Run run = controller.jobController().run(id).get();
    // Outside CD, a deployment made after this run started belongs to a later change; never time out on it.
    if ( ! controller.system().isCd() && run.start().isAfter(deployment.at()))
        return false;

    Duration effectiveTimeout = defaultTimeout;
    Optional<Duration> zoneTtl = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone());
    if (zoneTtl.isPresent() && zoneTtl.get().compareTo(defaultTimeout) < 0)
        effectiveTimeout = zoneTtl.get();

    Instant deadline = deployment.at().plus(effectiveTimeout.minus(Duration.ofMinutes(1)));
    return deadline.isBefore(controller.clock().instant());
}
/** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */
private ApplicationPackage testerPackage(RunId id) {
ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication();
DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec();
ZoneId zone = id.type().zone(controller.system());
// Tester certificates are only generated for test jobs in public systems.
boolean useTesterCertificate = controller.system().isPublic() && id.type().environment().isTest();
// Generated services.xml: Athenz credentials only outside public systems.
byte[] servicesXml = servicesXml(! controller.system().isPublic(),
useTesterCertificate,
testerResourcesFor(zone, spec.requireInstance(id.application().instance())));
// The pre-built test artifact (fat-jar) stored for this application version.
byte[] testPackage = controller.applications().applicationStore().getTester(id.application().tenant(), id.application().application(), version);
byte[] deploymentXml = deploymentXml(id.tester(),
spec.athenzDomain(),
spec.requireInstance(id.application().instance()).athenzService(zone.environment(), zone.region()));
// Assemble the zip: test artifact first, then generated config; certificate last, if used.
try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) {
zipBuilder.add(testPackage);
zipBuilder.add("services.xml", servicesXml);
zipBuilder.add("deployment.xml", deploymentXml);
if (useTesterCertificate)
appendAndStoreCertificate(zipBuilder, id);
zipBuilder.close();
return new ApplicationPackage(zipBuilder.toByteArray());
}
}
/**
 * Generates a self-signed certificate for the tester of this run, records it with the
 * job controller, and adds the PEM key and certificate to the package under artifacts/.
 */
private void appendAndStoreCertificate(ZipBuilder zipBuilder, RunId id) {
    KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048);
    String commonName = id.tester().id().toFullString() + "." + id.type() + "." + id.number();
    X509Certificate certificate = X509CertificateBuilder.fromKeypair(keyPair,
                                                                     new X500Principal("CN=" + commonName),
                                                                     controller.clock().instant(),
                                                                     controller.clock().instant().plus(certificateTimeout),
                                                                     SignatureAlgorithm.SHA512_WITH_RSA,
                                                                     BigInteger.valueOf(1))
                                                        .build();
    // Store first, so the run knows its certificate; then ship it with the package.
    controller.jobController().storeTesterCertificate(id, certificate);
    zipBuilder.add("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8));
    zipBuilder.add("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8));
}
/** Returns the id of the tester deployment belonging to the given run. */
private DeploymentId getTesterDeploymentId(RunId runId) {
    return new DeploymentId(runId.tester().id(), runId.type().zone(controller.system()));
}
/**
 * Returns the node resources for the tester in the given zone: the tester flavor of
 * the first deployment step concerning the zone's environment, when one is declared,
 * and otherwise a default (larger for AWS regions).
 */
static NodeResources testerResourcesFor(ZoneId zone, DeploymentInstanceSpec spec) {
    NodeResources fallback = zone.region().value().contains("aws-") ? DEFAULT_TESTER_RESOURCES_AWS
                                                                    : DEFAULT_TESTER_RESOURCES;
    for (var step : spec.steps()) {
        if ( ! step.concerns(zone.environment()))
            continue;
        // First matching step decides; its first zone's tester flavor, if any.
        return step.zones().get(0).testerFlavor()
                   .map(NodeResources::fromLegacyName)
                   .orElse(fallback);
    }
    return fallback;
}
/** Returns the generated services.xml content for the tester application. */
static byte[] servicesXml(boolean systemUsesAthenz, boolean useTesterCertificate, NodeResources resources) {
// Reserve a fixed 2 GB for the jdisc container; the test runner gets half the remainder.
int jdiscMemoryGb = 2;
int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb());
int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2);
String resourceString = String.format(Locale.ENGLISH,
"<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>",
resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name());
String servicesXml =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<services xmlns:deploy='vespa' version='1.0'>\n" +
" <container version='1.0' id='tester'>\n" +
"\n" +
" <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" +
" <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" +
" <artifactsPath>artifacts</artifactsPath>\n" +
" <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" +
" <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
" <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" +
" </config>\n" +
" </component>\n" +
"\n" +
" <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" +
// NOTE(review): the binding literal below appears truncated in this extraction
// (likely an original "http://*/..." URL whose "//" was stripped as a comment) -- restore from VCS.
" <binding>http:
" </handler>\n" +
"\n" +
" <nodes count=\"1\" allocated-memory=\"" + jdiscMemoryPct + "%\">\n" +
" " + resourceString + "\n" +
" </nodes>\n" +
" </container>\n" +
"</services>\n";
return servicesXml.getBytes(UTF_8);
}
/** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */
private static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) {
String deploymentSpec =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<deployment version=\"1.0\" " +
// Athenz attributes are emitted only when configured in the application's spec.
athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") +
athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("") + ">" +
" <instance id=\"" + id.id().instance().value() + "\" />" +
"</deployment>";
return deploymentSpec.getBytes(UTF_8);
}
/** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */
private class DualLogger {

    private final RunId id;
    private final Step step;

    private DualLogger(RunId id, Step step) {
        this.id = id;
        this.step = step;
    }

    // Logs the given messages to the job log at INFO level.
    private void log(String... messages) {
        log(List.of(messages));
    }

    private void log(List<String> messages) {
        controller.jobController().log(id, step, INFO, messages);
    }

    // Logs pre-built entries to the job log verbatim.
    private void logAll(List<LogEntry> entries) {
        controller.jobController().log(id, step, entries);
    }

    private void log(Level level, String message) {
        log(level, message, null);
    }

    // Logs the throwable only to the internal logger; the job log gets just the message.
    private void logWithInternalException(Level level, String message, Throwable thrown) {
        logger.log(level, id + " at " + step + ": " + message, thrown);
        controller.jobController().log(id, step, level, message);
    }

    // Logs to both sinks; the job-log message has the stack trace appended, when present.
    private void log(Level level, String message, Throwable thrown) {
        logger.log(level, id + " at " + step + ": " + message, thrown);
        if (thrown != null) {
            ByteArrayOutputStream buffer = new ByteArrayOutputStream();
            thrown.printStackTrace(new PrintStream(buffer));
            message += "\n" + buffer;
        }
        controller.jobController().log(id, step, level, message);
    }

}
} | class InternalStepRunner implements StepRunner {
// Internal logger; user-facing messages go through DualLogger to the job log instead.
private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName());
// Default tester node resources; AWS zones get a larger default.
static final NodeResources DEFAULT_TESTER_RESOURCES =
new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any);
static final NodeResources DEFAULT_TESTER_RESOURCES_AWS =
new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any);
// Step timeouts. Package-private so tests can reference them.
static final Duration capacityTimeout = Duration.ofMinutes(5);
static final Duration endpointTimeout = Duration.ofMinutes(15);
static final Duration endpointCertificateTimeout = Duration.ofMinutes(15);
static final Duration testerTimeout = Duration.ofMinutes(30);
static final Duration nodesDownTimeout = Duration.ofMinutes(60);
static final Duration noNodesDownTimeout = Duration.ofMinutes(120);
// Validity period of the generated tester certificate (see appendAndStoreCertificate).
static final Duration certificateTimeout = Duration.ofMinutes(300);
private final Controller controller;
private final TestConfigSerializer testConfigSerializer;
private final DeploymentFailureMails mails;
public InternalStepRunner(Controller controller) {
this.controller = controller;
this.testConfigSerializer = new TestConfigSerializer(controller.system());
this.mails = new DeploymentFailureMails(controller.zoneRegistry());
}
@Override
public Optional<RunStatus> run(LockedStep step, RunId id) {
DualLogger logger = new DualLogger(id, step.get());
try {
// Dispatch to the handler for this step of the run.
switch (step.get()) {
case deployTester: return deployTester(id, logger);
case deployInitialReal: return deployInitialReal(id, logger);
case installInitialReal: return installInitialReal(id, logger);
case deployReal: return deployReal(id, logger);
case installTester: return installTester(id, logger);
case installReal: return installReal(id, logger);
case startStagingSetup: return startTests(id, true, logger);
case endStagingSetup:
case endTests: return endTests(id, logger);
case startTests: return startTests(id, false, logger);
case copyVespaLogs: return copyVespaLogs(id, logger);
case deactivateReal: return deactivateReal(id, logger);
case deactivateTester: return deactivateTester(id, logger);
case report: return report(id, logger);
default: throw new AssertionError("Unknown step '" + step + "'!");
}
}
catch (UncheckedIOException e) {
// IO problems are assumed transient: keep the stack trace internal and retry.
logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e);
return Optional.empty();
}
catch (RuntimeException e) {
logger.log(WARNING, "Unexpected exception running " + id, e);
// Cleanup steps are retried even on unexpected failure; others fail the run.
if (step.get().alwaysRun()) {
logger.log("Will keep trying, as this is a cleanup step.");
return Optional.empty();
}
return Optional.of(error);
}
}
/** Step: deploys the real application on the versions it is changing from, to set the stage. */
private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) {
    Versions versions = controller.jobController().run(id).get().versions();
    logger.log("Deploying platform version " +
               versions.sourcePlatform().orElse(versions.targetPlatform()) +
               " and application version " +
               versions.sourceApplication().orElse(versions.targetApplication()).id() + " ...");
    return deployReal(id, true, logger);
}
/** Step: deploys the real application on its target platform and application versions. */
private Optional<RunStatus> deployReal(RunId id, DualLogger logger) {
    Versions versions = controller.jobController().run(id).get().versions();
    logger.log("Deploying platform version " + versions.targetPlatform() +
               " and application version " + versions.targetApplication().id() + " ...");
    return deployReal(id, false, logger);
}
/** Deploys the real application, using the deploy step's start time for timeout accounting. */
private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, DualLogger logger) {
    // The relevant step's start time anchors the retry/timeout logic in deploy().
    Instant deployStart = controller.jobController().run(id).get()
                                    .stepInfo(setTheStage ? deployInitialReal : deployReal).get()
                                    .startTime().get();
    return deploy(id.application(),
                  id.type(),
                  () -> controller.applications().deploy2(id.job(), setTheStage),
                  deployStart,
                  logger);
}
/** Step: deploys the tester container application, on the current system version. */
private Optional<RunStatus> deployTester(RunId id, DualLogger logger) {
    Version platform = controller.systemVersion();
    logger.log("Deploying the tester container on platform " + platform + " ...");
    Instant deployStart = controller.jobController().run(id).get()
                                    .stepInfo(deployTester).get()
                                    .startTime().get();
    return deploy(id.tester().id(),
                  id.type(),
                  () -> controller.applications().deployTester(id.tester(),
                                                               testerPackage(id),
                                                               id.type().zone(controller.system()),
                                                               platform),
                  deployStart,
                  logger);
}
/**
 * Performs the given deployment and interprets its outcome: forwards the config
 * server's prepare log, rejects deployments requiring re-feed, schedules any required
 * service restarts, and maps config-server and certificate errors to run statuses
 * (retry as empty, terminal as a concrete RunStatus).
 */
private Optional<RunStatus> deploy(ApplicationId id, JobType type, Supplier<ActivateResult> deployment,
Instant startTime, DualLogger logger) {
try {
PrepareResponse prepareResponse = deployment.get().prepareResponse();
// Forward the config server's own log entries to the job log.
if (prepareResponse.log != null)
logger.logAll(prepareResponse.log.stream()
.map(entry -> new LogEntry(0,
Instant.ofEpochMilli(entry.time),
LogEntry.typeOf(LogLevel.parse(entry.level)),
entry.message))
.collect(toList()));
// Disallowed re-feed actions make the deployment fail, with guidance for the user.
if ( ! prepareResponse.configChangeActions.refeedActions.stream().allMatch(action -> action.allowed)) {
List<String> messages = new ArrayList<>();
messages.add("Deploy failed due to non-compatible changes that require re-feed.");
messages.add("Your options are:");
messages.add("1. Revert the incompatible changes.");
messages.add("2. If you think it is safe in your case, you can override this validation, see");
// NOTE(review): the literal below appears truncated in this extraction (likely an
// original "http://..." documentation URL whose "//" was stripped) -- restore from VCS.
messages.add(" http:
messages.add("3. Deploy as a new application under a different name.");
messages.add("Illegal actions:");
prepareResponse.configChangeActions.refeedActions.stream()
.filter(action -> ! action.allowed)
.flatMap(action -> action.messages.stream())
.forEach(messages::add);
logger.log(messages);
return Optional.of(deploymentFailed);
}
// Schedule restarts for services which require one, one host at a time.
if (prepareResponse.configChangeActions.restartActions.isEmpty())
logger.log("No services requiring restart.");
else
prepareResponse.configChangeActions.restartActions.stream()
.flatMap(action -> action.services.stream())
.map(service -> service.hostName)
.sorted().distinct()
.map(Hostname::new)
.forEach(hostname -> {
controller.applications().restart(new DeploymentId(id, type.zone(controller.system())), Optional.of(hostname));
logger.log("Schedule service restart on host " + hostname.id() + ".");
});
logger.log("Deployment successful.");
if (prepareResponse.message != null)
logger.log(prepareResponse.message);
return Optional.of(running);
}
catch (ConfigServerException e) {
// Transient errors are retried (empty) until an hour after the step started.
Optional<RunStatus> result = startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(deploymentFailed) : Optional.empty();
switch (e.getErrorCode()) {
case CERTIFICATE_NOT_READY:
if (startTime.plus(endpointCertificateTimeout).isBefore(controller.clock().instant())) {
logger.log("Deployment failed to find provisioned endpoint certificate after " + endpointCertificateTimeout);
return Optional.of(RunStatus.endpointCertificateTimeout);
}
return result;
case ACTIVATION_CONFLICT:
case APPLICATION_LOCK_FAILURE:
logger.log("Deployment failed with possibly transient error " + e.getErrorCode() +
", will retry: " + e.getMessage());
return result;
case LOAD_BALANCER_NOT_READY:
case PARENT_HOST_NOT_READY:
logger.log(e.getServerMessage());
return result;
case OUT_OF_CAPACITY:
// Only CD systems retry capacity shortages, and only briefly.
logger.log(e.getServerMessage());
return controller.system().isCd() && startTime.plus(capacityTimeout).isAfter(controller.clock().instant())
? Optional.empty()
: Optional.of(outOfCapacity);
case INVALID_APPLICATION_PACKAGE:
case BAD_REQUEST:
// User errors fail the deployment immediately.
logger.log(e.getMessage());
return Optional.of(deploymentFailed);
}
// Unhandled config server error codes propagate to the caller.
throw e;
}
catch (EndpointCertificateException e) {
switch (e.type()) {
case CERT_NOT_AVAILABLE:
if (startTime.plus(endpointCertificateTimeout).isBefore(controller.clock().instant())) {
logger.log("Deployment failed to find provisioned endpoint certificate after " + endpointCertificateTimeout);
return Optional.of(RunStatus.endpointCertificateTimeout);
}
return Optional.empty();
default:
throw e;
}
}
}
private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) {
return installReal(id, true, logger);
}
private Optional<RunStatus> installReal(RunId id, DualLogger logger) {
return installReal(id, false, logger);
}
private Optional<RunStatus> installTester(RunId id, DualLogger logger) {
Run run = controller.jobController().run(id).get();
Version platform = controller.systemVersion();
ZoneId zone = id.type().zone(controller.system());
ApplicationId testerId = id.tester().id();
Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(testerId, zone),
Optional.of(platform));
if (services.isEmpty()) {
logger.log("Config status not currently available -- will retry.");
return run.stepInfo(installTester).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5)))
? Optional.of(error)
: Optional.empty();
}
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone,
testerId,
ImmutableSet.of(active, reserved));
List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(zone,
nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
NodeList nodeList = NodeList.of(nodes, parents, services.get());
logger.log(nodeList.asList().stream()
.flatMap(node -> nodeDetails(node, false))
.collect(toList()));
if (nodeList.summary().converged() && testerContainersAreUp(testerId, zone, logger)) {
logger.log("Tester container successfully installed!");
return Optional.of(running);
}
if (run.stepInfo(installTester).get().startTime().get().plus(testerTimeout).isBefore(controller.clock().instant())) {
logger.log(WARNING, "Installation of tester failed to complete within " + testerTimeout.toMinutes() + " minutes!");
return Optional.of(error);
}
return Optional.empty();
}
/** Returns true iff all containers in the deployment give 100 consecutive 200 OK responses on /status.html. */
private boolean containersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
var endpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zoneId)));
if ( ! endpoints.containsKey(zoneId))
return false;
for (var endpoint : endpoints.get(zoneId)) {
boolean ready = controller.jobController().cloud().ready(endpoint.url());
if ( ! ready) {
logger.log("Failed to get 100 consecutive OKs from " + endpoint);
return false;
}
}
return true;
}
/** Returns true iff all containers in the tester deployment give 100 consecutive 200 OK responses on /status.html. */
private boolean testerContainersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
DeploymentId deploymentId = new DeploymentId(id, zoneId);
if (controller.jobController().cloud().testerReady(deploymentId)) {
return true;
} else {
logger.log("Failed to get 100 consecutive OKs from tester container for " + deploymentId);
return false;
}
}
private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) {
var endpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zone)));
if ( ! endpoints.containsKey(zone)) {
logger.log("Endpoints not yet ready.");
return false;
}
var policies = controller.routing().policies().get(new DeploymentId(id, zone));
for (var endpoint : endpoints.get(zone)) {
HostName endpointName = HostName.from(endpoint.dnsName());
var ipAddress = controller.jobController().cloud().resolveHostName(endpointName);
if (ipAddress.isEmpty()) {
logger.log(INFO, "DNS lookup yielded no IP address for '" + endpointName + "'.");
return false;
}
if (endpoint.routingMethod() == RoutingMethod.exclusive) {
var policy = policies.get(new RoutingPolicyId(id, ClusterSpec.Id.from(endpoint.name()), zone));
if (policy == null)
throw new IllegalStateException(endpoint + " has no matching policy in " + policies);
var cNameValue = controller.jobController().cloud().resolveCname(endpointName);
if ( ! cNameValue.map(policy.canonicalName()::equals).orElse(false)) {
logger.log(INFO, "CNAME '" + endpointName + "' points at " +
cNameValue.map(name -> "'" + name + "'").orElse("nothing") +
" but should point at load balancer '" + policy.canonicalName() + "'");
return false;
}
var loadBalancerAddress = controller.jobController().cloud().resolveHostName(policy.canonicalName());
if ( ! loadBalancerAddress.equals(ipAddress)) {
logger.log(INFO, "IP address of CNAME '" + endpointName + "' (" + ipAddress.get() + ") and load balancer '" +
policy.canonicalName() + "' (" + loadBalancerAddress.orElse("empty") + ") are not equal");
return false;
}
}
}
logEndpoints(endpoints, logger);
return true;
}
private void logEndpoints(Map<ZoneId, List<Endpoint>> zoneEndpoints, DualLogger logger) {
List<String> messages = new ArrayList<>();
messages.add("Found endpoints:");
zoneEndpoints.forEach((zone, endpoints) -> {
messages.add("- " + zone);
for (Endpoint endpoint : endpoints)
messages.add(" |-- " + endpoint.url() + " (cluster '" + endpoint.name() + "')");
});
logger.log(messages);
}
/**
 * Renders a human-readable status report for one node: a header line with its hostname and
 * orchestration state, a "--- platform" line listing any pending platform/OS/firmware/restart/reboot
 * work, and one "---" line per service (all services when printAllServices, otherwise only
 * when the node still needs new config).
 */
private Stream<String> nodeDetails(NodeWithServices node, boolean printAllServices) {
    return Stream.concat(Stream.of(node.node().hostname() + ": " + humanize(node.node().serviceState()) + (node.node().suspendedSince().map(since -> " since " + since).orElse("")),
                                   "--- platform " + node.node().wantedVersion() + (node.needsPlatformUpgrade()
                                           ? " <-- " + (node.node().currentVersion().isEmpty() ? "not booted" : node.node().currentVersion())
                                           : "") +
                                   // OS and firmware upgrades are only reported while the node is allowed to be down.
                                   (node.needsOsUpgrade() && node.isAllowedDown()
                                           ? ", upgrading OS (" + node.node().wantedOsVersion() + " <-- " + node.node().currentOsVersion() + ")"
                                           : "") +
                                   (node.needsFirmwareUpgrade() && node.isAllowedDown()
                                           ? ", upgrading firmware"
                                           : "") +
                                   (node.needsRestart()
                                           ? ", restart pending (" + node.node().wantedRestartGeneration() + " <-- " + node.node().restartGeneration() + ")"
                                           : "") +
                                   (node.needsReboot()
                                           ? ", reboot pending (" + node.node().wantedRebootGeneration() + " <-- " + node.node().rebootGeneration() + ")"
                                           : "")),
                         node.services().stream()
                             .filter(service -> printAllServices || node.needsNewConfig()) // NOTE(review): 'service' is unused — the predicate is constant per node; confirm this is intended
                             .map(service -> "--- " + service.type() + " on port " + service.port() + (service.currentGeneration() == -1
                                     ? " has not started "
                                     : " has config generation " + service.currentGeneration() + ", wanted is " + node.wantedConfigGeneration())));
}
/** Translates a node service state into a human-readable phrase; unknown states fall back to the enum name. */
private String humanize(Node.ServiceState state) {
    if (state == Node.ServiceState.allowedDown) return "allowed to be DOWN";
    if (state == Node.ServiceState.expectedUp) return "expected to be UP";
    if (state == Node.ServiceState.unorchestrated) return "unorchestrated";
    return state.name();
}
/**
 * Asks the tester container to start the test suite for the given run.
 *
 * Collects endpoints for all production deployments plus the zone under test, serializes them
 * into the test config, and triggers the tester. Returns {@code error} if the deployment or its
 * endpoints have disappeared, or if the tester container is unhealthy; otherwise {@code running}.
 *
 * @param isSetup whether this is the staging setup phase rather than the test phase proper
 */
private Optional<RunStatus> startTests(RunId id, boolean isSetup, DualLogger logger) {
    Optional<Deployment> deployment = deployment(id.application(), id.type());
    if (deployment.isEmpty()) {
        logger.log(INFO, "Deployment expired before tests could start.");
        return Optional.of(error);
    }
    // Tests may talk to all production deployments, in addition to the zone being tested.
    var deployments = controller.applications().requireInstance(id.application())
                                .productionDeployments().keySet().stream()
                                .map(zone -> new DeploymentId(id.application(), zone))
                                .collect(Collectors.toSet());
    ZoneId zoneId = id.type().zone(controller.system());
    deployments.add(new DeploymentId(id.application(), zoneId));
    logger.log("Attempting to find endpoints ...");
    var endpoints = controller.routing().zoneEndpointsOf(deployments);
    // endpointsAvailable already succeeded earlier in the run, so absence here means they vanished.
    if ( ! endpoints.containsKey(zoneId)) {
        logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!");
        return Optional.of(error);
    }
    logEndpoints(endpoints, logger);
    if (!controller.jobController().cloud().testerReady(getTesterDeploymentId(id))) {
        logger.log(WARNING, "Tester container went bad!");
        return Optional.of(error);
    }
    logger.log("Starting tests ...");
    TesterCloud.Suite suite = TesterCloud.Suite.of(id.type(), isSetup);
    byte[] config = testConfigSerializer.configJson(id.application(),
                                                    id.type(),
                                                    true,
                                                    endpoints,
                                                    controller.applications().contentClustersByZone(deployments));
    controller.jobController().cloud().startTests(getTesterDeploymentId(id), suite, config);
    return Optional.of(running);
}
/**
 * Polls the tester for test progress and translates its status into a run status:
 * empty while tests are still running, {@code running} on success, {@code testFailure} on failed
 * tests, and {@code error}/{@code aborted} on infrastructure problems. Also aborts if the
 * deployment or the tester's certificate expired before the tests completed.
 */
private Optional<RunStatus> endTests(RunId id, DualLogger logger) {
    if (deployment(id.application(), id.type()).isEmpty()) {
        logger.log(INFO, "Deployment expired before tests could complete.");
        return Optional.of(aborted);
    }
    // A tester certificate only exists for runs that deployed with one; if it has expired,
    // we can no longer talk to the tester and must give up.
    Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate();
    if (testerCertificate.isPresent()) {
        try {
            testerCertificate.get().checkValidity(Date.from(controller.clock().instant()));
        }
        catch (CertificateExpiredException | CertificateNotYetValidException e) {
            logger.log(INFO, "Tester certificate expired before tests could complete.");
            return Optional.of(aborted);
        }
    }
    // Pull any new test log output into the job log before inspecting status.
    controller.jobController().updateTestLog(id);
    TesterCloud.Status testStatus = controller.jobController().cloud().getStatus(getTesterDeploymentId(id));
    switch (testStatus) {
        case NOT_STARTED:
            throw new IllegalStateException("Tester reports tests not started, even though they should have!");
        case RUNNING:
            return Optional.empty(); // Not done yet — poll again later.
        case FAILURE:
            logger.log("Tests failed.");
            return Optional.of(testFailure);
        case ERROR:
            logger.log(INFO, "Tester failed running its tests!");
            return Optional.of(error);
        case SUCCESS:
            logger.log("Tests completed successfully.");
            return Optional.of(running);
        default:
            throw new IllegalStateException("Unknown status '" + testStatus + "'!");
    }
}
/**
 * Copies the Vespa log from the deployment into the job log, if the deployment still exists.
 * Best effort: a missing deployment is silently skipped, but a failure while fetching logs
 * marks the run as {@code error}.
 */
private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) {
    if (deployment(id.application(), id.type()).isPresent())
        try {
            controller.jobController().updateVespaLog(id);
        }
        catch (Exception e) {
            logger.log(INFO, "Failure getting vespa logs for " + id, e);
            return Optional.of(error);
        }
    return Optional.of(running);
}
/**
 * Deactivates the real application's deployment in this job's zone.
 * On failure, retries (by returning empty) for up to one hour after the step first started,
 * then gives up with {@code error}.
 */
private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) {
    try {
        logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
        controller.applications().deactivate(id.application(), id.type().zone(controller.system()));
        return Optional.of(running);
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Failed deleting application " + id.application(), e);
        // Empty result means "try this step again"; only time out after an hour of failures.
        Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateReal).get().startTime().get();
        return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
               ? Optional.of(error)
               : Optional.empty();
    }
}
/**
 * Deactivates the tester application's deployment in this job's zone.
 * Mirrors {@code deactivateReal}: on failure, retries for up to one hour after the step first
 * started, then gives up with {@code error}.
 */
private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) {
    try {
        logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
        controller.jobController().deactivateTester(id.tester(), id.type());
        return Optional.of(running);
    }
    catch (RuntimeException e) {
        logger.log(WARNING, "Failed deleting tester of " + id.application(), e);
        // Empty result means "try this step again"; only time out after an hour of failures.
        Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateTester).get().startTime().get();
        return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
               ? Optional.of(error)
               : Optional.empty();
    }
}
/**
 * Final reporting step: if the run is still active and has failed, sends failure notifications.
 * An {@link IllegalStateException} here indicates the job is no longer supposed to run, and
 * turns the run status into {@code error}.
 */
private Optional<RunStatus> report(RunId id, DualLogger logger) {
    try {
        controller.jobController().active(id).ifPresent(run -> {
            if (run.hasFailed())
                sendNotification(run, logger);
        });
    }
    catch (IllegalStateException e) {
        logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e);
        return Optional.of(error);
    }
    return Optional.of(running);
}
/**
 * Sends a mail with a notification of a failed run, if one should be sent.
 *
 * The deployment spec's notification settings decide the recipients: explicit addresses for the
 * relevant trigger ({@code failingCommit} when the failure is on a new application change,
 * {@code failing} otherwise), plus the commit author when the {@code author} role is configured.
 * Mailer failures are logged but never fail the run.
 */
private void sendNotification(Run run, DualLogger logger) {
    Application application = controller.applications().requireApplication(TenantAndApplicationId.from(run.id().application()));
    Notifications notifications = application.deploymentSpec().requireInstance(run.id().application().instance()).notifications();
    // "New commit" means the change being rolled out is exactly the application version this run targets.
    boolean newCommit = application.require(run.id().application().instance()).change().application()
                                   .map(run.versions().targetApplication()::equals)
                                   .orElse(false);
    When when = newCommit ? failingCommit : failing;
    List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when));
    if (notifications.emailRolesFor(when).contains(author))
        run.versions().targetApplication().authorEmail().ifPresent(recipients::add);
    if (recipients.isEmpty())
        return;
    try {
        mailOf(run, recipients).ifPresent(controller.serviceRegistry().mailer()::send);
    }
    catch (RuntimeException e) {
        logger.log(INFO, "Exception trying to send mail for " + run.id(), e);
    }
}
/**
 * Maps a run's status to the notification mail to send, or empty when no mail is warranted
 * (running, aborted, success, or out-of-capacity in a non-production job).
 * Unknown statuses are logged and treated as system errors.
 */
private Optional<Mail> mailOf(Run run, List<String> recipients) {
    switch (run.status()) {
        case running:
        case aborted:
        case success:
            return Optional.empty();
        case outOfCapacity:
            // Capacity problems in test/staging are transient and not worth a mail.
            return run.id().type().isProduction() ? Optional.of(mails.outOfCapacity(run.id(), recipients)) : Optional.empty();
        case deploymentFailed:
            return Optional.of(mails.deploymentFailure(run.id(), recipients));
        case installationFailed:
            return Optional.of(mails.installationFailure(run.id(), recipients));
        case testFailure:
            return Optional.of(mails.testFailure(run.id(), recipients));
        case error:
        case endpointCertificateTimeout:
            return Optional.of(mails.systemError(run.id(), recipients));
        default:
            logger.log(WARNING, "Don't know what mail to send for run status '" + run.status() + "'");
            return Optional.of(mails.systemError(run.id(), recipients));
    }
}
/** Returns the deployment of the real application in the zone of the given job, if it exists. */
private Optional<Deployment> deployment(ApplicationId id, JobType type) {
    return Optional.ofNullable(application(id).deployments().get(type.zone(controller.system())));
}
/** Returns the real application with the given id. */
private Instance application(ApplicationId id) {
    // Takes (and immediately releases) the application lock with a no-op action —
    // presumably to synchronize with concurrent modifications before reading; TODO confirm.
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { });
    return controller.applications().requireInstance(id);
}
/**
* Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout.
*
* We time out the job before the deployment expires, for zones where deployments are not persistent,
* to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry,
* and the given default installation timeout, minus one minute, is used as a timeout threshold.
*/
private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) {
    // Outside CD systems, a run which started before the deployment it watches is never timed out here.
    Run run = controller.jobController().run(id).get();
    if ( ! controller.system().isCd() && run.start().isAfter(deployment.at()))
        return false;
    // Effective timeout: the smaller of the zone's deployment TTL and the given default.
    Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone())
                                 .filter(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0)
                                 .orElse(defaultTimeout);
    // One minute of slack so the Vespa log can still be collected before the deployment expires.
    return deployment.at().isBefore(controller.clock().instant().minus(timeout.minus(Duration.ofMinutes(1))));
}
/** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */
private ApplicationPackage testerPackage(RunId id) {
    ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication();
    DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec();
    ZoneId zone = id.type().zone(controller.system());
    // Public systems use a generated tester certificate instead of Athenz credentials, for test environments.
    boolean useTesterCertificate = controller.system().isPublic() && id.type().environment().isTest();
    byte[] servicesXml = servicesXml(! controller.system().isPublic(),
                                     useTesterCertificate,
                                     testerResourcesFor(zone, spec.requireInstance(id.application().instance())));
    byte[] testPackage = controller.applications().applicationStore().getTester(id.application().tenant(), id.application().application(), version);
    byte[] deploymentXml = deploymentXml(id.tester(),
                                         spec.athenzDomain(),
                                         spec.requireInstance(id.application().instance()).athenzService(zone.environment(), zone.region()));
    // Zip: stored test package + generated services.xml and deployment.xml (+ certificate when applicable).
    try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) {
        zipBuilder.add(testPackage);
        zipBuilder.add("services.xml", servicesXml);
        zipBuilder.add("deployment.xml", deploymentXml);
        if (useTesterCertificate)
            appendAndStoreCertificate(zipBuilder, id);
        zipBuilder.close();
        return new ApplicationPackage(zipBuilder.toByteArray());
    }
}
/**
 * Generates a fresh self-signed RSA certificate for the tester of the given run, stores it
 * with the job controller, and adds the PEM key and certificate to the tester package zip
 * under {@code artifacts/}.
 */
private void appendAndStoreCertificate(ZipBuilder zipBuilder, RunId id) {
    KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048);
    // Subject encodes tester id, job type and run number, making each run's certificate unique.
    X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "." + id.number());
    X509Certificate certificate = X509CertificateBuilder.fromKeypair(keyPair,
                                                                     subject,
                                                                     controller.clock().instant(),
                                                                     controller.clock().instant().plus(certificateTimeout),
                                                                     SignatureAlgorithm.SHA512_WITH_RSA,
                                                                     BigInteger.valueOf(1))
                                                        .build();
    controller.jobController().storeTesterCertificate(id, certificate);
    zipBuilder.add("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8));
    zipBuilder.add("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8));
}
/** Returns the deployment id of the given run's tester application, in the run's zone. */
private DeploymentId getTesterDeploymentId(RunId runId) {
    return new DeploymentId(runId.tester().id(), runId.type().zone(controller.system()));
}
/**
 * Returns the node resources to use for the tester in the given zone: the tester flavor declared
 * on the first deployment step concerning the zone's environment, if any, and otherwise a
 * default (a larger one in AWS regions).
 */
static NodeResources testerResourcesFor(ZoneId zone, DeploymentInstanceSpec spec) {
    return spec.steps().stream()
               .filter(step -> step.concerns(zone.environment()))
               .findFirst()
               .flatMap(step -> step.zones().get(0).testerFlavor()) // assumes the step has at least one zone, and uses only the first — TODO confirm
               .map(NodeResources::fromLegacyName)
               .orElse(zone.region().value().contains("aws-") ?
                       DEFAULT_TESTER_RESOURCES_AWS : DEFAULT_TESTER_RESOURCES);
}
/**
 * Returns the generated services.xml content for the tester application.
 *
 * @param systemUsesAthenz      whether the test runner should obtain Athenz credentials
 * @param useTesterCertificate  whether the test runner should use the generated tester certificate
 * @param resources             node resources for the tester node; also determines the memory split
 *                              between the jdisc container (fixed 2 Gb) and the surefire test JVM
 */
static byte[] servicesXml(boolean systemUsesAthenz, boolean useTesterCertificate, NodeResources resources) {
    int jdiscMemoryGb = 2; // Memory reserved for the jdisc container itself.
    int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb());
    // Half of the remaining memory goes to the test JVM.
    int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2);
    String resourceString = String.format(Locale.ENGLISH,
                                          "<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>",
                                          resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name());
    String servicesXml =
            "<?xml version='1.0' encoding='UTF-8'?>\n" +
            "<services xmlns:deploy='vespa' version='1.0'>\n" +
            " <container version='1.0' id='tester'>\n" +
            "\n" +
            " <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" +
            " <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" +
            " <artifactsPath>artifacts</artifactsPath>\n" +
            " <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" +
            " <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
            " <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" +
            " </config>\n" +
            " </component>\n" +
            "\n" +
            " <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" +
            // NOTE(review): the next line is truncated in this extract (unterminated string literal);
            // the original binding is presumably "<binding>http://*/tester/v1/*</binding>\n" — restore from upstream.
            " <binding>http:
            " </handler>\n" +
            "\n" +
            " <nodes count=\"1\" allocated-memory=\"" + jdiscMemoryPct + "%\">\n" +
            " " + resourceString + "\n" +
            " </nodes>\n" +
            " </container>\n" +
            "</services>\n";
    return servicesXml.getBytes(UTF_8);
}
/** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */
private static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) {
    // Athenz domain/service attributes are only emitted when configured.
    String deploymentSpec =
            "<?xml version='1.0' encoding='UTF-8'?>\n" +
            "<deployment version=\"1.0\" " +
            athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") +
            athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("") + ">" +
            " <instance id=\"" + id.id().instance().value() + "\" />" +
            "</deployment>";
    return deploymentSpec.getBytes(UTF_8);
}
/** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */
private class DualLogger {

    private final RunId id;
    private final Step step;

    private DualLogger(RunId id, Step step) {
        this.id = id;
        this.step = step;
    }

    // Logs the given messages to the job log at INFO level.
    private void log(String... messages) {
        log(List.of(messages));
    }

    // Writes pre-built log entries to the job log only — no copy to the class logger.
    private void logAll(List<LogEntry> messages) {
        controller.jobController().log(id, step, messages);
    }

    private void log(List<String> messages) {
        controller.jobController().log(id, step, INFO, messages);
    }

    private void log(Level level, String message) {
        log(level, message, null);
    }

    // Unlike log(level, message, thrown), this logs the throwable only to the internal class
    // logger, keeping the stack trace out of the user-visible job log.
    private void logWithInternalException(Level level, String message, Throwable thrown) {
        logger.log(level, id + " at " + step + ": " + message, thrown);
        controller.jobController().log(id, step, level, message);
    }

    // Logs to both sinks; the throwable's stack trace is appended to the job log message.
    private void log(Level level, String message, Throwable thrown) {
        logger.log(level, id + " at " + step + ": " + message, thrown);
        if (thrown != null) {
            ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream();
            thrown.printStackTrace(new PrintStream(traceBuffer));
            message += "\n" + traceBuffer;
        }
        controller.jobController().log(id, step, level, message);
    }

}
} |
Can `utf8` ever be `null` here? | public Utf8Array toUtf8() {
return utf8 != null ? utf8 : new Utf8String(toString());
} | return utf8 != null ? utf8 : new Utf8String(toString()); | public Utf8Array toUtf8() {
return utf8;
} | class Version implements Comparable<Version> {
private final int major;
private final int minor;
private final int micro;
private final String qualifier;
private final Utf8Array utf8;
/** The empty version */
public static final Version emptyVersion = new Version();
/** Creates an empty version */
public Version() {
this(0, 0, 0, "");
}
/**
* Creates a version identifier from the specified numerical components.
*
* @param major major component of the version identifier
* @throws IllegalArgumentException If the numerical components are
* negative.
*/
public Version(int major) {
this(major, 0, 0, "");
}
/**
* Creates a version identifier from the specified numerical components.
*
* @param major major component of the version identifier
* @param minor minor component of the version identifier
* @throws IllegalArgumentException If the numerical components are
* negative.
*/
public Version(int major, int minor) {
this(major, minor, 0, "");
}
/**
* Creates a version identifier from the specified numerical components.
*
* @param major major component of the version identifier
* @param minor minor component of the version identifier
* @param micro micro component of the version identifier
* @throws IllegalArgumentException If the numerical components are
* negative.
*/
public Version(int major, int minor, int micro) {
this(major, minor, micro, "");
}
/**
* Creates a version identifier from the specified components.
*
* @param major major component of the version identifier
* @param minor minor component of the version identifier
* @param micro micro component of the version identifier
* @param qualifier Qualifier component of the version identifier, or null if not specified
* @throws IllegalArgumentException if the numerical components are negative
* the qualifier string contains non-word/digit-characters, or
* an earlier component is not specified but a later one is
*/
public Version(int major, int minor, int micro, String qualifier) {
    this.major = major;
    this.minor = minor;
    this.micro = micro;
    this.qualifier = (qualifier != null) ? qualifier : ""; // null qualifier is normalized to ""
    // Cache the UTF-8 form; computed before verify() so toString() in error messages is consistent.
    utf8 = new Utf8String(toString());
    verify();
}
/**
* Creates a version identifier from the specified string.
*
* <p>
* Version strings follows this grammar (same as Osgi versions):
*
* <pre>
* version ::= major('.'minor('.'micro('.'qualifier)?)?)?
* major ::= digit+
* minor ::= digit+
* micro ::= digit+
* qualifier ::= (alpha|digit|'_'|'-')+
* digit ::= [0..9]
* alpha ::= [a..zA..Z]
* </pre>
*
* @param versionString String representation of the version identifier
* @throws IllegalArgumentException If <code>version</code> is improperly formatted.
*/
public Version(String versionString) {
    if (! "".equals(versionString)) {
        // Split on '.'; missing trailing components default to 0 / "".
        // NumberFormatException from parseInt is an IllegalArgumentException, as documented.
        String[] components=versionString.split("\\.");
        major = (components.length > 0) ? Integer.parseInt(components[0]) : 0;
        minor = (components.length > 1) ? Integer.parseInt(components[1]) : 0;
        micro = (components.length > 2) ? Integer.parseInt(components[2]) : 0;
        qualifier = (components.length > 3) ? components[3] : "";
        if (components.length > 4)
            throw new IllegalArgumentException("Too many components in '" + versionString + "'");
    } else {
        // The empty string is the empty version, 0.0.0 with no qualifier.
        major = 0;
        minor = 0;
        micro = 0;
        qualifier = "";
    }
    utf8 = new Utf8String(versionString);
    verify();
}
static private int readInt(ByteBuffer bb) {
int accum=0;
for (int i=bb.remaining(); i > 0; i--) {
byte b=bb.get();
if (b >= 0x30 && b <= 0x39) {
accum = accum * 10 + (b-0x30);
} else if (b == 0x2e) {
return accum;
} else {
throw new IllegalArgumentException("Failed decoding integer from utf8stream. Stream = " + bb.toString());
}
}
return accum;
}
/**
* Creates a version identifier from the specified string.
*
* <p>
* Version strings follows this grammar (same as Osgi versions):
*
* <pre>
* version ::= major('.'minor('.'micro('.'qualifier)?)?)?
* major ::= digit+
* minor ::= digit+
* micro ::= digit+
* qualifier ::= (alpha|digit|'_'|'-')+
* digit ::= [0..9]
* alpha ::= [a..zA..Z]
* </pre>
*
* @param versionString String representation of the version identifier
* @throws IllegalArgumentException If <code>version</code> is improperly
* formatted.
*/
public Version(Utf8Array versionString) {
    // Parses directly from the UTF-8 bytes: each readInt consumes one numeric component and
    // its trailing '.'; whatever remains after three components is the qualifier.
    ByteBuffer bb = versionString.wrap();
    if (bb.remaining() > 0) {
        major = readInt(bb);
        if (bb.remaining() > 0) {
            minor = readInt(bb);
            if (bb.remaining() > 0) {
                micro = readInt(bb);
                qualifier = (bb.remaining() > 0) ? Utf8.toString(bb) : "";
            } else {
                micro = 0;
                qualifier = "";
            }
        } else {
            minor = 0;
            micro = 0;
            qualifier = "";
        }
    } else {
        // Unlike Version(String), the empty UTF-8 array is rejected rather than treated as empty version.
        throw new IllegalArgumentException("Empty version specification");
    }
    utf8 = versionString; // reuse the caller's bytes rather than re-encoding
    verify();
}
/** Returns new Version(versionString), or Version.emptyVersion if the input string is null or "" */
public static Version fromString(String versionString) {
    if (versionString == null) return emptyVersion;
    return new Version(versionString);
}
/**
 * Must be called on construction after the component values are set.
 *
 * @throws IllegalArgumentException If the numerical components are negative
 *         or the qualifier string is invalid.
 */
private void verify() {
    if (major < 0)
        throw new IllegalArgumentException("Negative major in " + this);
    if (minor < 0)
        throw new IllegalArgumentException("Negative minor in " + this);
    if (micro < 0)
        throw new IllegalArgumentException("Negative micro in " + this);
    for (int i = 0; i < qualifier.length(); i++) {
        char c = qualifier.charAt(i);
        // The documented qualifier grammar is (alpha|digit|'_'|'-')+ , but Character.isLetterOrDigit
        // rejects '_' and '-'; accept them explicitly so verify matches the class's own contract.
        if ( ! (Character.isLetterOrDigit(c) || c == '_' || c == '-'))
            throw new IllegalArgumentException("Invalid qualifier in " + this +
                                               ": Invalid character at position " + i + " in qualifier");
    }
}
/**
 * Renders this version with trailing zero components omitted: "major.minor.micro.qualifier"
 * when a qualifier is present, otherwise the shortest numeric form — down to the empty string
 * for the empty version 0.0.0.
 */
private String toStringValue() {
    StringBuilder b = new StringBuilder();
    if (! qualifier.isEmpty()) {
        b.append(getMajor()).append(".").append(getMinor()).append(".").append(getMicro()).append(".").append(qualifier);
    } else if (getMicro() != 0) {
        b.append(getMajor()).append(".").append(getMinor()).append(".").append(getMicro());
    } else if (getMinor() != 0) {
        b.append(getMajor()).append(".").append(getMinor());
    } else if (getMajor() != 0) {
        b.append(getMajor());
    }
    return b.toString();
}
/**
 * Returns this version as major.minor.micro.qualifier, always including all three numeric
 * components, and omitting the qualifier part only when the qualifier is empty.
 * <p>
 * This string form is part of the API of Version and will never change.
 */
public String toFullString() {
    String numeric = getMajor() + "." + getMinor() + "." + getMicro();
    return qualifier.isEmpty() ? numeric : numeric + "." + qualifier;
}
/** Returns the major component of this version, or 0 if not specified */
public int getMajor() { return major; }
/** Returns the minor component of this version, or 0 if not specified */
public int getMinor() { return minor; }
/** Returns the micro component of this version, or 0 if not specified */
public int getMicro() { return micro; }
/** Returns the qualifier component of this version, or "" if not specified */
public String getQualifier() { return qualifier; }
/**
* Returns the string representation of this version identifier as major.minor.micro.qualifier,
* omitting the remaining parts after reaching the first unspecified component.
* Unspecified version component is equivalent to 0 (or the empty string for qualifier).
* <p>
* The string representation of a Version specified here is a part of the API and will never change.
*/
@Override
public String toString() { return toStringValue(); }
@Override
public int hashCode() { return major*3 + minor*5 + micro*7 + qualifier.hashCode()*11; }
/** Returns whether this equals the empty version */
public boolean isEmpty() { return this.equals(emptyVersion); }
/**
* Compares this <code>Version</code> to another.
*
* <p>
* A version is considered to be <b>equal to </b> another version if the
* major, minor and micro components are equal and the qualifier component
* is equal (using <code>String.equals</code>).
* <p>
*
* @param object The <code>Version</code> object to be compared.
* @return <code>true</code> if <code>object</code> is a
* <code>Version</code> and is equal to this object;
* <code>false</code> otherwise.
*/
@Override
public boolean equals(Object object) {
    // Equal iff all three numeric components and the qualifier match.
    if ( ! (object instanceof Version)) return false;
    Version other = (Version) object;
    return this.major == other.major
        && this.minor == other.minor
        && this.micro == other.micro
        && this.qualifier.equals(other.qualifier);
}
@SuppressWarnings("unused")
// NOTE(review): dead code — no callers in this class, and equivalent to java.util.Objects.equals.
// Candidate for removal.
private boolean equals(Object o1, Object o2) {
    if (o1 == null && o2 == null) return true;
    if (o1 == null || o2 == null) return false;
    return o1.equals(o2);
}
/**
* Compares this <code>Version</code> object to another version.
* <p>
* A version is considered to be <b>less than </b> another version if its
* major component is less than the other version's major component, or the
* major components are equal and its minor component is less than the other
* version's minor component, or the major and minor components are equal
* and its micro component is less than the other version's micro component,
* or the major, minor and micro components are equal and it's qualifier
* component is less than the other version's qualifier component (using
* <code>String.compareTo</code>).
* <p>
* A version is considered to be <b>equal to</b> another version if the
* major, minor and micro components are equal and the qualifier component
* is equal (using <code>String.compareTo</code>).
* <p>
* Unspecified numeric components are treated as 0, unspecified qualifier is treated as the empty string.
*
* @param other the <code>Version</code> object to be compared.
* @return A negative integer, zero, or a positive integer if this object is
* less than, equal to, or greater than the specified <code>Version</code> object.
* @throws ClassCastException if the specified object is not a <code>Version</code>.
*/
@Override
public int compareTo(Version other) {
    if (other == this) return 0;
    // Compare with Integer.compare rather than subtraction: subtraction can overflow for large
    // component values, and the documented contract only promises the sign of the result.
    int result = Integer.compare(this.getMajor(), other.getMajor());
    if (result != 0) return result;

    result = Integer.compare(this.getMinor(), other.getMinor());
    if (result != 0) return result;

    result = Integer.compare(this.getMicro(), other.getMicro());
    if (result != 0) return result;

    // Finally, break ties on the qualifier, lexicographically.
    return getQualifier().compareTo(other.getQualifier());
}
/**
* Returns whether this version number is strictly lower than the given version. This has the same semantics as
* {@link Version
*/
public boolean isBefore(Version other) {
return compareTo(other) < 0;
}
/**
* Returns whether this version number is strictly higher than the given version. This has the same semantics as
* {@link Version
*/
public boolean isAfter(Version other) {
return compareTo(other) > 0;
}
/** Creates a version specification that only matches this version */
public VersionSpecification toSpecification() {
return (this == emptyVersion)
? VersionSpecification.emptyVersionSpecification
: new VersionSpecification(getMajor(), getMinor(), getMicro(), getQualifier());
}
} | class Version implements Comparable<Version> {
private final int major;
private final int minor;
private final int micro;
private final String qualifier;
private final Utf8Array utf8;
/** The empty version */
public static final Version emptyVersion = new Version();
/** Creates an empty version */
public Version() {
this(0, 0, 0, "");
}
/**
* Creates a version identifier from the specified numerical components.
*
* @param major major component of the version identifier
* @throws IllegalArgumentException If the numerical components are
* negative.
*/
public Version(int major) {
this(major, 0, 0, "");
}
/**
* Creates a version identifier from the specified numerical components.
*
* @param major major component of the version identifier
* @param minor minor component of the version identifier
* @throws IllegalArgumentException If the numerical components are
* negative.
*/
public Version(int major, int minor) {
this(major, minor, 0, "");
}
/**
* Creates a version identifier from the specified numerical components.
*
* @param major major component of the version identifier
* @param minor minor component of the version identifier
* @param micro micro component of the version identifier
* @throws IllegalArgumentException If the numerical components are
* negative.
*/
public Version(int major, int minor, int micro) {
this(major, minor, micro, "");
}
/**
* Creates a version identifier from the specified components.
*
* @param major major component of the version identifier
* @param minor minor component of the version identifier
* @param micro micro component of the version identifier
* @param qualifier Qualifier component of the version identifier, or null if not specified
* @throws IllegalArgumentException if the numerical components are negative
* the qualifier string contains non-word/digit-characters, or
* an earlier component is not specified but a later one is
*/
public Version(int major, int minor, int micro, String qualifier) {
this.major = major;
this.minor = minor;
this.micro = micro;
this.qualifier = (qualifier != null) ? qualifier : "";
utf8 = new Utf8String(toString());
verify();
}
/**
* Creates a version identifier from the specified string.
*
* <p>
* Version strings follows this grammar (same as Osgi versions):
*
* <pre>
* version ::= major('.'minor('.'micro('.'qualifier)?)?)?
* major ::= digit+
* minor ::= digit+
* micro ::= digit+
* qualifier ::= (alpha|digit|'_'|'-')+
* digit ::= [0..9]
* alpha ::= [a..zA..Z]
* </pre>
*
* @param versionString String representation of the version identifier
* @throws IllegalArgumentException If <code>version</code> is improperly formatted.
*/
public Version(String versionString) {
if (! "".equals(versionString)) {
String[] components=versionString.split("\\.");
major = (components.length > 0) ? Integer.parseInt(components[0]) : 0;
minor = (components.length > 1) ? Integer.parseInt(components[1]) : 0;
micro = (components.length > 2) ? Integer.parseInt(components[2]) : 0;
qualifier = (components.length > 3) ? components[3] : "";
if (components.length > 4)
throw new IllegalArgumentException("Too many components in '" + versionString + "'");
} else {
major = 0;
minor = 0;
micro = 0;
qualifier = "";
}
utf8 = new Utf8String(versionString);
verify();
}
static private int readInt(ByteBuffer bb) {
int accum=0;
for (int i=bb.remaining(); i > 0; i--) {
byte b=bb.get();
if (b >= 0x30 && b <= 0x39) {
accum = accum * 10 + (b-0x30);
} else if (b == 0x2e) {
return accum;
} else {
throw new IllegalArgumentException("Failed decoding integer from utf8stream. Stream = " + bb.toString());
}
}
return accum;
}
/**
* Creates a version identifier from the specified string.
*
* <p>
* Version strings follows this grammar (same as Osgi versions):
*
* <pre>
* version ::= major('.'minor('.'micro('.'qualifier)?)?)?
* major ::= digit+
* minor ::= digit+
* micro ::= digit+
* qualifier ::= (alpha|digit|'_'|'-')+
* digit ::= [0..9]
* alpha ::= [a..zA..Z]
* </pre>
*
* @param versionString String representation of the version identifier
* @throws IllegalArgumentException If <code>version</code> is improperly
* formatted.
*/
public Version(Utf8Array versionString) {
ByteBuffer bb = versionString.wrap();
if (bb.remaining() > 0) {
major = readInt(bb);
if (bb.remaining() > 0) {
minor = readInt(bb);
if (bb.remaining() > 0) {
micro = readInt(bb);
qualifier = (bb.remaining() > 0) ? Utf8.toString(bb) : "";
} else {
micro = 0;
qualifier = "";
}
} else {
minor = 0;
micro = 0;
qualifier = "";
}
} else {
throw new IllegalArgumentException("Empty version specification");
}
utf8 = versionString;
verify();
}
/** Returns new Version(versionString), or Version.emptyVersion if the input string is null or "" */
public static Version fromString(String versionString) {
return (versionString == null) ? emptyVersion :new Version(versionString);
}
/**
 * Must be called on construction after the component values are set.
 *
 * @throws IllegalArgumentException if the numerical components are negative
 *         or the qualifier string contains an invalid character
 */
private void verify() {
    if (major < 0)
        throw new IllegalArgumentException("Negative major in " + this);
    if (minor < 0)
        throw new IllegalArgumentException("Negative minor in " + this);
    if (micro < 0)
        throw new IllegalArgumentException("Negative micro in " + this);
    for (int i = 0; i < qualifier.length(); i++) {
        char c = qualifier.charAt(i);
        // The documented grammar is qualifier ::= (alpha|digit|'_'|'-')+, so
        // accept '_' and '-' in addition to letters and digits.
        if (!Character.isLetterOrDigit(c) && c != '_' && c != '-')
            throw new IllegalArgumentException("Invalid qualifier in " + this +
                    ": Invalid character at position " + i + " in qualifier");
    }
}
/** Builds the canonical string form, dropping trailing zero components and an empty qualifier. */
private String toStringValue() {
    if ( ! qualifier.isEmpty())
        return major + "." + minor + "." + micro + "." + qualifier;
    if (micro != 0)
        return major + "." + minor + "." + micro;
    if (minor != 0)
        return major + "." + minor;
    if (major != 0)
        return String.valueOf(major);
    return "";
}
/**
 * Returns the string representation of this version identifier as
 * major.minor.micro.qualifier, leaving out ".qualifier" when the qualifier
 * is empty or unspecified.
 * <p>
 * This string form is part of the API of Version and will never change.
 */
public String toFullString() {
    String numeric = getMajor() + "." + getMinor() + "." + getMicro();
    return qualifier.isEmpty() ? numeric : numeric + "." + qualifier;
}
/** Returns the major component of this version, or 0 if not specified. Never negative (enforced by verify()). */
public int getMajor() { return major; }
/** Returns the minor component of this version, or 0 if not specified. Never negative (enforced by verify()). */
public int getMinor() { return minor; }
/** Returns the micro component of this version, or 0 if not specified. Never negative (enforced by verify()). */
public int getMicro() { return micro; }
/** Returns the qualifier component of this version, or "" if not specified. Never null. */
public String getQualifier() { return qualifier; }
/**
 * Returns the string representation of this version identifier as major.minor.micro.qualifier,
 * omitting the remaining parts after reaching the first unspecified component.
 * An unspecified version component is equivalent to 0 (or the empty string for the qualifier).
 * <p>
 * The string representation of a Version specified here is a part of the API and will never change.
 */
@Override
public String toString() { return toStringValue(); }
// Mixes all four components that equals() compares, so equal versions hash equally.
@Override
public int hashCode() { return major*3 + minor*5 + micro*7 + qualifier.hashCode()*11; }
/** Returns whether this equals the empty version (compared by value, not identity) */
public boolean isEmpty() { return this.equals(emptyVersion); }
/**
 * Compares this <code>Version</code> to another object.
 * <p>
 * A version is equal to another version when the major, minor and micro
 * components match and the qualifier components are equal by
 * <code>String.equals</code>.
 *
 * @param object the object to compare with
 * @return <code>true</code> if <code>object</code> is a <code>Version</code>
 *         equal to this object, <code>false</code> otherwise
 */
@Override
public boolean equals(Object object) {
    if (object == this) return true;
    if ( ! (object instanceof Version)) return false;
    Version that = (Version) object;
    return major == that.major
            && minor == that.minor
            && micro == that.micro
            && qualifier.equals(that.qualifier);
}
// NOTE(review): this private helper is unused and duplicates java.util.Objects.equals;
// it looks safe to delete once it is confirmed nothing uses it reflectively.
@SuppressWarnings("unused")
private boolean equals(Object o1, Object o2) {
    if (o1 == null && o2 == null) return true;
    if (o1 == null || o2 == null) return false;
    return o1.equals(o2);
}
/**
* Compares this <code>Version</code> object to another version.
* <p>
* A version is considered to be <b>less than </b> another version if its
* major component is less than the other version's major component, or the
* major components are equal and its minor component is less than the other
* version's minor component, or the major and minor components are equal
* and its micro component is less than the other version's micro component,
* or the major, minor and micro components are equal and it's qualifier
* component is less than the other version's qualifier component (using
* <code>String.compareTo</code>).
* <p>
* A version is considered to be <b>equal to</b> another version if the
* major, minor and micro components are equal and the qualifier component
* is equal (using <code>String.compareTo</code>).
* <p>
* Unspecified numeric components are treated as 0, unspecified qualifier is treated as the empty string.
*
* @param other the <code>Version</code> object to be compared.
* @return A negative integer, zero, or a positive integer if this object is
* less than, equal to, or greater than the specified <code>Version</code> object.
* @throws ClassCastException if the specified object is not a <code>Version</code>.
*/
@Override
public int compareTo(Version other) {
    if (other == this) return 0;
    // Integer.compare instead of subtraction: immune to overflow and makes
    // explicit that only the sign of the result is meaningful.
    int result = Integer.compare(this.getMajor(), other.getMajor());
    if (result != 0) return result;
    result = Integer.compare(this.getMinor(), other.getMinor());
    if (result != 0) return result;
    result = Integer.compare(this.getMicro(), other.getMicro());
    if (result != 0) return result;
    // Numeric components equal: order by qualifier, lexicographically.
    return getQualifier().compareTo(other.getQualifier());
}
/**
 * Returns whether this version number is strictly lower than the given version.
 * This has the same semantics as {@link Version#compareTo(Version)}.
 */
public boolean isBefore(Version other) {
    return compareTo(other) < 0;
}
/**
 * Returns whether this version number is strictly higher than the given version.
 * This has the same semantics as {@link Version#compareTo(Version)}.
 */
public boolean isAfter(Version other) {
    return compareTo(other) > 0;
}
/** Creates a version specification that only matches this version */
public VersionSpecification toSpecification() {
    // Identity (==) check on purpose: only the shared emptyVersion constant maps
    // to the empty specification; an equal-but-distinct 0.0.0 instance does not.
    return (this == emptyVersion)
            ? VersionSpecification.emptyVersionSpecification
            : new VersionSpecification(getMajor(), getMinor(), getMicro(), getQualifier());
}
} |
Good point, not anymore. | public Utf8Array toUtf8() {
return utf8 != null ? utf8 : new Utf8String(toString());
} | return utf8 != null ? utf8 : new Utf8String(toString()); | public Utf8Array toUtf8() {
return utf8;
} | class Version implements Comparable<Version> {
private final int major;
private final int minor;
private final int micro;
private final String qualifier;
private final Utf8Array utf8;
/** The empty version */
public static final Version emptyVersion = new Version();
/** Creates an empty version */
public Version() {
this(0, 0, 0, "");
}
/**
* Creates a version identifier from the specified numerical components.
*
* @param major major component of the version identifier
* @throws IllegalArgumentException If the numerical components are
* negative.
*/
public Version(int major) {
this(major, 0, 0, "");
}
/**
* Creates a version identifier from the specified numerical components.
*
* @param major major component of the version identifier
* @param minor minor component of the version identifier
* @throws IllegalArgumentException If the numerical components are
* negative.
*/
public Version(int major, int minor) {
this(major, minor, 0, "");
}
/**
* Creates a version identifier from the specified numerical components.
*
* @param major major component of the version identifier
* @param minor minor component of the version identifier
* @param micro micro component of the version identifier
* @throws IllegalArgumentException If the numerical components are
* negative.
*/
public Version(int major, int minor, int micro) {
this(major, minor, micro, "");
}
/**
* Creates a version identifier from the specified components.
*
* @param major major component of the version identifier
* @param minor minor component of the version identifier
* @param micro micro component of the version identifier
* @param qualifier Qualifier component of the version identifier, or null if not specified
* @throws IllegalArgumentException if the numerical components are negative
* the qualifier string contains non-word/digit-characters, or
* an earlier component is not specified but a later one is
*/
public Version(int major, int minor, int micro, String qualifier) {
this.major = major;
this.minor = minor;
this.micro = micro;
this.qualifier = (qualifier != null) ? qualifier : "";
utf8 = new Utf8String(toString());
verify();
}
/**
* Creates a version identifier from the specified string.
*
* <p>
* Version strings follows this grammar (same as Osgi versions):
*
* <pre>
* version ::= major('.'minor('.'micro('.'qualifier)?)?)?
* major ::= digit+
* minor ::= digit+
* micro ::= digit+
* qualifier ::= (alpha|digit|'_'|'-')+
* digit ::= [0..9]
* alpha ::= [a..zA..Z]
* </pre>
*
* @param versionString String representation of the version identifier
* @throws IllegalArgumentException If <code>version</code> is improperly formatted.
*/
public Version(String versionString) {
if (! "".equals(versionString)) {
String[] components=versionString.split("\\.");
major = (components.length > 0) ? Integer.parseInt(components[0]) : 0;
minor = (components.length > 1) ? Integer.parseInt(components[1]) : 0;
micro = (components.length > 2) ? Integer.parseInt(components[2]) : 0;
qualifier = (components.length > 3) ? components[3] : "";
if (components.length > 4)
throw new IllegalArgumentException("Too many components in '" + versionString + "'");
} else {
major = 0;
minor = 0;
micro = 0;
qualifier = "";
}
utf8 = new Utf8String(versionString);
verify();
}
static private int readInt(ByteBuffer bb) {
int accum=0;
for (int i=bb.remaining(); i > 0; i--) {
byte b=bb.get();
if (b >= 0x30 && b <= 0x39) {
accum = accum * 10 + (b-0x30);
} else if (b == 0x2e) {
return accum;
} else {
throw new IllegalArgumentException("Failed decoding integer from utf8stream. Stream = " + bb.toString());
}
}
return accum;
}
/**
* Creates a version identifier from the specified string.
*
* <p>
* Version strings follows this grammar (same as Osgi versions):
*
* <pre>
* version ::= major('.'minor('.'micro('.'qualifier)?)?)?
* major ::= digit+
* minor ::= digit+
* micro ::= digit+
* qualifier ::= (alpha|digit|'_'|'-')+
* digit ::= [0..9]
* alpha ::= [a..zA..Z]
* </pre>
*
* @param versionString String representation of the version identifier
* @throws IllegalArgumentException If <code>version</code> is improperly
* formatted.
*/
public Version(Utf8Array versionString) {
ByteBuffer bb = versionString.wrap();
if (bb.remaining() > 0) {
major = readInt(bb);
if (bb.remaining() > 0) {
minor = readInt(bb);
if (bb.remaining() > 0) {
micro = readInt(bb);
qualifier = (bb.remaining() > 0) ? Utf8.toString(bb) : "";
} else {
micro = 0;
qualifier = "";
}
} else {
minor = 0;
micro = 0;
qualifier = "";
}
} else {
throw new IllegalArgumentException("Empty version specification");
}
utf8 = versionString;
verify();
}
/** Returns new Version(versionString), or Version.emptyVersion if the input string is null or "" */
public static Version fromString(String versionString) {
    // Map "" to the shared emptyVersion constant as documented. Identity matters:
    // toSpecification() compares against emptyVersion with ==, so an equal-but-distinct
    // 0.0.0 instance would not be treated as empty there.
    return (versionString == null || versionString.isEmpty())
            ? emptyVersion
            : new Version(versionString);
}
/**
* Must be called on construction after the component values are set
*
* @throws IllegalArgumentException If the numerical components are negative
* or the qualifier string is invalid.
*/
private void verify() {
    // Called on construction after the component values are set; throws
    // IllegalArgumentException for negative components or a bad qualifier.
    if (major < 0)
        throw new IllegalArgumentException("Negative major in " + this);
    if (minor < 0)
        throw new IllegalArgumentException("Negative minor in " + this);
    if (micro < 0)
        throw new IllegalArgumentException("Negative micro in " + this);
    for (int i = 0; i < qualifier.length(); i++) {
        char c = qualifier.charAt(i);
        // The documented grammar is qualifier ::= (alpha|digit|'_'|'-')+, so
        // accept '_' and '-' in addition to letters and digits.
        if (!Character.isLetterOrDigit(c) && c != '_' && c != '-')
            throw new IllegalArgumentException("Invalid qualifier in " + this +
                    ": Invalid character at position " + i + " in qualifier");
    }
}
private String toStringValue() {
StringBuilder b = new StringBuilder();
if (! qualifier.isEmpty()) {
b.append(getMajor()).append(".").append(getMinor()).append(".").append(getMicro()).append(".").append(qualifier);
} else if (getMicro() != 0) {
b.append(getMajor()).append(".").append(getMinor()).append(".").append(getMicro());
} else if (getMinor() != 0) {
b.append(getMajor()).append(".").append(getMinor());
} else if (getMajor() != 0) {
b.append(getMajor());
}
return b.toString();
}
/**
* Returns the string representation of this version identifier as major.minor.micro.qualifier,
* omitting .qualifier if qualifier empty or unspecified
* <p>
* This string form is part of the API of Version and will never change.
*/
public String toFullString() {
StringBuilder b = new StringBuilder();
b.append(getMajor()).append(".").append(getMinor()).append(".").append(getMicro());
if (! qualifier.isEmpty()) {
b.append(".");
b.append(qualifier);
}
return b.toString();
}
/** Returns the major component of this version, or 0 if not specified */
public int getMajor() { return major; }
/** Returns the minor component of this version, or 0 if not specified */
public int getMinor() { return minor; }
/** Returns the micro component of this version, or 0 if not specified */
public int getMicro() { return micro; }
/** Returns the qualifier component of this version, or "" if not specified */
public String getQualifier() { return qualifier; }
/**
* Returns the string representation of this version identifier as major.minor.micro.qualifier,
* omitting the remaining parts after reaching the first unspecified component.
* Unspecified version component is equivalent to 0 (or the empty string for qualifier).
* <p>
* The string representation of a Version specified here is a part of the API and will never change.
*/
@Override
public String toString() { return toStringValue(); }
@Override
public int hashCode() { return major*3 + minor*5 + micro*7 + qualifier.hashCode()*11; }
/** Returns whether this equals the empty version */
public boolean isEmpty() { return this.equals(emptyVersion); }
/**
* Compares this <code>Version</code> to another.
*
* <p>
* A version is considered to be <b>equal to </b> another version if the
* major, minor and micro components are equal and the qualifier component
* is equal (using <code>String.equals</code>).
* <p>
*
* @param object The <code>Version</code> object to be compared.
* @return <code>true</code> if <code>object</code> is a
* <code>Version</code> and is equal to this object;
* <code>false</code> otherwise.
*/
@Override
public boolean equals(Object object) {
if ( ! (object instanceof Version)) return false;
Version other = (Version) object;
if (this.major != other.major) return false;
if (this.minor != other.minor) return false;
if (this.micro != other.micro) return false;
return (this.qualifier.equals(other.qualifier));
}
@SuppressWarnings("unused")
private boolean equals(Object o1, Object o2) {
if (o1 == null && o2 == null) return true;
if (o1 == null || o2 == null) return false;
return o1.equals(o2);
}
/**
* Compares this <code>Version</code> object to another version.
* <p>
* A version is considered to be <b>less than </b> another version if its
* major component is less than the other version's major component, or the
* major components are equal and its minor component is less than the other
* version's minor component, or the major and minor components are equal
* and its micro component is less than the other version's micro component,
* or the major, minor and micro components are equal and it's qualifier
* component is less than the other version's qualifier component (using
* <code>String.compareTo</code>).
* <p>
* A version is considered to be <b>equal to</b> another version if the
* major, minor and micro components are equal and the qualifier component
* is equal (using <code>String.compareTo</code>).
* <p>
* Unspecified numeric components are treated as 0, unspecified qualifier is treated as the empty string.
*
* @param other the <code>Version</code> object to be compared.
* @return A negative integer, zero, or a positive integer if this object is
* less than, equal to, or greater than the specified <code>Version</code> object.
* @throws ClassCastException if the specified object is not a <code>Version</code>.
*/
@Override
public int compareTo(Version other) {
    if (other == this) return 0;
    // Integer.compare instead of subtraction: immune to overflow and makes
    // explicit that only the sign of the result is meaningful.
    int result = Integer.compare(this.getMajor(), other.getMajor());
    if (result != 0) return result;
    result = Integer.compare(this.getMinor(), other.getMinor());
    if (result != 0) return result;
    result = Integer.compare(this.getMicro(), other.getMicro());
    if (result != 0) return result;
    // Numeric components equal: order by qualifier, lexicographically.
    return getQualifier().compareTo(other.getQualifier());
}
/**
* Returns whether this version number is strictly lower than the given version. This has the same semantics as
* {@link Version
*/
public boolean isBefore(Version other) {
return compareTo(other) < 0;
}
/**
* Returns whether this version number is strictly higher than the given version. This has the same semantics as
* {@link Version
*/
public boolean isAfter(Version other) {
return compareTo(other) > 0;
}
/** Creates a version specification that only matches this version */
public VersionSpecification toSpecification() {
return (this == emptyVersion)
? VersionSpecification.emptyVersionSpecification
: new VersionSpecification(getMajor(), getMinor(), getMicro(), getQualifier());
}
} | class Version implements Comparable<Version> {
private final int major;
private final int minor;
private final int micro;
private final String qualifier;
private final Utf8Array utf8;
/** The empty version */
public static final Version emptyVersion = new Version();
/** Creates an empty version */
public Version() {
this(0, 0, 0, "");
}
/**
* Creates a version identifier from the specified numerical components.
*
* @param major major component of the version identifier
* @throws IllegalArgumentException If the numerical components are
* negative.
*/
public Version(int major) {
this(major, 0, 0, "");
}
/**
* Creates a version identifier from the specified numerical components.
*
* @param major major component of the version identifier
* @param minor minor component of the version identifier
* @throws IllegalArgumentException If the numerical components are
* negative.
*/
public Version(int major, int minor) {
this(major, minor, 0, "");
}
/**
* Creates a version identifier from the specified numerical components.
*
* @param major major component of the version identifier
* @param minor minor component of the version identifier
* @param micro micro component of the version identifier
* @throws IllegalArgumentException If the numerical components are
* negative.
*/
public Version(int major, int minor, int micro) {
this(major, minor, micro, "");
}
/**
* Creates a version identifier from the specified components.
*
* @param major major component of the version identifier
* @param minor minor component of the version identifier
* @param micro micro component of the version identifier
* @param qualifier Qualifier component of the version identifier, or null if not specified
* @throws IllegalArgumentException if the numerical components are negative
* the qualifier string contains non-word/digit-characters, or
* an earlier component is not specified but a later one is
*/
public Version(int major, int minor, int micro, String qualifier) {
this.major = major;
this.minor = minor;
this.micro = micro;
this.qualifier = (qualifier != null) ? qualifier : "";
utf8 = new Utf8String(toString());
verify();
}
/**
* Creates a version identifier from the specified string.
*
* <p>
* Version strings follows this grammar (same as Osgi versions):
*
* <pre>
* version ::= major('.'minor('.'micro('.'qualifier)?)?)?
* major ::= digit+
* minor ::= digit+
* micro ::= digit+
* qualifier ::= (alpha|digit|'_'|'-')+
* digit ::= [0..9]
* alpha ::= [a..zA..Z]
* </pre>
*
* @param versionString String representation of the version identifier
* @throws IllegalArgumentException If <code>version</code> is improperly formatted.
*/
public Version(String versionString) {
if (! "".equals(versionString)) {
String[] components=versionString.split("\\.");
major = (components.length > 0) ? Integer.parseInt(components[0]) : 0;
minor = (components.length > 1) ? Integer.parseInt(components[1]) : 0;
micro = (components.length > 2) ? Integer.parseInt(components[2]) : 0;
qualifier = (components.length > 3) ? components[3] : "";
if (components.length > 4)
throw new IllegalArgumentException("Too many components in '" + versionString + "'");
} else {
major = 0;
minor = 0;
micro = 0;
qualifier = "";
}
utf8 = new Utf8String(versionString);
verify();
}
static private int readInt(ByteBuffer bb) {
int accum=0;
for (int i=bb.remaining(); i > 0; i--) {
byte b=bb.get();
if (b >= 0x30 && b <= 0x39) {
accum = accum * 10 + (b-0x30);
} else if (b == 0x2e) {
return accum;
} else {
throw new IllegalArgumentException("Failed decoding integer from utf8stream. Stream = " + bb.toString());
}
}
return accum;
}
/**
* Creates a version identifier from the specified string.
*
* <p>
* Version strings follows this grammar (same as Osgi versions):
*
* <pre>
* version ::= major('.'minor('.'micro('.'qualifier)?)?)?
* major ::= digit+
* minor ::= digit+
* micro ::= digit+
* qualifier ::= (alpha|digit|'_'|'-')+
* digit ::= [0..9]
* alpha ::= [a..zA..Z]
* </pre>
*
* @param versionString String representation of the version identifier
* @throws IllegalArgumentException If <code>version</code> is improperly
* formatted.
*/
public Version(Utf8Array versionString) {
ByteBuffer bb = versionString.wrap();
if (bb.remaining() > 0) {
major = readInt(bb);
if (bb.remaining() > 0) {
minor = readInt(bb);
if (bb.remaining() > 0) {
micro = readInt(bb);
qualifier = (bb.remaining() > 0) ? Utf8.toString(bb) : "";
} else {
micro = 0;
qualifier = "";
}
} else {
minor = 0;
micro = 0;
qualifier = "";
}
} else {
throw new IllegalArgumentException("Empty version specification");
}
utf8 = versionString;
verify();
}
/** Returns new Version(versionString), or Version.emptyVersion if the input string is null or "" */
public static Version fromString(String versionString) {
    // Map "" to the shared emptyVersion constant as documented. Identity matters:
    // toSpecification() compares against emptyVersion with ==, so an equal-but-distinct
    // 0.0.0 instance would not be treated as empty there.
    return (versionString == null || versionString.isEmpty())
            ? emptyVersion
            : new Version(versionString);
}
/**
* Must be called on construction after the component values are set
*
* @throws IllegalArgumentException If the numerical components are negative
* or the qualifier string is invalid.
*/
private void verify() {
    // Called on construction after the component values are set; throws
    // IllegalArgumentException for negative components or a bad qualifier.
    if (major < 0)
        throw new IllegalArgumentException("Negative major in " + this);
    if (minor < 0)
        throw new IllegalArgumentException("Negative minor in " + this);
    if (micro < 0)
        throw new IllegalArgumentException("Negative micro in " + this);
    for (int i = 0; i < qualifier.length(); i++) {
        char c = qualifier.charAt(i);
        // The documented grammar is qualifier ::= (alpha|digit|'_'|'-')+, so
        // accept '_' and '-' in addition to letters and digits.
        if (!Character.isLetterOrDigit(c) && c != '_' && c != '-')
            throw new IllegalArgumentException("Invalid qualifier in " + this +
                    ": Invalid character at position " + i + " in qualifier");
    }
}
private String toStringValue() {
StringBuilder b = new StringBuilder();
if (! qualifier.isEmpty()) {
b.append(getMajor()).append(".").append(getMinor()).append(".").append(getMicro()).append(".").append(qualifier);
} else if (getMicro() != 0) {
b.append(getMajor()).append(".").append(getMinor()).append(".").append(getMicro());
} else if (getMinor() != 0) {
b.append(getMajor()).append(".").append(getMinor());
} else if (getMajor() != 0) {
b.append(getMajor());
}
return b.toString();
}
/**
* Returns the string representation of this version identifier as major.minor.micro.qualifier,
* omitting .qualifier if qualifier empty or unspecified
* <p>
* This string form is part of the API of Version and will never change.
*/
public String toFullString() {
StringBuilder b = new StringBuilder();
b.append(getMajor()).append(".").append(getMinor()).append(".").append(getMicro());
if (! qualifier.isEmpty()) {
b.append(".");
b.append(qualifier);
}
return b.toString();
}
/** Returns the major component of this version, or 0 if not specified */
public int getMajor() { return major; }
/** Returns the minor component of this version, or 0 if not specified */
public int getMinor() { return minor; }
/** Returns the micro component of this version, or 0 if not specified */
public int getMicro() { return micro; }
/** Returns the qualifier component of this version, or "" if not specified */
public String getQualifier() { return qualifier; }
/**
* Returns the string representation of this version identifier as major.minor.micro.qualifier,
* omitting the remaining parts after reaching the first unspecified component.
* Unspecified version component is equivalent to 0 (or the empty string for qualifier).
* <p>
* The string representation of a Version specified here is a part of the API and will never change.
*/
@Override
public String toString() { return toStringValue(); }
@Override
public int hashCode() { return major*3 + minor*5 + micro*7 + qualifier.hashCode()*11; }
/** Returns whether this equals the empty version */
public boolean isEmpty() { return this.equals(emptyVersion); }
/**
* Compares this <code>Version</code> to another.
*
* <p>
* A version is considered to be <b>equal to </b> another version if the
* major, minor and micro components are equal and the qualifier component
* is equal (using <code>String.equals</code>).
* <p>
*
* @param object The <code>Version</code> object to be compared.
* @return <code>true</code> if <code>object</code> is a
* <code>Version</code> and is equal to this object;
* <code>false</code> otherwise.
*/
@Override
public boolean equals(Object object) {
if ( ! (object instanceof Version)) return false;
Version other = (Version) object;
if (this.major != other.major) return false;
if (this.minor != other.minor) return false;
if (this.micro != other.micro) return false;
return (this.qualifier.equals(other.qualifier));
}
@SuppressWarnings("unused")
private boolean equals(Object o1, Object o2) {
if (o1 == null && o2 == null) return true;
if (o1 == null || o2 == null) return false;
return o1.equals(o2);
}
/**
* Compares this <code>Version</code> object to another version.
* <p>
* A version is considered to be <b>less than </b> another version if its
* major component is less than the other version's major component, or the
* major components are equal and its minor component is less than the other
* version's minor component, or the major and minor components are equal
* and its micro component is less than the other version's micro component,
* or the major, minor and micro components are equal and it's qualifier
* component is less than the other version's qualifier component (using
* <code>String.compareTo</code>).
* <p>
* A version is considered to be <b>equal to</b> another version if the
* major, minor and micro components are equal and the qualifier component
* is equal (using <code>String.compareTo</code>).
* <p>
* Unspecified numeric components are treated as 0, unspecified qualifier is treated as the empty string.
*
* @param other the <code>Version</code> object to be compared.
* @return A negative integer, zero, or a positive integer if this object is
* less than, equal to, or greater than the specified <code>Version</code> object.
* @throws ClassCastException if the specified object is not a <code>Version</code>.
*/
@Override
public int compareTo(Version other) {
    if (other == this) return 0;
    // Integer.compare instead of subtraction: immune to overflow and makes
    // explicit that only the sign of the result is meaningful.
    int result = Integer.compare(this.getMajor(), other.getMajor());
    if (result != 0) return result;
    result = Integer.compare(this.getMinor(), other.getMinor());
    if (result != 0) return result;
    result = Integer.compare(this.getMicro(), other.getMicro());
    if (result != 0) return result;
    // Numeric components equal: order by qualifier, lexicographically.
    return getQualifier().compareTo(other.getQualifier());
}
/**
* Returns whether this version number is strictly lower than the given version. This has the same semantics as
* {@link Version
*/
public boolean isBefore(Version other) {
return compareTo(other) < 0;
}
/**
* Returns whether this version number is strictly higher than the given version. This has the same semantics as
* {@link Version
*/
public boolean isAfter(Version other) {
return compareTo(other) > 0;
}
/** Creates a version specification that only matches this version */
public VersionSpecification toSpecification() {
return (this == emptyVersion)
? VersionSpecification.emptyVersionSpecification
: new VersionSpecification(getMajor(), getMinor(), getMicro(), getQualifier());
}
} |
Looks like we have this in several places now. We should find a way to model it properly soon. | public List<Node> removeRecursively(Node node, boolean force) {
try (Mutex lock = lockUnallocated()) {
requireRemovable(node, false, force);
if (node.type().isHost()) {
List<Node> children = list().childrenOf(node).asList();
children.forEach(child -> requireRemovable(child, true, force));
db.removeNodes(children);
List<Node> removed = new ArrayList<>(children);
if (zone.cloud().value().equals("aws"))
db.removeNodes(List.of(node));
else
move(node, State.deprovisioned, Agent.system, Optional.empty());
removed.add(node);
return removed;
}
else {
db.removeNodes(List.of(node));
return List.of(node);
}
}
} | if (zone.cloud().value().equals("aws")) | public List<Node> removeRecursively(Node node, boolean force) {
try (Mutex lock = lockUnallocated()) {
requireRemovable(node, false, force);
if (node.type().isDockerHost()) {
List<Node> children = list().childrenOf(node).asList();
children.forEach(child -> requireRemovable(child, true, force));
db.removeNodes(children);
List<Node> removed = new ArrayList<>(children);
if (zone.cloud().value().equals("aws"))
db.removeNodes(List.of(node));
else
move(node, State.deprovisioned, Agent.system, Optional.empty());
removed.add(node);
return removed;
}
else {
db.removeNodes(List.of(node));
return List.of(node);
}
}
} | class NodeRepository extends AbstractComponent {
// Collaborators; all assigned once in the constructor.
private final CuratorDatabaseClient db;                          // Curator (ZooKeeper) backed node database
private final Clock clock;                                       // injected so time-sensitive decisions are testable
private final Zone zone;
private final NodeFlavors flavors;
private final NameResolver nameResolver;                         // resolves hostnames and ip addresses
private final OsVersions osVersions;
private final InfrastructureVersions infrastructureVersions;
private final FirmwareChecks firmwareChecks;
private final DockerImages dockerImages;
private final JobControl jobControl;
/**
 * Creates a node repository from a zookeeper provider.
 * This will use the system time to make time-sensitive decisions.
 */
@Inject
public NodeRepository(NodeRepositoryConfig config, NodeFlavors flavors, Curator curator, Zone zone, FlagSource flagSource) {
    this(flavors, curator, Clock.systemUTC(), zone, new DnsNameResolver(), DockerImage.fromString(config.dockerImage()), config.useCuratorClientCache(), flagSource);
}
/**
 * Creates a node repository from a zookeeper provider and a clock instance
 * which will be used for time-sensitive decisions.
 */
public NodeRepository(NodeFlavors flavors, Curator curator, Clock clock, Zone zone, NameResolver nameResolver,
                      DockerImage dockerImage, boolean useCuratorClientCache, FlagSource flagSource) {
    this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache);
    this.zone = zone;
    this.clock = clock;
    this.flavors = flavors;
    this.nameResolver = nameResolver;
    this.osVersions = new OsVersions(this);
    this.infrastructureVersions = new InfrastructureVersions(db);
    this.firmwareChecks = new FirmwareChecks(db, clock);
    this.dockerImages = new DockerImages(db, dockerImage, Flags.DOCKER_IMAGE_OVERRIDE.bindTo(flagSource));
    this.jobControl = new JobControl(db);
    // Rewrites every node in every state at startup; presumably migrates stored
    // entries to the current serialization format — TODO confirm intent.
    for (State state : State.values())
        db.writeTo(state, db.getNodes(state), Agent.system, Optional.empty());
}
/** Returns the curator database client used by this repository */
public CuratorDatabaseClient database() { return db; }
/** Returns the Docker image to use for the given node */
public DockerImage dockerImage(Node node) { return dockerImages.dockerImageFor(node); }
/** Returns the name resolver used to resolve hostnames and ip addresses */
public NameResolver nameResolver() { return nameResolver; }
/** Returns the OS versions to use for nodes in this repository */
public OsVersions osVersions() { return osVersions; }
/** Returns the infrastructure versions to use for nodes in this repository */
public InfrastructureVersions infrastructureVersions() { return infrastructureVersions; }
/** Returns the status of firmware checks for hosts managed by this repository */
public FirmwareChecks firmwareChecks() { return firmwareChecks; }
/** Returns the docker images to use for nodes in this repository */
public DockerImages dockerImages() { return dockerImages; }
/** Returns the status of maintenance jobs managed by this repository */
public JobControl jobControl() { return jobControl; }
/**
* Finds and returns the node with the hostname in any of the given states, or empty if not found
*
* @param hostname the full host name of the node
* @param inState the states the node may be in. If no states are given, it will be returned from any state
* @return the node, or empty if it was not found in any of the given states
*/
public Optional<Node> getNode(String hostname, State ... inState) {
return db.getNode(hostname, inState);
}
/**
* Returns all nodes in any of the given states.
*
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
* @return the node, or empty if it was not found in any of the given states
*/
public List<Node> getNodes(State ... inState) {
return new ArrayList<>(db.getNodes(inState));
}
/**
* Finds and returns the nodes of the given type in any of the given states.
*
* @param type the node type to return
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
* @return the node, or empty if it was not found in any of the given states
*/
public List<Node> getNodes(NodeType type, State ... inState) {
return db.getNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList());
}
/** Returns a filterable list of all nodes in this repository */
public NodeList list() {
return NodeList.copyOf(getNodes());
}
/** Returns a locked list of all nodes in this repository */
public LockedNodeList list(Mutex lock) {
return new LockedNodeList(getNodes(), lock);
}
/** Returns a filterable list of all load balancers in this repository */
public LoadBalancerList loadBalancers() {
return loadBalancers((ignored) -> true);
}
/** Returns a filterable list of load balancers belonging to given application */
public LoadBalancerList loadBalancers(ApplicationId application) {
return loadBalancers((id) -> id.application().equals(application));
}
private LoadBalancerList loadBalancers(Predicate<LoadBalancerId> predicate) {
return LoadBalancerList.copyOf(db.readLoadBalancers(predicate).values());
}
public List<Node> getNodes(ApplicationId id, State ... inState) { return db.getNodes(id, inState); }
public List<Node> getInactive() { return db.getNodes(State.inactive); }
public List<Node> getFailed() { return db.getNodes(State.failed); }
/**
* Returns the ACL for the node (trusted nodes, networks and ports)
*/
private NodeAcl getNodeAcl(Node node, NodeList candidates) {
Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname));
Set<Integer> trustedPorts = new LinkedHashSet<>();
Set<String> trustedNetworks = new LinkedHashSet<>();
trustedPorts.add(22);
candidates.parentOf(node).ifPresent(trustedNodes::add);
node.allocation().ifPresent(allocation -> {
trustedNodes.addAll(candidates.owner(allocation.owner()).asList());
loadBalancers(allocation.owner()).asList().stream()
.map(LoadBalancer::instance)
.map(LoadBalancerInstance::networks)
.forEach(trustedNetworks::addAll);
});
switch (node.type()) {
case tenant:
trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList());
node.allocation().ifPresent(allocation ->
trustedNodes.addAll(candidates.parentsOf(candidates.owner(allocation.owner()).asList()).asList()));
if (node.state() == State.ready) {
trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList());
}
break;
case config:
trustedNodes.addAll(candidates.asList());
trustedPorts.add(4443);
break;
case proxy:
trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
trustedPorts.add(443);
trustedPorts.add(4080);
trustedPorts.add(4443);
break;
case controller:
trustedPorts.add(4443);
trustedPorts.add(443);
break;
default:
illegal("Don't know how to create ACL for " + node + " of type " + node.type());
}
return new NodeAcl(node, trustedNodes, trustedNetworks, trustedPorts);
}
/**
* Creates a list of node ACLs which identify which nodes the given node should trust
*
* @param node Node for which to generate ACLs
* @param children Return ACLs for the children of the given node (e.g. containers on a Docker host)
* @return List of node ACLs
*/
public List<NodeAcl> getNodeAcls(Node node, boolean children) {
NodeList candidates = list();
if (children) {
return candidates.childrenOf(node).asList().stream()
.map(childNode -> getNodeAcl(childNode, candidates))
.collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}
return Collections.singletonList(getNodeAcl(node, candidates));
}
public NodeFlavors getAvailableFlavors() {
return flavors;
}
/** Creates a new node object, without adding it to the node repo. If no IP address is given, it will be resolved */
public Node createNode(String openStackId, String hostname, IP.Config ipConfig, Optional<String> parentHostname,
Flavor flavor, Optional<TenantName> reservedTo, NodeType type) {
if (ipConfig.primary().isEmpty())
ipConfig = ipConfig.with(nameResolver.getAllByNameOrThrow(hostname));
return Node.create(openStackId, ipConfig, hostname, parentHostname, Optional.empty(), flavor, reservedTo, type);
}
public Node createNode(String openStackId, String hostname, Optional<String> parentHostname, Flavor flavor, NodeType type) {
return createNode(openStackId, hostname, IP.Config.EMPTY, parentHostname, flavor, Optional.empty(), type);
}
/** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */
public List<Node> addDockerNodes(LockedNodeList nodes) {
for (Node node : nodes) {
if ( ! node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER))
illegal("Cannot add " + node + ": This is not a docker node");
if ( ! node.allocation().isPresent())
illegal("Cannot add " + node + ": Docker containers needs to be allocated");
Optional<Node> existing = getNode(node.hostname());
if (existing.isPresent())
illegal("Cannot add " + node + ": A node with this name already exists (" +
existing.get() + ", " + existing.get().history() + "). Node to be added: " +
node + ", " + node.history());
}
return db.addNodesInState(nodes.asList(), State.reserved);
}
/**
* Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes.
* If any of the nodes already exists in the deprovisioned state, they will be moved back to provisioned instead
* and the returned list will contain the existing (moved) node.
*/
public List<Node> addNodes(List<Node> nodes, Agent agent) {
try (Mutex lock = lockUnallocated()) {
List<Node> nodesToAdd = new ArrayList<>();
List<Node> nodesToMove = new ArrayList<>();
for (int i = 0; i < nodes.size(); i++) {
var node = nodes.get(i);
for (int j = 0; j < i; j++) {
if (node.equals(nodes.get(j)))
illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
}
Optional<Node> existing = getNode(node.hostname());
if (existing.isPresent()) {
if (existing.get().state() != State.deprovisioned)
illegal("Cannot add " + node + ": A node with this name already exists");
nodesToMove.add(existing.get());
}
else {
nodesToAdd.add(node);
}
}
List<Node> resultingNodes = new ArrayList<>();
resultingNodes.addAll(db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), State.provisioned));
nodesToMove.forEach(node -> resultingNodes.add(move(node, State.provisioned, agent, Optional.empty())));
return resultingNodes;
}
}
/** Sets a list of nodes ready and returns the nodes in the ready state */
public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
try (Mutex lock = lockUnallocated()) {
List<Node> nodesWithResetFields = nodes.stream()
.map(node -> {
if (node.state() != State.provisioned && node.state() != State.dirty)
illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
return node.with(node.status().withWantToRetire(false).withWantToDeprovision(false));
})
.collect(Collectors.toList());
return db.writeTo(State.ready, nodesWithResetFields, agent, Optional.of(reason));
}
}
public Node setReady(String hostname, Agent agent, String reason) {
Node nodeToReady = getNode(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));
if (nodeToReady.state() == State.ready) return nodeToReady;
return setReady(Collections.singletonList(nodeToReady), agent, reason).get(0);
}
/** Reserve nodes. This method does <b>not</b> lock the node repository */
public List<Node> reserve(List<Node> nodes) {
return db.writeTo(State.reserved, nodes, Agent.application, Optional.empty());
}
/** Activate nodes. This method does <b>not</b> lock the node repository */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
return db.writeTo(State.active, nodes, Agent.application, Optional.empty(), transaction);
}
/**
* Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
*
* @param application the application the nodes belong to
* @param nodes the nodes to make removable. These nodes MUST be in the active state.
*/
public void setRemovable(ApplicationId application, List<Node> nodes) {
try (Mutex lock = lock(application)) {
List<Node> removableNodes =
nodes.stream().map(node -> node.with(node.allocation().get().removable()))
.collect(Collectors.toList());
write(removableNodes, lock);
}
}
public void deactivate(ApplicationId application, NestedTransaction transaction) {
try (Mutex lock = lock(application)) {
deactivate(db.getNodes(application, State.reserved, State.active), transaction);
}
}
/**
* Deactivates these nodes in a transaction and returns
* the nodes in the new state which will hold if the transaction commits.
* This method does <b>not</b> lock
*/
public List<Node> deactivate(List<Node> nodes, NestedTransaction transaction) {
return db.writeTo(State.inactive, nodes, Agent.application, Optional.empty(), transaction);
}
/** Move nodes to the dirty state */
public List<Node> setDirty(List<Node> nodes, Agent agent, String reason) {
return performOn(NodeListFilter.from(nodes), (node, lock) -> setDirty(node, agent, reason));
}
/**
* Set a node dirty, allowed if it is in the provisioned, inactive, failed or parked state.
* Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
*
* @throws IllegalArgumentException if the node has hardware failure
*/
public Node setDirty(Node node, Agent agent, String reason) {
return db.writeTo(State.dirty, node, agent, Optional.of(reason));
}
public List<Node> dirtyRecursively(String hostname, Agent agent, String reason) {
Node nodeToDirty = getNode(hostname).orElseThrow(() ->
new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));
List<Node> nodesToDirty =
(nodeToDirty.type().isHost() ?
Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
Stream.of(nodeToDirty))
.filter(node -> node.state() != State.dirty)
.collect(Collectors.toList());
List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
.filter(node -> node.state() != State.provisioned)
.filter(node -> node.state() != State.failed)
.filter(node -> node.state() != State.parked)
.map(Node::hostname)
.collect(Collectors.toList());
if ( ! hostnamesNotAllowedToDirty.isEmpty())
illegal("Could not deallocate " + nodeToDirty + ": " +
hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked]");
return nodesToDirty.stream().map(node -> setDirty(node, agent, reason)).collect(Collectors.toList());
}
/**
* Fails this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node fail(String hostname, Agent agent, String reason) {
return move(hostname, true, State.failed, agent, Optional.of(reason));
}
/**
* Fails all the nodes that are children of hostname before finally failing the hostname itself.
*
* @return List of all the failed nodes in their new state
*/
public List<Node> failRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, State.failed, agent, Optional.of(reason));
}
/**
* Parks this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
return move(hostname, keepAllocation, State.parked, agent, Optional.of(reason));
}
/**
* Parks all the nodes that are children of hostname before finally parking the hostname itself.
*
* @return List of all the parked nodes in their new state
*/
public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, State.parked, agent, Optional.of(reason));
}
/**
* Moves a previously failed or parked node back to the active state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node reactivate(String hostname, Agent agent, String reason) {
return move(hostname, true, State.active, agent, Optional.of(reason));
}
private List<Node> moveRecursively(String hostname, State toState, Agent agent, Optional<String> reason) {
List<Node> moved = list().childrenOf(hostname).asList().stream()
.map(child -> move(child, toState, agent, reason))
.collect(Collectors.toList());
moved.add(move(hostname, true, toState, agent, reason));
return moved;
}
private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason) {
Node node = getNode(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));
if (!keepAllocation && node.allocation().isPresent()) {
node = node.withoutAllocation();
}
return move(node, toState, agent, reason);
}
private Node move(Node node, State toState, Agent agent, Optional<String> reason) {
if (toState == Node.State.active && ! node.allocation().isPresent())
illegal("Could not set " + node + " active. It has no allocation.");
try (Mutex lock = lock(node)) {
if (toState == State.active) {
for (Node currentActive : getNodes(node.allocation().get().owner(), State.active)) {
if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
&& node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
}
}
return db.writeTo(toState, node, agent, reason);
}
}
/*
* This method is used by the REST API to handle readying nodes for new allocations. For tenant docker
* containers this will remove the node from node repository, otherwise the node will be moved to state ready.
*/
public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
if (node.state() != State.dirty)
illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
return removeRecursively(node, true).get(0);
}
if (node.state() == State.ready) return node;
Node parentHost = node.parentHostname().flatMap(this::getNode).orElse(node);
List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost);
if ( ! failureReasons.isEmpty())
illegal(node + " cannot be readied because it has hard failures: " + failureReasons);
return setReady(Collections.singletonList(node), agent, reason).get(0);
}
/**
* Removes all the nodes that are children of hostname before finally removing the hostname itself.
*
* @return a List of all the nodes that have been removed or (for hosts) deprovisioned
*/
public List<Node> removeRecursively(String hostname) {
Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
return removeRecursively(node, false);
}
/**
* Throws if the given node cannot be removed. Removal is allowed if:
* - Tenant node: node is unallocated
* - Non-Docker-container node: iff in state provisioned|failed|parked
* - Docker-container-node:
* If only removing the container node: node in state ready
* If also removing the parent node: child is in state provisioned|failed|parked|dirty|ready
*/
private void requireRemovable(Node node, boolean removingAsChild, boolean force) {
if (force) return;
if (node.type() == NodeType.tenant && node.allocation().isPresent())
illegal(node + " is currently allocated and cannot be removed");
if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && !removingAsChild) {
if (node.state() != State.ready)
illegal(node + " can not be removed as it is not in the state [ready]");
}
else if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER) {
Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked, State.dirty, State.ready);
if ( ! legalStates.contains(node.state()))
illegal(node + " can not be removed as it is not in the states " + legalStates);
}
else {
Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked);
if (! legalStates.contains(node.state()))
illegal(node + " can not be removed as it is not in the states " + legalStates);
}
}
/**
* Increases the restart generation of the active nodes matching the filter.
*
* @return the nodes in their new state.
*/
public List<Node> restart(NodeFilter filter) {
return performOn(StateFilter.from(State.active, filter),
(node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()),
lock));
}
/**
* Increases the reboot generation of the nodes matching the filter.
* @return the nodes in their new state.
*/
public List<Node> reboot(NodeFilter filter) {
return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock));
}
/**
* Set target OS version of all nodes matching given filter.
*
* @return the nodes in their new state.
*/
public List<Node> upgradeOs(NodeFilter filter, Optional<Version> version) {
return performOn(filter, (node, lock) -> {
var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version));
return write(node.with(newStatus), lock);
});
}
/**
* Writes this node after it has changed some internal state but NOT changed its state field.
* This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
*
* @param lock Already acquired lock
* @return the written node for convenience
*/
public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); }
/**
* Writes these nodes after they have changed some internal state but NOT changed their state field.
* This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
*
* @param lock already acquired lock
* @return the written nodes for convenience
*/
public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
return db.writeTo(nodes, Agent.system, Optional.empty());
}
/**
* Performs an operation requiring locking on all nodes matching some filter.
*
* @param filter the filter determining the set of nodes where the operation will be performed
* @param action the action to perform
* @return the set of nodes on which the action was performed, as they became as a result of the operation
*/
private List<Node> performOn(NodeFilter filter, BiFunction<Node, Mutex, Node> action) {
List<Node> unallocatedNodes = new ArrayList<>();
ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
for (Node node : db.getNodes()) {
if ( ! filter.matches(node)) continue;
if (node.allocation().isPresent())
allocatedNodes.put(node.allocation().get().owner(), node);
else
unallocatedNodes.add(node);
}
List<Node> resultingNodes = new ArrayList<>();
try (Mutex lock = lockUnallocated()) {
for (Node node : unallocatedNodes)
resultingNodes.add(action.apply(node, lock));
}
for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
try (Mutex lock = lock(applicationNodes.getKey())) {
for (Node node : applicationNodes.getValue())
resultingNodes.add(action.apply(node, lock));
}
}
return resultingNodes;
}
/** Returns the time keeper of this system */
public Clock clock() { return clock; }
/** Returns the zone of this system */
public Zone zone() { return zone; }
/** Create a lock which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application) { return db.lock(application); }
/** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); }
/** Create a lock which provides exclusive rights to modifying unallocated nodes */
public Mutex lockUnallocated() { return db.lockInactive(); }
/** Acquires the appropriate lock for this node */
public Mutex lock(Node node) {
return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
}
private void illegal(String message) {
throw new IllegalArgumentException(message);
}
} | class NodeRepository extends AbstractComponent {
private final CuratorDatabaseClient db;
private final Clock clock;
private final Zone zone;
private final NodeFlavors flavors;
private final NameResolver nameResolver;
private final OsVersions osVersions;
private final InfrastructureVersions infrastructureVersions;
private final FirmwareChecks firmwareChecks;
private final DockerImages dockerImages;
private final JobControl jobControl;
/**
* Creates a node repository from a zookeeper provider.
* This will use the system time to make time-sensitive decisions
*/
@Inject
public NodeRepository(NodeRepositoryConfig config, NodeFlavors flavors, Curator curator, Zone zone, FlagSource flagSource) {
this(flavors, curator, Clock.systemUTC(), zone, new DnsNameResolver(), DockerImage.fromString(config.dockerImage()), config.useCuratorClientCache(), flagSource);
}
/**
* Creates a node repository from a zookeeper provider and a clock instance
* which will be used for time-sensitive decisions.
*/
public NodeRepository(NodeFlavors flavors, Curator curator, Clock clock, Zone zone, NameResolver nameResolver,
DockerImage dockerImage, boolean useCuratorClientCache, FlagSource flagSource) {
this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache);
this.zone = zone;
this.clock = clock;
this.flavors = flavors;
this.nameResolver = nameResolver;
this.osVersions = new OsVersions(this);
this.infrastructureVersions = new InfrastructureVersions(db);
this.firmwareChecks = new FirmwareChecks(db, clock);
this.dockerImages = new DockerImages(db, dockerImage, Flags.DOCKER_IMAGE_OVERRIDE.bindTo(flagSource));
this.jobControl = new JobControl(db);
for (State state : State.values())
db.writeTo(state, db.getNodes(state), Agent.system, Optional.empty());
}
/** Returns the curator database client used by this */
public CuratorDatabaseClient database() { return db; }
/** Returns the Docker image to use for given node */
public DockerImage dockerImage(Node node) { return dockerImages.dockerImageFor(node); }
/** @return The name resolver used to resolve hostname and ip addresses */
public NameResolver nameResolver() { return nameResolver; }
/** Returns the OS versions to use for nodes in this */
public OsVersions osVersions() { return osVersions; }
/** Returns the infrastructure versions to use for nodes in this */
public InfrastructureVersions infrastructureVersions() { return infrastructureVersions; }
/** Returns the status of firmware checks for hosts managed by this. */
public FirmwareChecks firmwareChecks() { return firmwareChecks; }
/** Returns the docker images to use for nodes in this. */
public DockerImages dockerImages() { return dockerImages; }
/** Returns the status of maintenance jobs managed by this. */
public JobControl jobControl() { return jobControl; }
/**
* Finds and returns the node with the hostname in any of the given states, or empty if not found
*
* @param hostname the full host name of the node
* @param inState the states the node may be in. If no states are given, it will be returned from any state
* @return the node, or empty if it was not found in any of the given states
*/
public Optional<Node> getNode(String hostname, State ... inState) {
return db.getNode(hostname, inState);
}
/**
* Returns all nodes in any of the given states.
*
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
* @return the node, or empty if it was not found in any of the given states
*/
public List<Node> getNodes(State ... inState) {
return new ArrayList<>(db.getNodes(inState));
}
/**
* Finds and returns the nodes of the given type in any of the given states.
*
* @param type the node type to return
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
* @return the node, or empty if it was not found in any of the given states
*/
public List<Node> getNodes(NodeType type, State ... inState) {
return db.getNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList());
}
/** Returns a filterable list of all nodes in this repository */
public NodeList list() {
return NodeList.copyOf(getNodes());
}
/** Returns a locked list of all nodes in this repository */
public LockedNodeList list(Mutex lock) {
return new LockedNodeList(getNodes(), lock);
}
/** Returns a filterable list of all load balancers in this repository */
public LoadBalancerList loadBalancers() {
return loadBalancers((ignored) -> true);
}
/** Returns a filterable list of load balancers belonging to given application */
public LoadBalancerList loadBalancers(ApplicationId application) {
return loadBalancers((id) -> id.application().equals(application));
}
private LoadBalancerList loadBalancers(Predicate<LoadBalancerId> predicate) {
return LoadBalancerList.copyOf(db.readLoadBalancers(predicate).values());
}
public List<Node> getNodes(ApplicationId id, State ... inState) { return db.getNodes(id, inState); }
public List<Node> getInactive() { return db.getNodes(State.inactive); }
public List<Node> getFailed() { return db.getNodes(State.failed); }
/**
* Returns the ACL for the node (trusted nodes, networks and ports)
*/
private NodeAcl getNodeAcl(Node node, NodeList candidates) {
Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname));
Set<Integer> trustedPorts = new LinkedHashSet<>();
Set<String> trustedNetworks = new LinkedHashSet<>();
trustedPorts.add(22);
candidates.parentOf(node).ifPresent(trustedNodes::add);
node.allocation().ifPresent(allocation -> {
trustedNodes.addAll(candidates.owner(allocation.owner()).asList());
loadBalancers(allocation.owner()).asList().stream()
.map(LoadBalancer::instance)
.map(LoadBalancerInstance::networks)
.forEach(trustedNetworks::addAll);
});
switch (node.type()) {
case tenant:
trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList());
node.allocation().ifPresent(allocation ->
trustedNodes.addAll(candidates.parentsOf(candidates.owner(allocation.owner()).asList()).asList()));
if (node.state() == State.ready) {
trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList());
}
break;
case config:
trustedNodes.addAll(candidates.asList());
trustedPorts.add(4443);
break;
case proxy:
trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
trustedPorts.add(443);
trustedPorts.add(4080);
trustedPorts.add(4443);
break;
case controller:
trustedPorts.add(4443);
trustedPorts.add(443);
break;
default:
illegal("Don't know how to create ACL for " + node + " of type " + node.type());
}
return new NodeAcl(node, trustedNodes, trustedNetworks, trustedPorts);
}
/**
* Creates a list of node ACLs which identify which nodes the given node should trust
*
* @param node Node for which to generate ACLs
* @param children Return ACLs for the children of the given node (e.g. containers on a Docker host)
* @return List of node ACLs
*/
public List<NodeAcl> getNodeAcls(Node node, boolean children) {
NodeList candidates = list();
if (children) {
return candidates.childrenOf(node).asList().stream()
.map(childNode -> getNodeAcl(childNode, candidates))
.collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}
return Collections.singletonList(getNodeAcl(node, candidates));
}
public NodeFlavors getAvailableFlavors() {
return flavors;
}
/** Creates a new node object, without adding it to the node repo. If no IP address is given, it will be resolved */
public Node createNode(String openStackId, String hostname, IP.Config ipConfig, Optional<String> parentHostname,
Flavor flavor, Optional<TenantName> reservedTo, NodeType type) {
if (ipConfig.primary().isEmpty())
ipConfig = ipConfig.with(nameResolver.getAllByNameOrThrow(hostname));
return Node.create(openStackId, ipConfig, hostname, parentHostname, Optional.empty(), flavor, reservedTo, type);
}
public Node createNode(String openStackId, String hostname, Optional<String> parentHostname, Flavor flavor, NodeType type) {
return createNode(openStackId, hostname, IP.Config.EMPTY, parentHostname, flavor, Optional.empty(), type);
}
/** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */
public List<Node> addDockerNodes(LockedNodeList nodes) {
for (Node node : nodes) {
if ( ! node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER))
illegal("Cannot add " + node + ": This is not a docker node");
if ( ! node.allocation().isPresent())
illegal("Cannot add " + node + ": Docker containers needs to be allocated");
Optional<Node> existing = getNode(node.hostname());
if (existing.isPresent())
illegal("Cannot add " + node + ": A node with this name already exists (" +
existing.get() + ", " + existing.get().history() + "). Node to be added: " +
node + ", " + node.history());
}
return db.addNodesInState(nodes.asList(), State.reserved);
}
/**
* Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes.
* If any of the nodes already exists in the deprovisioned state, they will be moved back to provisioned instead
* and the returned list will contain the existing (moved) node.
*/
public List<Node> addNodes(List<Node> nodes, Agent agent) {
try (Mutex lock = lockUnallocated()) {
List<Node> nodesToAdd = new ArrayList<>();
List<Node> nodesToMove = new ArrayList<>();
for (int i = 0; i < nodes.size(); i++) {
var node = nodes.get(i);
for (int j = 0; j < i; j++) {
if (node.equals(nodes.get(j)))
illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
}
Optional<Node> existing = getNode(node.hostname());
if (existing.isPresent()) {
if (existing.get().state() != State.deprovisioned)
illegal("Cannot add " + node + ": A node with this name already exists");
nodesToMove.add(existing.get());
}
else {
nodesToAdd.add(node);
}
}
List<Node> resultingNodes = new ArrayList<>();
resultingNodes.addAll(db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), State.provisioned));
nodesToMove.forEach(node -> resultingNodes.add(move(node, State.provisioned, agent, Optional.empty())));
return resultingNodes;
}
}
/** Sets a list of nodes ready and returns the nodes in the ready state */
public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
try (Mutex lock = lockUnallocated()) {
List<Node> nodesWithResetFields = nodes.stream()
.map(node -> {
if (node.state() != State.provisioned && node.state() != State.dirty)
illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
return node.with(node.status().withWantToRetire(false).withWantToDeprovision(false));
})
.collect(Collectors.toList());
return db.writeTo(State.ready, nodesWithResetFields, agent, Optional.of(reason));
}
}
public Node setReady(String hostname, Agent agent, String reason) {
Node nodeToReady = getNode(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));
if (nodeToReady.state() == State.ready) return nodeToReady;
return setReady(Collections.singletonList(nodeToReady), agent, reason).get(0);
}
/** Reserve nodes. This method does <b>not</b> lock the node repository */
public List<Node> reserve(List<Node> nodes) {
return db.writeTo(State.reserved, nodes, Agent.application, Optional.empty());
}
/** Activate nodes. This method does <b>not</b> lock the node repository */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
return db.writeTo(State.active, nodes, Agent.application, Optional.empty(), transaction);
}
/**
* Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
*
* @param application the application the nodes belong to
* @param nodes the nodes to make removable. These nodes MUST be in the active state.
*/
public void setRemovable(ApplicationId application, List<Node> nodes) {
try (Mutex lock = lock(application)) {
List<Node> removableNodes =
nodes.stream().map(node -> node.with(node.allocation().get().removable()))
.collect(Collectors.toList());
write(removableNodes, lock);
}
}
public void deactivate(ApplicationId application, NestedTransaction transaction) {
try (Mutex lock = lock(application)) {
deactivate(db.getNodes(application, State.reserved, State.active), transaction);
}
}
/**
* Deactivates these nodes in a transaction and returns
* the nodes in the new state which will hold if the transaction commits.
* This method does <b>not</b> lock
*/
public List<Node> deactivate(List<Node> nodes, NestedTransaction transaction) {
return db.writeTo(State.inactive, nodes, Agent.application, Optional.empty(), transaction);
}
/** Move nodes to the dirty state */
public List<Node> setDirty(List<Node> nodes, Agent agent, String reason) {
return performOn(NodeListFilter.from(nodes), (node, lock) -> setDirty(node, agent, reason));
}
/**
* Set a node dirty, allowed if it is in the provisioned, inactive, failed or parked state.
* Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
*
* @throws IllegalArgumentException if the node has hardware failure
*/
public Node setDirty(Node node, Agent agent, String reason) {
return db.writeTo(State.dirty, node, agent, Optional.of(reason));
}
public List<Node> dirtyRecursively(String hostname, Agent agent, String reason) {
Node nodeToDirty = getNode(hostname).orElseThrow(() ->
new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));
List<Node> nodesToDirty =
(nodeToDirty.type().isDockerHost() ?
Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
Stream.of(nodeToDirty))
.filter(node -> node.state() != State.dirty)
.collect(Collectors.toList());
List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
.filter(node -> node.state() != State.provisioned)
.filter(node -> node.state() != State.failed)
.filter(node -> node.state() != State.parked)
.map(Node::hostname)
.collect(Collectors.toList());
if ( ! hostnamesNotAllowedToDirty.isEmpty())
illegal("Could not deallocate " + nodeToDirty + ": " +
hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked]");
return nodesToDirty.stream().map(node -> setDirty(node, agent, reason)).collect(Collectors.toList());
}
/**
* Fails this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node fail(String hostname, Agent agent, String reason) {
return move(hostname, true, State.failed, agent, Optional.of(reason));
}
/**
* Fails all the nodes that are children of hostname before finally failing the hostname itself.
*
* @return List of all the failed nodes in their new state
*/
public List<Node> failRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, State.failed, agent, Optional.of(reason));
}
/**
* Parks this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
return move(hostname, keepAllocation, State.parked, agent, Optional.of(reason));
}
/**
* Parks all the nodes that are children of hostname before finally parking the hostname itself.
*
* @return List of all the parked nodes in their new state
*/
public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, State.parked, agent, Optional.of(reason));
}
/**
* Moves a previously failed or parked node back to the active state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node reactivate(String hostname, Agent agent, String reason) {
return move(hostname, true, State.active, agent, Optional.of(reason));
}
private List<Node> moveRecursively(String hostname, State toState, Agent agent, Optional<String> reason) {
List<Node> moved = list().childrenOf(hostname).asList().stream()
.map(child -> move(child, toState, agent, reason))
.collect(Collectors.toList());
moved.add(move(hostname, true, toState, agent, reason));
return moved;
}
private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason) {
Node node = getNode(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));
if (!keepAllocation && node.allocation().isPresent()) {
node = node.withoutAllocation();
}
return move(node, toState, agent, reason);
}
private Node move(Node node, State toState, Agent agent, Optional<String> reason) {
if (toState == Node.State.active && ! node.allocation().isPresent())
illegal("Could not set " + node + " active. It has no allocation.");
try (Mutex lock = lock(node)) {
if (toState == State.active) {
for (Node currentActive : getNodes(node.allocation().get().owner(), State.active)) {
if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
&& node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
}
}
return db.writeTo(toState, node, agent, reason);
}
}
/*
* This method is used by the REST API to handle readying nodes for new allocations. For tenant docker
* containers this will remove the node from node repository, otherwise the node will be moved to state ready.
*/
public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
if (node.state() != State.dirty)
illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
return removeRecursively(node, true).get(0);
}
if (node.state() == State.ready) return node;
Node parentHost = node.parentHostname().flatMap(this::getNode).orElse(node);
List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost);
if ( ! failureReasons.isEmpty())
illegal(node + " cannot be readied because it has hard failures: " + failureReasons);
return setReady(Collections.singletonList(node), agent, reason).get(0);
}
/**
* Removes all the nodes that are children of hostname before finally removing the hostname itself.
*
* @return a List of all the nodes that have been removed or (for hosts) deprovisioned
*/
public List<Node> removeRecursively(String hostname) {
Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
return removeRecursively(node, false);
}
/**
* Throws if the given node cannot be removed. Removal is allowed if:
* - Tenant node: node is unallocated
* - Non-Docker-container node: iff in state provisioned|failed|parked
* - Docker-container-node:
* If only removing the container node: node in state ready
* If also removing the parent node: child is in state provisioned|failed|parked|dirty|ready
*/
private void requireRemovable(Node node, boolean removingAsChild, boolean force) {
if (force) return;
if (node.type() == NodeType.tenant && node.allocation().isPresent())
illegal(node + " is currently allocated and cannot be removed");
if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && !removingAsChild) {
if (node.state() != State.ready)
illegal(node + " can not be removed as it is not in the state [ready]");
}
else if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER) {
Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked, State.dirty, State.ready);
if ( ! legalStates.contains(node.state()))
illegal(node + " can not be removed as it is not in the states " + legalStates);
}
else {
Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked);
if (! legalStates.contains(node.state()))
illegal(node + " can not be removed as it is not in the states " + legalStates);
}
}
/**
* Increases the restart generation of the active nodes matching the filter.
*
* @return the nodes in their new state.
*/
public List<Node> restart(NodeFilter filter) {
return performOn(StateFilter.from(State.active, filter),
(node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()),
lock));
}
/**
* Increases the reboot generation of the nodes matching the filter.
* @return the nodes in their new state.
*/
public List<Node> reboot(NodeFilter filter) {
return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock));
}
/**
* Set target OS version of all nodes matching given filter.
*
* @return the nodes in their new state.
*/
public List<Node> upgradeOs(NodeFilter filter, Optional<Version> version) {
return performOn(filter, (node, lock) -> {
var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version));
return write(node.with(newStatus), lock);
});
}
/**
* Writes this node after it has changed some internal state but NOT changed its state field.
* This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
*
* @param lock Already acquired lock
* @return the written node for convenience
*/
public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); }
/**
* Writes these nodes after they have changed some internal state but NOT changed their state field.
* This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
*
* @param lock already acquired lock
* @return the written nodes for convenience
*/
public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
return db.writeTo(nodes, Agent.system, Optional.empty());
}
/**
* Performs an operation requiring locking on all nodes matching some filter.
*
* @param filter the filter determining the set of nodes where the operation will be performed
* @param action the action to perform
* @return the set of nodes on which the action was performed, as they became as a result of the operation
*/
private List<Node> performOn(NodeFilter filter, BiFunction<Node, Mutex, Node> action) {
List<Node> unallocatedNodes = new ArrayList<>();
ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
for (Node node : db.getNodes()) {
if ( ! filter.matches(node)) continue;
if (node.allocation().isPresent())
allocatedNodes.put(node.allocation().get().owner(), node);
else
unallocatedNodes.add(node);
}
List<Node> resultingNodes = new ArrayList<>();
try (Mutex lock = lockUnallocated()) {
for (Node node : unallocatedNodes)
resultingNodes.add(action.apply(node, lock));
}
for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
try (Mutex lock = lock(applicationNodes.getKey())) {
for (Node node : applicationNodes.getValue())
resultingNodes.add(action.apply(node, lock));
}
}
return resultingNodes;
}
/** Returns the time keeper of this system */
public Clock clock() { return clock; }
/** Returns the zone of this system */
public Zone zone() { return zone; }
/** Create a lock which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application) { return db.lock(application); }
/** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); }
/** Create a lock which provides exclusive rights to modifying unallocated nodes */
public Mutex lockUnallocated() { return db.lockInactive(); }
/** Acquires the appropriate lock for this node */
public Mutex lock(Node node) {
return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
}
private void illegal(String message) {
throw new IllegalArgumentException(message);
}
} |
Agree. What I want to do is create a "cloud config" which defines the properties of a cloud and then only test for those properties. | public List<Node> removeRecursively(Node node, boolean force) {
try (Mutex lock = lockUnallocated()) {
requireRemovable(node, false, force);
if (node.type().isHost()) {
List<Node> children = list().childrenOf(node).asList();
children.forEach(child -> requireRemovable(child, true, force));
db.removeNodes(children);
List<Node> removed = new ArrayList<>(children);
if (zone.cloud().value().equals("aws"))
db.removeNodes(List.of(node));
else
move(node, State.deprovisioned, Agent.system, Optional.empty());
removed.add(node);
return removed;
}
else {
db.removeNodes(List.of(node));
return List.of(node);
}
}
} | if (zone.cloud().value().equals("aws")) | public List<Node> removeRecursively(Node node, boolean force) {
try (Mutex lock = lockUnallocated()) {
requireRemovable(node, false, force);
if (node.type().isDockerHost()) {
List<Node> children = list().childrenOf(node).asList();
children.forEach(child -> requireRemovable(child, true, force));
db.removeNodes(children);
List<Node> removed = new ArrayList<>(children);
if (zone.cloud().value().equals("aws"))
db.removeNodes(List.of(node));
else
move(node, State.deprovisioned, Agent.system, Optional.empty());
removed.add(node);
return removed;
}
else {
db.removeNodes(List.of(node));
return List.of(node);
}
}
} | class NodeRepository extends AbstractComponent {
private final CuratorDatabaseClient db;
private final Clock clock;
private final Zone zone;
private final NodeFlavors flavors;
private final NameResolver nameResolver;
private final OsVersions osVersions;
private final InfrastructureVersions infrastructureVersions;
private final FirmwareChecks firmwareChecks;
private final DockerImages dockerImages;
private final JobControl jobControl;
/**
* Creates a node repository from a zookeeper provider.
* This will use the system time to make time-sensitive decisions
*/
@Inject
public NodeRepository(NodeRepositoryConfig config, NodeFlavors flavors, Curator curator, Zone zone, FlagSource flagSource) {
this(flavors, curator, Clock.systemUTC(), zone, new DnsNameResolver(), DockerImage.fromString(config.dockerImage()), config.useCuratorClientCache(), flagSource);
}
/**
* Creates a node repository from a zookeeper provider and a clock instance
* which will be used for time-sensitive decisions.
*/
public NodeRepository(NodeFlavors flavors, Curator curator, Clock clock, Zone zone, NameResolver nameResolver,
DockerImage dockerImage, boolean useCuratorClientCache, FlagSource flagSource) {
this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache);
this.zone = zone;
this.clock = clock;
this.flavors = flavors;
this.nameResolver = nameResolver;
this.osVersions = new OsVersions(this);
this.infrastructureVersions = new InfrastructureVersions(db);
this.firmwareChecks = new FirmwareChecks(db, clock);
this.dockerImages = new DockerImages(db, dockerImage, Flags.DOCKER_IMAGE_OVERRIDE.bindTo(flagSource));
this.jobControl = new JobControl(db);
for (State state : State.values())
db.writeTo(state, db.getNodes(state), Agent.system, Optional.empty());
}
/** Returns the curator database client used by this */
public CuratorDatabaseClient database() { return db; }
/** Returns the Docker image to use for given node */
public DockerImage dockerImage(Node node) { return dockerImages.dockerImageFor(node); }
/** @return The name resolver used to resolve hostname and ip addresses */
public NameResolver nameResolver() { return nameResolver; }
/** Returns the OS versions to use for nodes in this */
public OsVersions osVersions() { return osVersions; }
/** Returns the infrastructure versions to use for nodes in this */
public InfrastructureVersions infrastructureVersions() { return infrastructureVersions; }
/** Returns the status of firmware checks for hosts managed by this. */
public FirmwareChecks firmwareChecks() { return firmwareChecks; }
/** Returns the docker images to use for nodes in this. */
public DockerImages dockerImages() { return dockerImages; }
/** Returns the status of maintenance jobs managed by this. */
public JobControl jobControl() { return jobControl; }
/**
* Finds and returns the node with the hostname in any of the given states, or empty if not found
*
* @param hostname the full host name of the node
* @param inState the states the node may be in. If no states are given, it will be returned from any state
* @return the node, or empty if it was not found in any of the given states
*/
public Optional<Node> getNode(String hostname, State ... inState) {
return db.getNode(hostname, inState);
}
/**
* Returns all nodes in any of the given states.
*
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
* @return the node, or empty if it was not found in any of the given states
*/
public List<Node> getNodes(State ... inState) {
return new ArrayList<>(db.getNodes(inState));
}
/**
* Finds and returns the nodes of the given type in any of the given states.
*
* @param type the node type to return
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
* @return the node, or empty if it was not found in any of the given states
*/
public List<Node> getNodes(NodeType type, State ... inState) {
return db.getNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList());
}
/** Returns a filterable list of all nodes in this repository */
public NodeList list() {
return NodeList.copyOf(getNodes());
}
/** Returns a locked list of all nodes in this repository */
public LockedNodeList list(Mutex lock) {
return new LockedNodeList(getNodes(), lock);
}
/** Returns a filterable list of all load balancers in this repository */
public LoadBalancerList loadBalancers() {
return loadBalancers((ignored) -> true);
}
/** Returns a filterable list of load balancers belonging to given application */
public LoadBalancerList loadBalancers(ApplicationId application) {
return loadBalancers((id) -> id.application().equals(application));
}
private LoadBalancerList loadBalancers(Predicate<LoadBalancerId> predicate) {
return LoadBalancerList.copyOf(db.readLoadBalancers(predicate).values());
}
public List<Node> getNodes(ApplicationId id, State ... inState) { return db.getNodes(id, inState); }
public List<Node> getInactive() { return db.getNodes(State.inactive); }
public List<Node> getFailed() { return db.getNodes(State.failed); }
/**
* Returns the ACL for the node (trusted nodes, networks and ports)
*/
private NodeAcl getNodeAcl(Node node, NodeList candidates) {
Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname));
Set<Integer> trustedPorts = new LinkedHashSet<>();
Set<String> trustedNetworks = new LinkedHashSet<>();
trustedPorts.add(22);
candidates.parentOf(node).ifPresent(trustedNodes::add);
node.allocation().ifPresent(allocation -> {
trustedNodes.addAll(candidates.owner(allocation.owner()).asList());
loadBalancers(allocation.owner()).asList().stream()
.map(LoadBalancer::instance)
.map(LoadBalancerInstance::networks)
.forEach(trustedNetworks::addAll);
});
switch (node.type()) {
case tenant:
trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList());
node.allocation().ifPresent(allocation ->
trustedNodes.addAll(candidates.parentsOf(candidates.owner(allocation.owner()).asList()).asList()));
if (node.state() == State.ready) {
trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList());
}
break;
case config:
trustedNodes.addAll(candidates.asList());
trustedPorts.add(4443);
break;
case proxy:
trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
trustedPorts.add(443);
trustedPorts.add(4080);
trustedPorts.add(4443);
break;
case controller:
trustedPorts.add(4443);
trustedPorts.add(443);
break;
default:
illegal("Don't know how to create ACL for " + node + " of type " + node.type());
}
return new NodeAcl(node, trustedNodes, trustedNetworks, trustedPorts);
}
/**
* Creates a list of node ACLs which identify which nodes the given node should trust
*
* @param node Node for which to generate ACLs
* @param children Return ACLs for the children of the given node (e.g. containers on a Docker host)
* @return List of node ACLs
*/
public List<NodeAcl> getNodeAcls(Node node, boolean children) {
NodeList candidates = list();
if (children) {
return candidates.childrenOf(node).asList().stream()
.map(childNode -> getNodeAcl(childNode, candidates))
.collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}
return Collections.singletonList(getNodeAcl(node, candidates));
}
public NodeFlavors getAvailableFlavors() {
return flavors;
}
/** Creates a new node object, without adding it to the node repo. If no IP address is given, it will be resolved */
public Node createNode(String openStackId, String hostname, IP.Config ipConfig, Optional<String> parentHostname,
Flavor flavor, Optional<TenantName> reservedTo, NodeType type) {
if (ipConfig.primary().isEmpty())
ipConfig = ipConfig.with(nameResolver.getAllByNameOrThrow(hostname));
return Node.create(openStackId, ipConfig, hostname, parentHostname, Optional.empty(), flavor, reservedTo, type);
}
public Node createNode(String openStackId, String hostname, Optional<String> parentHostname, Flavor flavor, NodeType type) {
return createNode(openStackId, hostname, IP.Config.EMPTY, parentHostname, flavor, Optional.empty(), type);
}
/** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */
public List<Node> addDockerNodes(LockedNodeList nodes) {
for (Node node : nodes) {
if ( ! node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER))
illegal("Cannot add " + node + ": This is not a docker node");
if ( ! node.allocation().isPresent())
illegal("Cannot add " + node + ": Docker containers needs to be allocated");
Optional<Node> existing = getNode(node.hostname());
if (existing.isPresent())
illegal("Cannot add " + node + ": A node with this name already exists (" +
existing.get() + ", " + existing.get().history() + "). Node to be added: " +
node + ", " + node.history());
}
return db.addNodesInState(nodes.asList(), State.reserved);
}
/**
* Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes.
* If any of the nodes already exists in the deprovisioned state, they will be moved back to provisioned instead
* and the returned list will contain the existing (moved) node.
*/
public List<Node> addNodes(List<Node> nodes, Agent agent) {
try (Mutex lock = lockUnallocated()) {
List<Node> nodesToAdd = new ArrayList<>();
List<Node> nodesToMove = new ArrayList<>();
for (int i = 0; i < nodes.size(); i++) {
var node = nodes.get(i);
for (int j = 0; j < i; j++) {
if (node.equals(nodes.get(j)))
illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
}
Optional<Node> existing = getNode(node.hostname());
if (existing.isPresent()) {
if (existing.get().state() != State.deprovisioned)
illegal("Cannot add " + node + ": A node with this name already exists");
nodesToMove.add(existing.get());
}
else {
nodesToAdd.add(node);
}
}
List<Node> resultingNodes = new ArrayList<>();
resultingNodes.addAll(db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), State.provisioned));
nodesToMove.forEach(node -> resultingNodes.add(move(node, State.provisioned, agent, Optional.empty())));
return resultingNodes;
}
}
/** Sets a list of nodes ready and returns the nodes in the ready state */
public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
try (Mutex lock = lockUnallocated()) {
List<Node> nodesWithResetFields = nodes.stream()
.map(node -> {
if (node.state() != State.provisioned && node.state() != State.dirty)
illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
return node.with(node.status().withWantToRetire(false).withWantToDeprovision(false));
})
.collect(Collectors.toList());
return db.writeTo(State.ready, nodesWithResetFields, agent, Optional.of(reason));
}
}
public Node setReady(String hostname, Agent agent, String reason) {
Node nodeToReady = getNode(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));
if (nodeToReady.state() == State.ready) return nodeToReady;
return setReady(Collections.singletonList(nodeToReady), agent, reason).get(0);
}
/** Reserve nodes. This method does <b>not</b> lock the node repository */
public List<Node> reserve(List<Node> nodes) {
return db.writeTo(State.reserved, nodes, Agent.application, Optional.empty());
}
/** Activate nodes. This method does <b>not</b> lock the node repository */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
return db.writeTo(State.active, nodes, Agent.application, Optional.empty(), transaction);
}
/**
* Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
*
* @param application the application the nodes belong to
* @param nodes the nodes to make removable. These nodes MUST be in the active state.
*/
public void setRemovable(ApplicationId application, List<Node> nodes) {
try (Mutex lock = lock(application)) {
List<Node> removableNodes =
nodes.stream().map(node -> node.with(node.allocation().get().removable()))
.collect(Collectors.toList());
write(removableNodes, lock);
}
}
public void deactivate(ApplicationId application, NestedTransaction transaction) {
try (Mutex lock = lock(application)) {
deactivate(db.getNodes(application, State.reserved, State.active), transaction);
}
}
/**
* Deactivates these nodes in a transaction and returns
* the nodes in the new state which will hold if the transaction commits.
* This method does <b>not</b> lock
*/
public List<Node> deactivate(List<Node> nodes, NestedTransaction transaction) {
return db.writeTo(State.inactive, nodes, Agent.application, Optional.empty(), transaction);
}
/** Move nodes to the dirty state */
public List<Node> setDirty(List<Node> nodes, Agent agent, String reason) {
return performOn(NodeListFilter.from(nodes), (node, lock) -> setDirty(node, agent, reason));
}
/**
* Set a node dirty, allowed if it is in the provisioned, inactive, failed or parked state.
* Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
*
* @throws IllegalArgumentException if the node has hardware failure
*/
public Node setDirty(Node node, Agent agent, String reason) {
return db.writeTo(State.dirty, node, agent, Optional.of(reason));
}
public List<Node> dirtyRecursively(String hostname, Agent agent, String reason) {
Node nodeToDirty = getNode(hostname).orElseThrow(() ->
new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));
List<Node> nodesToDirty =
(nodeToDirty.type().isHost() ?
Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
Stream.of(nodeToDirty))
.filter(node -> node.state() != State.dirty)
.collect(Collectors.toList());
List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
.filter(node -> node.state() != State.provisioned)
.filter(node -> node.state() != State.failed)
.filter(node -> node.state() != State.parked)
.map(Node::hostname)
.collect(Collectors.toList());
if ( ! hostnamesNotAllowedToDirty.isEmpty())
illegal("Could not deallocate " + nodeToDirty + ": " +
hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked]");
return nodesToDirty.stream().map(node -> setDirty(node, agent, reason)).collect(Collectors.toList());
}
/**
* Fails this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node fail(String hostname, Agent agent, String reason) {
return move(hostname, true, State.failed, agent, Optional.of(reason));
}
/**
* Fails all the nodes that are children of hostname before finally failing the hostname itself.
*
* @return List of all the failed nodes in their new state
*/
public List<Node> failRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, State.failed, agent, Optional.of(reason));
}
/**
* Parks this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
return move(hostname, keepAllocation, State.parked, agent, Optional.of(reason));
}
/**
* Parks all the nodes that are children of hostname before finally parking the hostname itself.
*
* @return List of all the parked nodes in their new state
*/
public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, State.parked, agent, Optional.of(reason));
}
/**
* Moves a previously failed or parked node back to the active state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node reactivate(String hostname, Agent agent, String reason) {
return move(hostname, true, State.active, agent, Optional.of(reason));
}
private List<Node> moveRecursively(String hostname, State toState, Agent agent, Optional<String> reason) {
List<Node> moved = list().childrenOf(hostname).asList().stream()
.map(child -> move(child, toState, agent, reason))
.collect(Collectors.toList());
moved.add(move(hostname, true, toState, agent, reason));
return moved;
}
private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason) {
Node node = getNode(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));
if (!keepAllocation && node.allocation().isPresent()) {
node = node.withoutAllocation();
}
return move(node, toState, agent, reason);
}
private Node move(Node node, State toState, Agent agent, Optional<String> reason) {
if (toState == Node.State.active && ! node.allocation().isPresent())
illegal("Could not set " + node + " active. It has no allocation.");
try (Mutex lock = lock(node)) {
if (toState == State.active) {
for (Node currentActive : getNodes(node.allocation().get().owner(), State.active)) {
if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
&& node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
}
}
return db.writeTo(toState, node, agent, reason);
}
}
/*
* This method is used by the REST API to handle readying nodes for new allocations. For tenant docker
* containers this will remove the node from node repository, otherwise the node will be moved to state ready.
*/
public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
if (node.state() != State.dirty)
illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
return removeRecursively(node, true).get(0);
}
if (node.state() == State.ready) return node;
Node parentHost = node.parentHostname().flatMap(this::getNode).orElse(node);
List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost);
if ( ! failureReasons.isEmpty())
illegal(node + " cannot be readied because it has hard failures: " + failureReasons);
return setReady(Collections.singletonList(node), agent, reason).get(0);
}
/**
* Removes all the nodes that are children of hostname before finally removing the hostname itself.
*
* @return a List of all the nodes that have been removed or (for hosts) deprovisioned
*/
public List<Node> removeRecursively(String hostname) {
Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
return removeRecursively(node, false);
}
/**
* Throws if the given node cannot be removed. Removal is allowed if:
* - Tenant node: node is unallocated
* - Non-Docker-container node: iff in state provisioned|failed|parked
* - Docker-container-node:
* If only removing the container node: node in state ready
* If also removing the parent node: child is in state provisioned|failed|parked|dirty|ready
*/
private void requireRemovable(Node node, boolean removingAsChild, boolean force) {
if (force) return;
if (node.type() == NodeType.tenant && node.allocation().isPresent())
illegal(node + " is currently allocated and cannot be removed");
if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && !removingAsChild) {
if (node.state() != State.ready)
illegal(node + " can not be removed as it is not in the state [ready]");
}
else if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER) {
Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked, State.dirty, State.ready);
if ( ! legalStates.contains(node.state()))
illegal(node + " can not be removed as it is not in the states " + legalStates);
}
else {
Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked);
if (! legalStates.contains(node.state()))
illegal(node + " can not be removed as it is not in the states " + legalStates);
}
}
/**
* Increases the restart generation of the active nodes matching the filter.
*
* @return the nodes in their new state.
*/
public List<Node> restart(NodeFilter filter) {
return performOn(StateFilter.from(State.active, filter),
(node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()),
lock));
}
/**
* Increases the reboot generation of the nodes matching the filter.
* @return the nodes in their new state.
*/
public List<Node> reboot(NodeFilter filter) {
return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock));
}
/**
* Set target OS version of all nodes matching given filter.
*
* @return the nodes in their new state.
*/
public List<Node> upgradeOs(NodeFilter filter, Optional<Version> version) {
return performOn(filter, (node, lock) -> {
var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version));
return write(node.with(newStatus), lock);
});
}
/**
* Writes this node after it has changed some internal state but NOT changed its state field.
* This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
*
* @param lock Already acquired lock
* @return the written node for convenience
*/
public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); }
/**
* Writes these nodes after they have changed some internal state but NOT changed their state field.
* This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
*
* @param lock already acquired lock
* @return the written nodes for convenience
*/
public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
return db.writeTo(nodes, Agent.system, Optional.empty());
}
/**
* Performs an operation requiring locking on all nodes matching some filter.
*
* @param filter the filter determining the set of nodes where the operation will be performed
* @param action the action to perform
* @return the set of nodes on which the action was performed, as they became as a result of the operation
*/
private List<Node> performOn(NodeFilter filter, BiFunction<Node, Mutex, Node> action) {
List<Node> unallocatedNodes = new ArrayList<>();
ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
for (Node node : db.getNodes()) {
if ( ! filter.matches(node)) continue;
if (node.allocation().isPresent())
allocatedNodes.put(node.allocation().get().owner(), node);
else
unallocatedNodes.add(node);
}
List<Node> resultingNodes = new ArrayList<>();
try (Mutex lock = lockUnallocated()) {
for (Node node : unallocatedNodes)
resultingNodes.add(action.apply(node, lock));
}
for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
try (Mutex lock = lock(applicationNodes.getKey())) {
for (Node node : applicationNodes.getValue())
resultingNodes.add(action.apply(node, lock));
}
}
return resultingNodes;
}
/** Returns the time keeper of this system */
public Clock clock() { return clock; }
/** Returns the zone of this system */
public Zone zone() { return zone; }
/** Create a lock which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application) { return db.lock(application); }
/** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); }
/** Create a lock which provides exclusive rights to modifying unallocated nodes */
public Mutex lockUnallocated() { return db.lockInactive(); }
/** Acquires the appropriate lock for this node */
public Mutex lock(Node node) {
return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
}
private void illegal(String message) {
throw new IllegalArgumentException(message);
}
} | class NodeRepository extends AbstractComponent {
private final CuratorDatabaseClient db;
private final Clock clock;
private final Zone zone;
private final NodeFlavors flavors;
private final NameResolver nameResolver;
private final OsVersions osVersions;
private final InfrastructureVersions infrastructureVersions;
private final FirmwareChecks firmwareChecks;
private final DockerImages dockerImages;
private final JobControl jobControl;
/**
* Creates a node repository from a zookeeper provider.
* This will use the system time to make time-sensitive decisions
*/
@Inject
public NodeRepository(NodeRepositoryConfig config, NodeFlavors flavors, Curator curator, Zone zone, FlagSource flagSource) {
this(flavors, curator, Clock.systemUTC(), zone, new DnsNameResolver(), DockerImage.fromString(config.dockerImage()), config.useCuratorClientCache(), flagSource);
}
/**
* Creates a node repository from a zookeeper provider and a clock instance
* which will be used for time-sensitive decisions.
*/
public NodeRepository(NodeFlavors flavors, Curator curator, Clock clock, Zone zone, NameResolver nameResolver,
DockerImage dockerImage, boolean useCuratorClientCache, FlagSource flagSource) {
this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache);
this.zone = zone;
this.clock = clock;
this.flavors = flavors;
this.nameResolver = nameResolver;
this.osVersions = new OsVersions(this);
this.infrastructureVersions = new InfrastructureVersions(db);
this.firmwareChecks = new FirmwareChecks(db, clock);
this.dockerImages = new DockerImages(db, dockerImage, Flags.DOCKER_IMAGE_OVERRIDE.bindTo(flagSource));
this.jobControl = new JobControl(db);
for (State state : State.values())
db.writeTo(state, db.getNodes(state), Agent.system, Optional.empty());
}
/** Returns the curator database client used by this */
public CuratorDatabaseClient database() { return db; }
/** Returns the Docker image to use for given node */
public DockerImage dockerImage(Node node) { return dockerImages.dockerImageFor(node); }
/** @return The name resolver used to resolve hostname and ip addresses */
public NameResolver nameResolver() { return nameResolver; }
/** Returns the OS versions to use for nodes in this */
public OsVersions osVersions() { return osVersions; }
/** Returns the infrastructure versions to use for nodes in this */
public InfrastructureVersions infrastructureVersions() { return infrastructureVersions; }
/** Returns the status of firmware checks for hosts managed by this. */
public FirmwareChecks firmwareChecks() { return firmwareChecks; }
/** Returns the docker images to use for nodes in this. */
public DockerImages dockerImages() { return dockerImages; }
/** Returns the status of maintenance jobs managed by this. */
public JobControl jobControl() { return jobControl; }
/**
* Finds and returns the node with the hostname in any of the given states, or empty if not found
*
* @param hostname the full host name of the node
* @param inState the states the node may be in. If no states are given, it will be returned from any state
* @return the node, or empty if it was not found in any of the given states
*/
public Optional<Node> getNode(String hostname, State ... inState) {
return db.getNode(hostname, inState);
}
/**
* Returns all nodes in any of the given states.
*
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
* @return the node, or empty if it was not found in any of the given states
*/
public List<Node> getNodes(State ... inState) {
return new ArrayList<>(db.getNodes(inState));
}
/**
* Finds and returns the nodes of the given type in any of the given states.
*
* @param type the node type to return
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
* @return the node, or empty if it was not found in any of the given states
*/
public List<Node> getNodes(NodeType type, State ... inState) {
return db.getNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList());
}
/** Returns a filterable list of all nodes in this repository */
public NodeList list() {
return NodeList.copyOf(getNodes());
}
/** Returns a locked list of all nodes in this repository */
public LockedNodeList list(Mutex lock) {
return new LockedNodeList(getNodes(), lock);
}
/** Returns a filterable list of all load balancers in this repository */
public LoadBalancerList loadBalancers() {
return loadBalancers((ignored) -> true);
}
/** Returns a filterable list of load balancers belonging to given application */
public LoadBalancerList loadBalancers(ApplicationId application) {
return loadBalancers((id) -> id.application().equals(application));
}
private LoadBalancerList loadBalancers(Predicate<LoadBalancerId> predicate) {
return LoadBalancerList.copyOf(db.readLoadBalancers(predicate).values());
}
public List<Node> getNodes(ApplicationId id, State ... inState) { return db.getNodes(id, inState); }
public List<Node> getInactive() { return db.getNodes(State.inactive); }
public List<Node> getFailed() { return db.getNodes(State.failed); }
/**
* Returns the ACL for the node (trusted nodes, networks and ports)
*/
private NodeAcl getNodeAcl(Node node, NodeList candidates) {
Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname));
Set<Integer> trustedPorts = new LinkedHashSet<>();
Set<String> trustedNetworks = new LinkedHashSet<>();
trustedPorts.add(22);
candidates.parentOf(node).ifPresent(trustedNodes::add);
node.allocation().ifPresent(allocation -> {
trustedNodes.addAll(candidates.owner(allocation.owner()).asList());
loadBalancers(allocation.owner()).asList().stream()
.map(LoadBalancer::instance)
.map(LoadBalancerInstance::networks)
.forEach(trustedNetworks::addAll);
});
switch (node.type()) {
case tenant:
trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList());
node.allocation().ifPresent(allocation ->
trustedNodes.addAll(candidates.parentsOf(candidates.owner(allocation.owner()).asList()).asList()));
if (node.state() == State.ready) {
trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList());
}
break;
case config:
trustedNodes.addAll(candidates.asList());
trustedPorts.add(4443);
break;
case proxy:
trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
trustedPorts.add(443);
trustedPorts.add(4080);
trustedPorts.add(4443);
break;
case controller:
trustedPorts.add(4443);
trustedPorts.add(443);
break;
default:
illegal("Don't know how to create ACL for " + node + " of type " + node.type());
}
return new NodeAcl(node, trustedNodes, trustedNetworks, trustedPorts);
}
/**
* Creates a list of node ACLs which identify which nodes the given node should trust
*
* @param node Node for which to generate ACLs
* @param children Return ACLs for the children of the given node (e.g. containers on a Docker host)
* @return List of node ACLs
*/
public List<NodeAcl> getNodeAcls(Node node, boolean children) {
NodeList candidates = list();
if (children) {
return candidates.childrenOf(node).asList().stream()
.map(childNode -> getNodeAcl(childNode, candidates))
.collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}
return Collections.singletonList(getNodeAcl(node, candidates));
}
public NodeFlavors getAvailableFlavors() {
return flavors;
}
/** Creates a new node object, without adding it to the node repo. If no IP address is given, it will be resolved */
public Node createNode(String openStackId, String hostname, IP.Config ipConfig, Optional<String> parentHostname,
Flavor flavor, Optional<TenantName> reservedTo, NodeType type) {
if (ipConfig.primary().isEmpty())
ipConfig = ipConfig.with(nameResolver.getAllByNameOrThrow(hostname));
return Node.create(openStackId, ipConfig, hostname, parentHostname, Optional.empty(), flavor, reservedTo, type);
}
public Node createNode(String openStackId, String hostname, Optional<String> parentHostname, Flavor flavor, NodeType type) {
return createNode(openStackId, hostname, IP.Config.EMPTY, parentHostname, flavor, Optional.empty(), type);
}
/** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */
public List<Node> addDockerNodes(LockedNodeList nodes) {
for (Node node : nodes) {
if ( ! node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER))
illegal("Cannot add " + node + ": This is not a docker node");
if ( ! node.allocation().isPresent())
illegal("Cannot add " + node + ": Docker containers needs to be allocated");
Optional<Node> existing = getNode(node.hostname());
if (existing.isPresent())
illegal("Cannot add " + node + ": A node with this name already exists (" +
existing.get() + ", " + existing.get().history() + "). Node to be added: " +
node + ", " + node.history());
}
return db.addNodesInState(nodes.asList(), State.reserved);
}
/**
* Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes.
* If any of the nodes already exists in the deprovisioned state, they will be moved back to provisioned instead
* and the returned list will contain the existing (moved) node.
*/
public List<Node> addNodes(List<Node> nodes, Agent agent) {
try (Mutex lock = lockUnallocated()) {
List<Node> nodesToAdd = new ArrayList<>();
List<Node> nodesToMove = new ArrayList<>();
for (int i = 0; i < nodes.size(); i++) {
var node = nodes.get(i);
for (int j = 0; j < i; j++) {
if (node.equals(nodes.get(j)))
illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
}
Optional<Node> existing = getNode(node.hostname());
if (existing.isPresent()) {
if (existing.get().state() != State.deprovisioned)
illegal("Cannot add " + node + ": A node with this name already exists");
nodesToMove.add(existing.get());
}
else {
nodesToAdd.add(node);
}
}
List<Node> resultingNodes = new ArrayList<>();
resultingNodes.addAll(db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), State.provisioned));
nodesToMove.forEach(node -> resultingNodes.add(move(node, State.provisioned, agent, Optional.empty())));
return resultingNodes;
}
}
/** Sets a list of nodes ready and returns the nodes in the ready state */
public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
try (Mutex lock = lockUnallocated()) {
List<Node> nodesWithResetFields = nodes.stream()
.map(node -> {
if (node.state() != State.provisioned && node.state() != State.dirty)
illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
return node.with(node.status().withWantToRetire(false).withWantToDeprovision(false));
})
.collect(Collectors.toList());
return db.writeTo(State.ready, nodesWithResetFields, agent, Optional.of(reason));
}
}
public Node setReady(String hostname, Agent agent, String reason) {
Node nodeToReady = getNode(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));
if (nodeToReady.state() == State.ready) return nodeToReady;
return setReady(Collections.singletonList(nodeToReady), agent, reason).get(0);
}
/** Reserve nodes. This method does <b>not</b> lock the node repository */
public List<Node> reserve(List<Node> nodes) {
return db.writeTo(State.reserved, nodes, Agent.application, Optional.empty());
}
/** Activate nodes. This method does <b>not</b> lock the node repository */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
return db.writeTo(State.active, nodes, Agent.application, Optional.empty(), transaction);
}
/**
* Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
*
* @param application the application the nodes belong to
* @param nodes the nodes to make removable. These nodes MUST be in the active state.
*/
public void setRemovable(ApplicationId application, List<Node> nodes) {
try (Mutex lock = lock(application)) {
List<Node> removableNodes =
nodes.stream().map(node -> node.with(node.allocation().get().removable()))
.collect(Collectors.toList());
write(removableNodes, lock);
}
}
public void deactivate(ApplicationId application, NestedTransaction transaction) {
try (Mutex lock = lock(application)) {
deactivate(db.getNodes(application, State.reserved, State.active), transaction);
}
}
/**
* Deactivates these nodes in a transaction and returns
* the nodes in the new state which will hold if the transaction commits.
* This method does <b>not</b> lock
*/
public List<Node> deactivate(List<Node> nodes, NestedTransaction transaction) {
return db.writeTo(State.inactive, nodes, Agent.application, Optional.empty(), transaction);
}
/** Move nodes to the dirty state */
public List<Node> setDirty(List<Node> nodes, Agent agent, String reason) {
return performOn(NodeListFilter.from(nodes), (node, lock) -> setDirty(node, agent, reason));
}
/**
* Set a node dirty, allowed if it is in the provisioned, inactive, failed or parked state.
* Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
*
* @throws IllegalArgumentException if the node has hardware failure
*/
public Node setDirty(Node node, Agent agent, String reason) {
return db.writeTo(State.dirty, node, agent, Optional.of(reason));
}
public List<Node> dirtyRecursively(String hostname, Agent agent, String reason) {
Node nodeToDirty = getNode(hostname).orElseThrow(() ->
new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));
List<Node> nodesToDirty =
(nodeToDirty.type().isDockerHost() ?
Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
Stream.of(nodeToDirty))
.filter(node -> node.state() != State.dirty)
.collect(Collectors.toList());
List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
.filter(node -> node.state() != State.provisioned)
.filter(node -> node.state() != State.failed)
.filter(node -> node.state() != State.parked)
.map(Node::hostname)
.collect(Collectors.toList());
if ( ! hostnamesNotAllowedToDirty.isEmpty())
illegal("Could not deallocate " + nodeToDirty + ": " +
hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked]");
return nodesToDirty.stream().map(node -> setDirty(node, agent, reason)).collect(Collectors.toList());
}
/**
* Fails this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node fail(String hostname, Agent agent, String reason) {
return move(hostname, true, State.failed, agent, Optional.of(reason));
}
/**
* Fails all the nodes that are children of hostname before finally failing the hostname itself.
*
* @return List of all the failed nodes in their new state
*/
public List<Node> failRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, State.failed, agent, Optional.of(reason));
}
/**
* Parks this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
return move(hostname, keepAllocation, State.parked, agent, Optional.of(reason));
}
/**
* Parks all the nodes that are children of hostname before finally parking the hostname itself.
*
* @return List of all the parked nodes in their new state
*/
public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, State.parked, agent, Optional.of(reason));
}
/**
* Moves a previously failed or parked node back to the active state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node reactivate(String hostname, Agent agent, String reason) {
return move(hostname, true, State.active, agent, Optional.of(reason));
}
private List<Node> moveRecursively(String hostname, State toState, Agent agent, Optional<String> reason) {
List<Node> moved = list().childrenOf(hostname).asList().stream()
.map(child -> move(child, toState, agent, reason))
.collect(Collectors.toList());
moved.add(move(hostname, true, toState, agent, reason));
return moved;
}
private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason) {
Node node = getNode(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));
if (!keepAllocation && node.allocation().isPresent()) {
node = node.withoutAllocation();
}
return move(node, toState, agent, reason);
}
private Node move(Node node, State toState, Agent agent, Optional<String> reason) {
if (toState == Node.State.active && ! node.allocation().isPresent())
illegal("Could not set " + node + " active. It has no allocation.");
try (Mutex lock = lock(node)) {
if (toState == State.active) {
for (Node currentActive : getNodes(node.allocation().get().owner(), State.active)) {
if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
&& node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
}
}
return db.writeTo(toState, node, agent, reason);
}
}
/*
* This method is used by the REST API to handle readying nodes for new allocations. For tenant docker
* containers this will remove the node from node repository, otherwise the node will be moved to state ready.
*/
public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
if (node.state() != State.dirty)
illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
return removeRecursively(node, true).get(0);
}
if (node.state() == State.ready) return node;
Node parentHost = node.parentHostname().flatMap(this::getNode).orElse(node);
List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost);
if ( ! failureReasons.isEmpty())
illegal(node + " cannot be readied because it has hard failures: " + failureReasons);
return setReady(Collections.singletonList(node), agent, reason).get(0);
}
/**
* Removes all the nodes that are children of hostname before finally removing the hostname itself.
*
* @return a List of all the nodes that have been removed or (for hosts) deprovisioned
*/
public List<Node> removeRecursively(String hostname) {
Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
return removeRecursively(node, false);
}
/**
* Throws if the given node cannot be removed. Removal is allowed if:
* - Tenant node: node is unallocated
* - Non-Docker-container node: iff in state provisioned|failed|parked
* - Docker-container-node:
* If only removing the container node: node in state ready
* If also removing the parent node: child is in state provisioned|failed|parked|dirty|ready
*/
private void requireRemovable(Node node, boolean removingAsChild, boolean force) {
if (force) return;
if (node.type() == NodeType.tenant && node.allocation().isPresent())
illegal(node + " is currently allocated and cannot be removed");
if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && !removingAsChild) {
if (node.state() != State.ready)
illegal(node + " can not be removed as it is not in the state [ready]");
}
else if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER) {
Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked, State.dirty, State.ready);
if ( ! legalStates.contains(node.state()))
illegal(node + " can not be removed as it is not in the states " + legalStates);
}
else {
Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked);
if (! legalStates.contains(node.state()))
illegal(node + " can not be removed as it is not in the states " + legalStates);
}
}
/**
* Increases the restart generation of the active nodes matching the filter.
*
* @return the nodes in their new state.
*/
public List<Node> restart(NodeFilter filter) {
return performOn(StateFilter.from(State.active, filter),
(node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()),
lock));
}
/**
* Increases the reboot generation of the nodes matching the filter.
* @return the nodes in their new state.
*/
public List<Node> reboot(NodeFilter filter) {
return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock));
}
/**
* Set target OS version of all nodes matching given filter.
*
* @return the nodes in their new state.
*/
public List<Node> upgradeOs(NodeFilter filter, Optional<Version> version) {
return performOn(filter, (node, lock) -> {
var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version));
return write(node.with(newStatus), lock);
});
}
/**
* Writes this node after it has changed some internal state but NOT changed its state field.
* This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
*
* @param lock Already acquired lock
* @return the written node for convenience
*/
public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); }
/**
* Writes these nodes after they have changed some internal state but NOT changed their state field.
* This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
*
* @param lock already acquired lock
* @return the written nodes for convenience
*/
public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
return db.writeTo(nodes, Agent.system, Optional.empty());
}
/**
* Performs an operation requiring locking on all nodes matching some filter.
*
* @param filter the filter determining the set of nodes where the operation will be performed
* @param action the action to perform
* @return the set of nodes on which the action was performed, as they became as a result of the operation
*/
private List<Node> performOn(NodeFilter filter, BiFunction<Node, Mutex, Node> action) {
List<Node> unallocatedNodes = new ArrayList<>();
ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
for (Node node : db.getNodes()) {
if ( ! filter.matches(node)) continue;
if (node.allocation().isPresent())
allocatedNodes.put(node.allocation().get().owner(), node);
else
unallocatedNodes.add(node);
}
List<Node> resultingNodes = new ArrayList<>();
try (Mutex lock = lockUnallocated()) {
for (Node node : unallocatedNodes)
resultingNodes.add(action.apply(node, lock));
}
for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
try (Mutex lock = lock(applicationNodes.getKey())) {
for (Node node : applicationNodes.getValue())
resultingNodes.add(action.apply(node, lock));
}
}
return resultingNodes;
}
/** Returns the time keeper of this system */
public Clock clock() { return clock; }
/** Returns the zone of this system */
public Zone zone() { return zone; }
/** Create a lock which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application) { return db.lock(application); }
/** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); }
/** Create a lock which provides exclusive rights to modifying unallocated nodes */
public Mutex lockUnallocated() { return db.lockInactive(); }
/** Acquires the appropriate lock for this node */
public Mutex lock(Node node) {
return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
}
private void illegal(String message) {
throw new IllegalArgumentException(message);
}
} |
```suggestion ``` | public void writeDockerImageRepository(Optional<String> dockerImageRepository) {
System.out.println("Writing docker image repo");
dockerImageRepository.ifPresent(repo -> configCurator.putData(dockerImageRepositoryPath(), repo));
} | System.out.println("Writing docker image repo"); | public void writeDockerImageRepository(Optional<String> dockerImageRepository) {
dockerImageRepository.ifPresent(repo -> configCurator.putData(dockerImageRepositoryPath(), repo));
} | class SessionZooKeeperClient {
private static final java.util.logging.Logger log = java.util.logging.Logger.getLogger(SessionZooKeeperClient.class.getName());
static final String APPLICATION_ID_PATH = "applicationId";
private static final String VERSION_PATH = "version";
private static final String CREATE_TIME_PATH = "createTime";
private static final String DOCKER_IMAGE_REPOSITORY_PATH = "dockerImageRepository";
private final Curator curator;
private final ConfigCurator configCurator;
private final Path sessionPath;
private final Path sessionStatusPath;
private final String serverId;
private final Optional<NodeFlavors> nodeFlavors;
public SessionZooKeeperClient(Curator curator, Path sessionPath) {
this(curator, ConfigCurator.create(curator), sessionPath, "", Optional.empty());
}
public SessionZooKeeperClient(Curator curator,
ConfigCurator configCurator,
Path sessionPath,
String serverId,
Optional<NodeFlavors> nodeFlavors) {
this.curator = curator;
this.configCurator = configCurator;
this.sessionPath = sessionPath;
this.serverId = serverId;
this.nodeFlavors = nodeFlavors;
this.sessionStatusPath = sessionPath.append(ConfigCurator.SESSIONSTATE_ZK_SUBPATH);
}
public void writeStatus(Session.Status sessionStatus) {
try {
createWriteStatusTransaction(sessionStatus).commit();
} catch (Exception e) {
throw new RuntimeException("Unable to write session status", e);
}
}
public Session.Status readStatus() {
try {
String data = configCurator.getData(sessionStatusPath.getAbsolute());
return Session.Status.parse(data);
} catch (Exception e) {
log.log(LogLevel.INFO, "Unable to read session status, assuming it was deleted");
return Session.Status.NONE;
}
}
Curator.CompletionWaiter createPrepareWaiter() {
return createCompletionWaiter(PREPARE_BARRIER);
}
Curator.CompletionWaiter createActiveWaiter() {
return createCompletionWaiter(ACTIVE_BARRIER);
}
Curator.CompletionWaiter getPrepareWaiter() {
return getCompletionWaiter(getWaiterPath(PREPARE_BARRIER));
}
Curator.CompletionWaiter getActiveWaiter() {
return getCompletionWaiter(getWaiterPath(ACTIVE_BARRIER));
}
Curator.CompletionWaiter getUploadWaiter() { return getCompletionWaiter(getWaiterPath(UPLOAD_BARRIER)); }
private static final String PREPARE_BARRIER = "prepareBarrier";
private static final String ACTIVE_BARRIER = "activeBarrier";
private static final String UPLOAD_BARRIER = "uploadBarrier";
private Path getWaiterPath(String barrierName) {
return sessionPath.append(barrierName);
}
/** Returns the number of node members needed in a barrier */
private int getNumberOfMembers() {
return (curator.zooKeeperEnsembleCount() / 2) + 1;
}
private Curator.CompletionWaiter createCompletionWaiter(String waiterNode) {
return curator.createCompletionWaiter(sessionPath, waiterNode, getNumberOfMembers(), serverId);
}
private Curator.CompletionWaiter getCompletionWaiter(Path path) {
return curator.getCompletionWaiter(path, getNumberOfMembers(), serverId);
}
public void delete(NestedTransaction transaction ) {
try {
log.log(LogLevel.DEBUG, "Deleting " + sessionPath.getAbsolute());
CuratorTransaction curatorTransaction = new CuratorTransaction(curator);
CuratorOperations.deleteAll(sessionPath.getAbsolute(), curator).forEach(curatorTransaction::add);
transaction.add(curatorTransaction);
transaction.commit();
} catch (RuntimeException e) {
log.log(LogLevel.INFO, "Error deleting session (" + sessionPath.getAbsolute() + ") from zookeeper", e);
}
}
/** Returns a transaction deleting this session on commit */
public CuratorTransaction deleteTransaction() {
return CuratorTransaction.from(CuratorOperations.deleteAll(sessionPath.getAbsolute(), curator), curator);
}
public ApplicationPackage loadApplicationPackage() {
return new ZKApplicationPackage(configCurator, sessionPath, nodeFlavors);
}
public ConfigDefinitionRepo getUserConfigDefinitions() {
return new UserConfigDefinitionRepo(configCurator, sessionPath.append(ConfigCurator.USER_DEFCONFIGS_ZK_SUBPATH).getAbsolute());
}
private String applicationIdPath() {
return sessionPath.append(APPLICATION_ID_PATH).getAbsolute();
}
public void writeApplicationId(ApplicationId id) {
configCurator.putData(applicationIdPath(), id.serializedForm());
}
public ApplicationId readApplicationId() {
if ( ! configCurator.exists(applicationIdPath())) return ApplicationId.defaultId();
return ApplicationId.fromSerializedForm(configCurator.getData(applicationIdPath()));
}
private String versionPath() {
return sessionPath.append(VERSION_PATH).getAbsolute();
}
private String dockerImageRepositoryPath() {
return sessionPath.append(DOCKER_IMAGE_REPOSITORY_PATH).getAbsolute();
}
public void writeVespaVersion(Version version) {
configCurator.putData(versionPath(), version.toString());
}
public Version readVespaVersion() {
if ( ! configCurator.exists(versionPath())) return Vtag.currentVersion;
return new Version(configCurator.getData(versionPath()));
}
public Optional<String> readDockerImageRepository() {
if ( ! configCurator.exists(dockerImageRepositoryPath())) return Optional.empty();
String dockerImageRepository = configCurator.getData(dockerImageRepositoryPath());
return dockerImageRepository.isEmpty() ? Optional.empty() : Optional.of(dockerImageRepository);
}
public long readCreateTime() {
String path = getCreateTimePath();
if ( ! configCurator.exists(path)) return 0L;
return Long.parseLong(configCurator.getData(path));
}
private String getCreateTimePath() {
return sessionPath.append(CREATE_TIME_PATH).getAbsolute();
}
AllocatedHosts getAllocatedHosts() {
return loadApplicationPackage().getAllocatedHosts()
.orElseThrow(() -> new IllegalStateException("Allocated hosts does not exists"));
}
public ZooKeeperDeployer createDeployer(DeployLogger logger) {
ZooKeeperClient zkClient = new ZooKeeperClient(configCurator, logger, true, sessionPath);
return new ZooKeeperDeployer(zkClient);
}
public Transaction createWriteStatusTransaction(Session.Status status) {
String path = sessionStatusPath.getAbsolute();
CuratorTransaction transaction = new CuratorTransaction(curator);
if (configCurator.exists(path)) {
transaction.add(CuratorOperations.setData(sessionStatusPath.getAbsolute(), Utf8.toBytes(status.name())));
} else {
transaction.add(CuratorOperations.create(sessionStatusPath.getAbsolute(), Utf8.toBytes(status.name())));
}
return transaction;
}
/**
* Create necessary paths atomically for a new session.
*
* @param createTime Time of session creation.
* @param timeUnit Time unit of createTime.
*/
public void createNewSession(long createTime, TimeUnit timeUnit) {
CuratorTransaction transaction = new CuratorTransaction(curator);
transaction.add(CuratorOperations.create(sessionPath.getAbsolute()));
transaction.add(CuratorOperations.create(sessionPath.append(UPLOAD_BARRIER).getAbsolute()));
transaction.add(createWriteStatusTransaction(Session.Status.NEW).operations());
transaction.add(CuratorOperations.create(getCreateTimePath(), Utf8.toBytes(String.valueOf(timeUnit.toSeconds(createTime)))));
transaction.commit();
}
} | class SessionZooKeeperClient {
private static final java.util.logging.Logger log = java.util.logging.Logger.getLogger(SessionZooKeeperClient.class.getName());
static final String APPLICATION_ID_PATH = "applicationId";
private static final String VERSION_PATH = "version";
private static final String CREATE_TIME_PATH = "createTime";
private static final String DOCKER_IMAGE_REPOSITORY_PATH = "dockerImageRepository";
private final Curator curator;
private final ConfigCurator configCurator;
private final Path sessionPath;
private final Path sessionStatusPath;
private final String serverId;
private final Optional<NodeFlavors> nodeFlavors;
public SessionZooKeeperClient(Curator curator, Path sessionPath) {
this(curator, ConfigCurator.create(curator), sessionPath, "", Optional.empty());
}
public SessionZooKeeperClient(Curator curator,
ConfigCurator configCurator,
Path sessionPath,
String serverId,
Optional<NodeFlavors> nodeFlavors) {
this.curator = curator;
this.configCurator = configCurator;
this.sessionPath = sessionPath;
this.serverId = serverId;
this.nodeFlavors = nodeFlavors;
this.sessionStatusPath = sessionPath.append(ConfigCurator.SESSIONSTATE_ZK_SUBPATH);
}
public void writeStatus(Session.Status sessionStatus) {
try {
createWriteStatusTransaction(sessionStatus).commit();
} catch (Exception e) {
throw new RuntimeException("Unable to write session status", e);
}
}
public Session.Status readStatus() {
try {
String data = configCurator.getData(sessionStatusPath.getAbsolute());
return Session.Status.parse(data);
} catch (Exception e) {
log.log(LogLevel.INFO, "Unable to read session status, assuming it was deleted");
return Session.Status.NONE;
}
}
Curator.CompletionWaiter createPrepareWaiter() {
return createCompletionWaiter(PREPARE_BARRIER);
}
Curator.CompletionWaiter createActiveWaiter() {
return createCompletionWaiter(ACTIVE_BARRIER);
}
Curator.CompletionWaiter getPrepareWaiter() {
return getCompletionWaiter(getWaiterPath(PREPARE_BARRIER));
}
Curator.CompletionWaiter getActiveWaiter() {
return getCompletionWaiter(getWaiterPath(ACTIVE_BARRIER));
}
Curator.CompletionWaiter getUploadWaiter() { return getCompletionWaiter(getWaiterPath(UPLOAD_BARRIER)); }
private static final String PREPARE_BARRIER = "prepareBarrier";
private static final String ACTIVE_BARRIER = "activeBarrier";
private static final String UPLOAD_BARRIER = "uploadBarrier";
private Path getWaiterPath(String barrierName) {
return sessionPath.append(barrierName);
}
/** Returns the number of node members needed in a barrier */
private int getNumberOfMembers() {
return (curator.zooKeeperEnsembleCount() / 2) + 1;
}
private Curator.CompletionWaiter createCompletionWaiter(String waiterNode) {
return curator.createCompletionWaiter(sessionPath, waiterNode, getNumberOfMembers(), serverId);
}
private Curator.CompletionWaiter getCompletionWaiter(Path path) {
return curator.getCompletionWaiter(path, getNumberOfMembers(), serverId);
}
public void delete(NestedTransaction transaction ) {
try {
log.log(LogLevel.DEBUG, "Deleting " + sessionPath.getAbsolute());
CuratorTransaction curatorTransaction = new CuratorTransaction(curator);
CuratorOperations.deleteAll(sessionPath.getAbsolute(), curator).forEach(curatorTransaction::add);
transaction.add(curatorTransaction);
transaction.commit();
} catch (RuntimeException e) {
log.log(LogLevel.INFO, "Error deleting session (" + sessionPath.getAbsolute() + ") from zookeeper", e);
}
}
/** Returns a transaction deleting this session on commit */
public CuratorTransaction deleteTransaction() {
return CuratorTransaction.from(CuratorOperations.deleteAll(sessionPath.getAbsolute(), curator), curator);
}
public ApplicationPackage loadApplicationPackage() {
return new ZKApplicationPackage(configCurator, sessionPath, nodeFlavors);
}
public ConfigDefinitionRepo getUserConfigDefinitions() {
return new UserConfigDefinitionRepo(configCurator, sessionPath.append(ConfigCurator.USER_DEFCONFIGS_ZK_SUBPATH).getAbsolute());
}
private String applicationIdPath() {
return sessionPath.append(APPLICATION_ID_PATH).getAbsolute();
}
public void writeApplicationId(ApplicationId id) {
configCurator.putData(applicationIdPath(), id.serializedForm());
}
public ApplicationId readApplicationId() {
if ( ! configCurator.exists(applicationIdPath())) return ApplicationId.defaultId();
return ApplicationId.fromSerializedForm(configCurator.getData(applicationIdPath()));
}
private String versionPath() {
return sessionPath.append(VERSION_PATH).getAbsolute();
}
private String dockerImageRepositoryPath() {
return sessionPath.append(DOCKER_IMAGE_REPOSITORY_PATH).getAbsolute();
}
public void writeVespaVersion(Version version) {
configCurator.putData(versionPath(), version.toString());
}
public Version readVespaVersion() {
if ( ! configCurator.exists(versionPath())) return Vtag.currentVersion;
return new Version(configCurator.getData(versionPath()));
}
public Optional<String> readDockerImageRepository() {
if ( ! configCurator.exists(dockerImageRepositoryPath())) return Optional.empty();
String dockerImageRepository = configCurator.getData(dockerImageRepositoryPath());
return dockerImageRepository.isEmpty() ? Optional.empty() : Optional.of(dockerImageRepository);
}
public long readCreateTime() {
String path = getCreateTimePath();
if ( ! configCurator.exists(path)) return 0L;
return Long.parseLong(configCurator.getData(path));
}
private String getCreateTimePath() {
return sessionPath.append(CREATE_TIME_PATH).getAbsolute();
}
AllocatedHosts getAllocatedHosts() {
return loadApplicationPackage().getAllocatedHosts()
.orElseThrow(() -> new IllegalStateException("Allocated hosts does not exists"));
}
public ZooKeeperDeployer createDeployer(DeployLogger logger) {
ZooKeeperClient zkClient = new ZooKeeperClient(configCurator, logger, true, sessionPath);
return new ZooKeeperDeployer(zkClient);
}
public Transaction createWriteStatusTransaction(Session.Status status) {
String path = sessionStatusPath.getAbsolute();
CuratorTransaction transaction = new CuratorTransaction(curator);
if (configCurator.exists(path)) {
transaction.add(CuratorOperations.setData(sessionStatusPath.getAbsolute(), Utf8.toBytes(status.name())));
} else {
transaction.add(CuratorOperations.create(sessionStatusPath.getAbsolute(), Utf8.toBytes(status.name())));
}
return transaction;
}
/**
* Create necessary paths atomically for a new session.
*
* @param createTime Time of session creation.
* @param timeUnit Time unit of createTime.
*/
public void createNewSession(long createTime, TimeUnit timeUnit) {
CuratorTransaction transaction = new CuratorTransaction(curator);
transaction.add(CuratorOperations.create(sessionPath.getAbsolute()));
transaction.add(CuratorOperations.create(sessionPath.append(UPLOAD_BARRIER).getAbsolute()));
transaction.add(createWriteStatusTransaction(Session.Status.NEW).operations());
transaction.add(CuratorOperations.create(getCreateTimePath(), Utf8.toBytes(String.valueOf(timeUnit.toSeconds(createTime)))));
transaction.commit();
}
} |
Consider adding a comment why this is required. something like: "test framework currently use this identity when executing requests against containers" | private boolean canRouteDirectlyTo(DeploymentId deploymentId, Application application) {
if (controller.system().isPublic()) return true;
var athenzService = application.deploymentSpec().instance(deploymentId.applicationId().instance())
.flatMap(instance -> instance.athenzService(deploymentId.zoneId().environment(),
deploymentId.zoneId().region()));
if (athenzService.isEmpty()) return false;
var instance = application.require(deploymentId.applicationId().instance());
var compileVersion = Optional.ofNullable(instance.deployments().get(deploymentId.zoneId()))
.map(Deployment::applicationVersion)
.flatMap(ApplicationVersion::compileVersion);
if (compileVersion.isEmpty()) return false;
if (compileVersion.get().isBefore(DIRECT_ROUTING_MIN_VERSION)) return false;
return this.allowDirectRouting.with(FetchVector.Dimension.APPLICATION_ID,
deploymentId.applicationId().serializedForm())
.value();
} | private boolean canRouteDirectlyTo(DeploymentId deploymentId, Application application) {
if (controller.system().isPublic()) return true;
var athenzService = application.deploymentSpec().instance(deploymentId.applicationId().instance())
.flatMap(instance -> instance.athenzService(deploymentId.zoneId().environment(),
deploymentId.zoneId().region()));
if (athenzService.isEmpty()) return false;
var instance = application.require(deploymentId.applicationId().instance());
var compileVersion = Optional.ofNullable(instance.deployments().get(deploymentId.zoneId()))
.map(Deployment::applicationVersion)
.flatMap(ApplicationVersion::compileVersion)
.or(() -> application.latestVersion().flatMap(ApplicationVersion::compileVersion));
if (compileVersion.isEmpty()) return false;
if (compileVersion.get().isBefore(DIRECT_ROUTING_MIN_VERSION)) return false;
return this.allowDirectRouting.with(FetchVector.Dimension.APPLICATION_ID,
deploymentId.applicationId().serializedForm())
.value();
} | class RoutingController {
/** The minimum Vespa version that supports directly routed endpoints */
public static final Version DIRECT_ROUTING_MIN_VERSION = new Version(Integer.MAX_VALUE, Integer.MAX_VALUE,
Integer.MAX_VALUE);
private final Controller controller;
private final RoutingPolicies routingPolicies;
private final RotationRepository rotationRepository;
private final BooleanFlag allowDirectRouting;
public RoutingController(Controller controller, RotationsConfig rotationsConfig) {
this.controller = Objects.requireNonNull(controller, "controller must be non-null");
this.routingPolicies = new RoutingPolicies(controller);
this.rotationRepository = new RotationRepository(rotationsConfig, controller.applications(),
controller.curator());
this.allowDirectRouting = Flags.ALLOW_DIRECT_ROUTING.bindTo(controller.flagSource());
}
public RoutingPolicies policies() {
return routingPolicies;
}
public RotationRepository rotations() {
return rotationRepository;
}
/** Returns zone-scoped endpoints for given deployment */
public EndpointList endpointsOf(DeploymentId deployment) {
var endpoints = new LinkedHashSet<Endpoint>();
controller.serviceRegistry().routingGenerator().clusterEndpoints(deployment)
.forEach((cluster, url) -> endpoints.add(Endpoint.of(deployment.applicationId())
.target(cluster, deployment.zoneId())
.routingMethod(RoutingMethod.shared)
.on(Port.fromRoutingMethod(RoutingMethod.shared))
.in(controller.system())));
boolean hasSharedEndpoint = !endpoints.isEmpty();
var application = Suppliers.memoize(() -> controller.applications().requireApplication(TenantAndApplicationId.from(deployment.applicationId())));
for (var policy : routingPolicies.get(deployment).values()) {
if (!policy.status().isActive()) continue;
for (var routingMethod : controller.zoneRegistry().routingMethods(policy.id().zone())) {
if (routingMethod.isDirect() && !canRouteDirectlyTo(deployment, application.get())) continue;
if (hasSharedEndpoint && routingMethod == RoutingMethod.shared) continue;
endpoints.add(policy.endpointIn(controller.system(), routingMethod));
}
}
return EndpointList.copyOf(endpoints);
}
/** Returns global-scoped endpoints for given instance */
public EndpointList endpointsOf(ApplicationId instance) {
return endpointsOf(controller.applications().requireInstance(instance));
}
/** Returns global-scoped endpoints for given instance */
public EndpointList endpointsOf(Instance instance) {
var endpoints = new LinkedHashSet<Endpoint>();
for (var rotation : instance.rotations()) {
var zones = rotation.regions().stream()
.map(region -> ZoneId.from(Environment.prod, region))
.collect(Collectors.toList());
EndpointList.global(RoutingId.of(instance.id(), rotation.endpointId()),
controller.system(), commonRoutingMethodsOf(zones))
.requiresRotation()
.forEach(endpoints::add);
}
var zonesByRoutingId = new LinkedHashMap<RoutingId, List<ZoneId>>();
for (var policy : routingPolicies.get(instance.id()).values()) {
if (!policy.status().isActive()) continue;
for (var endpointId : policy.endpoints()) {
var routingId = RoutingId.of(instance.id(), endpointId);
zonesByRoutingId.putIfAbsent(routingId, new ArrayList<>());
zonesByRoutingId.get(routingId).add(policy.id().zone());
}
}
zonesByRoutingId.forEach((routingId, zones) -> {
EndpointList.global(routingId, controller.system(), commonRoutingMethodsOf(zones))
.not().requiresRotation()
.forEach(endpoints::add);
});
return EndpointList.copyOf(endpoints);
}
/** Returns all non-global endpoints and corresponding cluster IDs for given deployments, grouped by their zone */
public Map<ZoneId, List<Endpoint>> zoneEndpointsOf(Collection<DeploymentId> deployments) {
var endpoints = new TreeMap<ZoneId, List<Endpoint>>(Comparator.comparing(ZoneId::value));
for (var deployment : deployments) {
var zoneEndpoints = endpointsOf(deployment).scope(Endpoint.Scope.zone).asList();
if ( ! zoneEndpoints.isEmpty()) {
endpoints.put(deployment.zoneId(), zoneEndpoints);
}
}
return Collections.unmodifiableMap(endpoints);
}
/** Change status of all global endpoints for given deployment */
public void setGlobalRotationStatus(DeploymentId deployment, EndpointStatus status) {
endpointsOf(deployment.applicationId()).requiresRotation().primary().ifPresent(endpoint -> {
try {
controller.serviceRegistry().configServer().setGlobalRotationStatus(deployment, endpoint.upstreamIdOf(deployment), status);
} catch (Exception e) {
throw new RuntimeException("Failed to set rotation status of " + endpoint + " in " + deployment, e);
}
});
}
/** Get global endpoint status for given deployment */
public Map<Endpoint, EndpointStatus> globalRotationStatus(DeploymentId deployment) {
var routingEndpoints = new LinkedHashMap<Endpoint, EndpointStatus>();
endpointsOf(deployment.applicationId()).requiresRotation().primary().ifPresent(endpoint -> {
var upstreamName = endpoint.upstreamIdOf(deployment);
var status = controller.serviceRegistry().configServer().getGlobalRotationStatus(deployment, upstreamName);
routingEndpoints.put(endpoint, status);
});
return Collections.unmodifiableMap(routingEndpoints);
}
/**
* Assigns one or more global rotations to given application, if eligible. The given application is implicitly
* stored, ensuring that the assigned rotation(s) are persisted when this returns.
*/
public LockedApplication assignRotations(LockedApplication application, InstanceName instanceName) {
try (RotationLock rotationLock = rotationRepository.lock()) {
var rotations = rotationRepository.getOrAssignRotations(application.get().deploymentSpec(),
application.get().require(instanceName),
rotationLock);
application = application.with(instanceName, instance -> instance.with(rotations));
controller.applications().store(application);
}
return application;
}
/**
* Register endpoints for rotations assigned to given application and zone in DNS.
*
* @return the registered endpoints
*/
public Set<ContainerEndpoint> registerEndpointsInDns(DeploymentSpec deploymentSpec, Instance instance, ZoneId zone) {
var containerEndpoints = new HashSet<ContainerEndpoint>();
boolean registerLegacyNames = deploymentSpec.instance(instance.name())
.flatMap(DeploymentInstanceSpec::globalServiceId)
.isPresent();
for (var assignedRotation : instance.rotations()) {
var names = new ArrayList<String>();
var endpoints = endpointsOf(instance).named(assignedRotation.endpointId()).requiresRotation();
if (!registerLegacyNames && !assignedRotation.regions().contains(zone.region())) {
continue;
}
if (!registerLegacyNames) {
endpoints = endpoints.not().legacy();
}
var rotation = rotationRepository.getRotation(assignedRotation.rotationId());
if (rotation.isPresent()) {
endpoints.forEach(endpoint -> {
controller.nameServiceForwarder().createCname(RecordName.from(endpoint.dnsName()),
RecordData.fqdn(rotation.get().name()),
Priority.normal);
names.add(endpoint.dnsName());
});
}
names.add(assignedRotation.rotationId().asString());
containerEndpoints.add(new ContainerEndpoint(assignedRotation.clusterId().value(), names));
}
return Collections.unmodifiableSet(containerEndpoints);
}
/** Remove endpoints in DNS for all rotations assigned to given instance */
public void removeEndpointsInDns(Instance instance) {
endpointsOf(instance).requiresRotation()
.forEach(endpoint -> controller.nameServiceForwarder()
.removeRecords(Record.Type.CNAME,
RecordName.from(endpoint.dnsName()),
Priority.normal));
}
/** Returns the routing methods that are common across given zones */
private List<RoutingMethod> commonRoutingMethodsOf(List<ZoneId> zones) {
var zonesByMethod = new HashMap<RoutingMethod, Set<ZoneId>>();
for (var zone : zones) {
for (var method : controller.zoneRegistry().routingMethods(zone)) {
zonesByMethod.putIfAbsent(method, new LinkedHashSet<>());
zonesByMethod.get(method).add(zone);
}
}
var routingMethods = new ArrayList<RoutingMethod>();
zonesByMethod.forEach((method, z) -> {
if (z.containsAll(zones)) {
routingMethods.add(method);
}
});
return Collections.unmodifiableList(routingMethods);
}
/** Returns whether traffic can be directly routed to given deployment */
} | class RoutingController {
/** The minimum Vespa version that supports directly routed endpoints */
public static final Version DIRECT_ROUTING_MIN_VERSION = new Version(Integer.MAX_VALUE, Integer.MAX_VALUE,
Integer.MAX_VALUE);
private final Controller controller;
private final RoutingPolicies routingPolicies;
private final RotationRepository rotationRepository;
private final BooleanFlag allowDirectRouting;
public RoutingController(Controller controller, RotationsConfig rotationsConfig) {
this.controller = Objects.requireNonNull(controller, "controller must be non-null");
this.routingPolicies = new RoutingPolicies(controller);
this.rotationRepository = new RotationRepository(rotationsConfig, controller.applications(),
controller.curator());
this.allowDirectRouting = Flags.ALLOW_DIRECT_ROUTING.bindTo(controller.flagSource());
}
public RoutingPolicies policies() {
return routingPolicies;
}
public RotationRepository rotations() {
return rotationRepository;
}
/** Returns zone-scoped endpoints for given deployment */
public EndpointList endpointsOf(DeploymentId deployment) {
var endpoints = new LinkedHashSet<Endpoint>();
controller.serviceRegistry().routingGenerator().clusterEndpoints(deployment)
.forEach((cluster, url) -> endpoints.add(Endpoint.of(deployment.applicationId())
.target(cluster, deployment.zoneId())
.routingMethod(RoutingMethod.shared)
.on(Port.fromRoutingMethod(RoutingMethod.shared))
.in(controller.system())));
boolean hasSharedEndpoint = !endpoints.isEmpty();
var application = Suppliers.memoize(() -> controller.applications().requireApplication(TenantAndApplicationId.from(deployment.applicationId())));
for (var policy : routingPolicies.get(deployment).values()) {
if (!policy.status().isActive()) continue;
for (var routingMethod : controller.zoneRegistry().routingMethods(policy.id().zone())) {
if (routingMethod.isDirect() && !canRouteDirectlyTo(deployment, application.get())) continue;
if (hasSharedEndpoint && routingMethod == RoutingMethod.shared) continue;
endpoints.add(policy.endpointIn(controller.system(), routingMethod));
}
}
return EndpointList.copyOf(endpoints);
}
/** Returns global-scoped endpoints for given instance */
public EndpointList endpointsOf(ApplicationId instance) {
return endpointsOf(controller.applications().requireApplication(TenantAndApplicationId.from(instance)),
instance.instance());
}
/** Returns global-scoped endpoints for given instance */
public EndpointList endpointsOf(Application application, InstanceName instanceName) {
var endpoints = new LinkedHashSet<Endpoint>();
var instance = application.require(instanceName);
for (var rotation : instance.rotations()) {
var deployments = rotation.regions().stream()
.map(region -> new DeploymentId(instance.id(), ZoneId.from(Environment.prod, region)))
.collect(Collectors.toList());
EndpointList.global(RoutingId.of(instance.id(), rotation.endpointId()),
controller.system(), routingMethodsOfAll(deployments, application))
.requiresRotation()
.forEach(endpoints::add);
}
var deploymentsByRoutingId = new LinkedHashMap<RoutingId, List<DeploymentId>>();
for (var policy : routingPolicies.get(instance.id()).values()) {
if (!policy.status().isActive()) continue;
for (var endpointId : policy.endpoints()) {
var routingId = RoutingId.of(instance.id(), endpointId);
deploymentsByRoutingId.putIfAbsent(routingId, new ArrayList<>());
deploymentsByRoutingId.get(routingId).add(new DeploymentId(instance.id(), policy.id().zone()));
}
}
deploymentsByRoutingId.forEach((routingId, deployments) -> {
EndpointList.global(routingId, controller.system(), routingMethodsOfAll(deployments, application))
.not().requiresRotation()
.forEach(endpoints::add);
});
return EndpointList.copyOf(endpoints);
}
/** Returns all non-global endpoints and corresponding cluster IDs for given deployments, grouped by their zone */
public Map<ZoneId, List<Endpoint>> zoneEndpointsOf(Collection<DeploymentId> deployments) {
var endpoints = new TreeMap<ZoneId, List<Endpoint>>(Comparator.comparing(ZoneId::value));
for (var deployment : deployments) {
var zoneEndpoints = endpointsOf(deployment).scope(Endpoint.Scope.zone).asList();
if ( ! zoneEndpoints.isEmpty()) {
endpoints.put(deployment.zoneId(), zoneEndpoints);
}
}
return Collections.unmodifiableMap(endpoints);
}
/** Change status of all global endpoints for given deployment */
public void setGlobalRotationStatus(DeploymentId deployment, EndpointStatus status) {
endpointsOf(deployment.applicationId()).requiresRotation().primary().ifPresent(endpoint -> {
try {
controller.serviceRegistry().configServer().setGlobalRotationStatus(deployment, endpoint.upstreamIdOf(deployment), status);
} catch (Exception e) {
throw new RuntimeException("Failed to set rotation status of " + endpoint + " in " + deployment, e);
}
});
}
/** Get global endpoint status for given deployment */
public Map<Endpoint, EndpointStatus> globalRotationStatus(DeploymentId deployment) {
var routingEndpoints = new LinkedHashMap<Endpoint, EndpointStatus>();
endpointsOf(deployment.applicationId()).requiresRotation().primary().ifPresent(endpoint -> {
var upstreamName = endpoint.upstreamIdOf(deployment);
var status = controller.serviceRegistry().configServer().getGlobalRotationStatus(deployment, upstreamName);
routingEndpoints.put(endpoint, status);
});
return Collections.unmodifiableMap(routingEndpoints);
}
/**
* Assigns one or more global rotations to given application, if eligible. The given application is implicitly
* stored, ensuring that the assigned rotation(s) are persisted when this returns.
*/
public LockedApplication assignRotations(LockedApplication application, InstanceName instanceName) {
try (RotationLock rotationLock = rotationRepository.lock()) {
var rotations = rotationRepository.getOrAssignRotations(application.get().deploymentSpec(),
application.get().require(instanceName),
rotationLock);
application = application.with(instanceName, instance -> instance.with(rotations));
controller.applications().store(application);
}
return application;
}
/**
* Register endpoints for rotations assigned to given application and zone in DNS.
*
* @return the registered endpoints
*/
public Set<ContainerEndpoint> registerEndpointsInDns(Application application, InstanceName instanceName, ZoneId zone) {
var instance = application.require(instanceName);
var containerEndpoints = new HashSet<ContainerEndpoint>();
boolean registerLegacyNames = application.deploymentSpec().instance(instanceName)
.flatMap(DeploymentInstanceSpec::globalServiceId)
.isPresent();
for (var assignedRotation : instance.rotations()) {
var names = new ArrayList<String>();
var endpoints = endpointsOf(application, instanceName).named(assignedRotation.endpointId())
.requiresRotation();
if (!registerLegacyNames && !assignedRotation.regions().contains(zone.region())) {
continue;
}
if (!registerLegacyNames) {
endpoints = endpoints.not().legacy();
}
var rotation = rotationRepository.getRotation(assignedRotation.rotationId());
if (rotation.isPresent()) {
endpoints.forEach(endpoint -> {
controller.nameServiceForwarder().createCname(RecordName.from(endpoint.dnsName()),
RecordData.fqdn(rotation.get().name()),
Priority.normal);
names.add(endpoint.dnsName());
});
}
names.add(assignedRotation.rotationId().asString());
containerEndpoints.add(new ContainerEndpoint(assignedRotation.clusterId().value(), names));
}
return Collections.unmodifiableSet(containerEndpoints);
}
/** Remove endpoints in DNS for all rotations assigned to given instance */
public void removeEndpointsInDns(Application application, InstanceName instanceName) {
endpointsOf(application, instanceName).requiresRotation()
.forEach(endpoint -> controller.nameServiceForwarder()
.removeRecords(Record.Type.CNAME,
RecordName.from(endpoint.dnsName()),
Priority.normal));
}
/** Returns the routing methods that are available across all given deployments */
private List<RoutingMethod> routingMethodsOfAll(List<DeploymentId> deployments, Application application) {
var deploymentsByMethod = new HashMap<RoutingMethod, Set<DeploymentId>>();
for (var deployment : deployments) {
for (var method : controller.zoneRegistry().routingMethods(deployment.zoneId())) {
deploymentsByMethod.putIfAbsent(method, new LinkedHashSet<>());
deploymentsByMethod.get(method).add(deployment);
}
}
var routingMethods = new ArrayList<RoutingMethod>();
deploymentsByMethod.forEach((method, supportedDeployments) -> {
if (supportedDeployments.containsAll(deployments)) {
if (method.isDirect() && !canRouteDirectlyTo(deployments, application)) return;
routingMethods.add(method);
}
});
return Collections.unmodifiableList(routingMethods);
}
/** Returns whether traffic can be directly routed to all given deployments */
private boolean canRouteDirectlyTo(List<DeploymentId> deployments, Application application) {
return deployments.stream().allMatch(deployment -> canRouteDirectlyTo(deployment, application));
}
/** Returns whether traffic can be directly routed to given deployment */
} | |
Done | private void toSlime(Node node, boolean allFields, Cursor object) {
object.setString("url", nodeParentUrl + node.hostname());
if ( ! allFields) return;
object.setString("id", node.hostname());
object.setString("state", serializer.toString(node.state()));
object.setString("type", node.type().name());
object.setString("hostname", node.hostname());
object.setString("type", serializer.toString(node.type()));
if (node.parentHostname().isPresent()) {
object.setString("parentHostname", node.parentHostname().get());
}
object.setString("openStackId", node.id());
object.setString("flavor", node.flavor().name());
node.reservedTo().ifPresent(reservedTo -> object.setString("reservedTo", reservedTo.value()));
if (node.flavor().isConfigured())
object.setDouble("cpuCores", node.flavor().getMinCpuCores());
toSlime(node.flavor().resources(), object.setObject("resources"));
if (node.flavor().cost() > 0)
object.setLong("cost", node.flavor().cost());
object.setString("environment", node.flavor().getType().name());
node.allocation().ifPresent(allocation -> {
toSlime(allocation.owner(), object.setObject("owner"));
toSlime(allocation.membership(), object.setObject("membership"));
object.setLong("restartGeneration", allocation.restartGeneration().wanted());
object.setLong("currentRestartGeneration", allocation.restartGeneration().current());
String wantedVespaVersion = allocation.membership().cluster().vespaVersion().toFullString();
Optional<String> dockerImageRepo = allocation.membership().cluster().dockerImageRepo();
object.setString("wantedDockerImage", dockerImageRepo.map(s -> s + ":" + wantedVespaVersion)
.orElseGet(() -> nodeRepository.dockerImage(node).withTag(allocation.membership().cluster().vespaVersion()).asString()));
object.setString("wantedVespaVersion", wantedVespaVersion);
toSlime(allocation.requestedResources(), object.setObject("requestedResources"));
allocation.networkPorts().ifPresent(ports -> NetworkPortsSerializer.toSlime(ports, object.setArray("networkPorts")));
orchestrator.apply(new HostName(node.hostname()))
.ifPresent(info -> {
object.setBool("allowedToBeDown", info.status().isSuspended());
info.suspendedSince().ifPresent(since -> object.setLong("suspendedSinceMillis", since.toEpochMilli()));
});
});
object.setLong("rebootGeneration", node.status().reboot().wanted());
object.setLong("currentRebootGeneration", node.status().reboot().current());
node.status().osVersion().current().ifPresent(version -> object.setString("currentOsVersion", version.toFullString()));
node.status().osVersion().wanted().ifPresent(version -> object.setString("wantedOsVersion", version.toFullString()));
node.status().firmwareVerifiedAt().ifPresent(instant -> object.setLong("currentFirmwareCheck", instant.toEpochMilli()));
if (node.type().isDockerHost())
nodeRepository.firmwareChecks().requiredAfter().ifPresent(after -> object.setLong("wantedFirmwareCheck", after.toEpochMilli()));
node.status().vespaVersion().ifPresent(version -> object.setString("vespaVersion", version.toFullString()));
currentDockerImage(node).ifPresent(dockerImage -> object.setString("currentDockerImage", dockerImage.asString()));
object.setLong("failCount", node.status().failCount());
object.setBool("wantToRetire", node.status().wantToRetire());
object.setBool("wantToDeprovision", node.status().wantToDeprovision());
toSlime(node.history(), object.setArray("history"));
ipAddressesToSlime(node.ipAddresses(), object.setArray("ipAddresses"));
ipAddressesToSlime(node.ipAddressPool().asSet(), object.setArray("additionalIpAddresses"));
node.reports().toSlime(object, "reports");
node.modelName().ifPresent(modelName -> object.setString("modelName", modelName));
} | Optional<String> dockerImageRepo = allocation.membership().cluster().dockerImageRepo(); | private void toSlime(Node node, boolean allFields, Cursor object) {
object.setString("url", nodeParentUrl + node.hostname());
if ( ! allFields) return;
object.setString("id", node.hostname());
object.setString("state", serializer.toString(node.state()));
object.setString("type", node.type().name());
object.setString("hostname", node.hostname());
object.setString("type", serializer.toString(node.type()));
if (node.parentHostname().isPresent()) {
object.setString("parentHostname", node.parentHostname().get());
}
object.setString("openStackId", node.id());
object.setString("flavor", node.flavor().name());
node.reservedTo().ifPresent(reservedTo -> object.setString("reservedTo", reservedTo.value()));
if (node.flavor().isConfigured())
object.setDouble("cpuCores", node.flavor().getMinCpuCores());
toSlime(node.flavor().resources(), object.setObject("resources"));
if (node.flavor().cost() > 0)
object.setLong("cost", node.flavor().cost());
object.setString("environment", node.flavor().getType().name());
node.allocation().ifPresent(allocation -> {
toSlime(allocation.owner(), object.setObject("owner"));
toSlime(allocation.membership(), object.setObject("membership"));
object.setLong("restartGeneration", allocation.restartGeneration().wanted());
object.setLong("currentRestartGeneration", allocation.restartGeneration().current());
object.setString("wantedDockerImage", allocation.membership().cluster().dockerImage()
.orElseGet(() -> nodeRepository.dockerImage(node).withTag(allocation.membership().cluster().vespaVersion()).asString()));
object.setString("wantedVespaVersion", allocation.membership().cluster().vespaVersion().toFullString());
toSlime(allocation.requestedResources(), object.setObject("requestedResources"));
allocation.networkPorts().ifPresent(ports -> NetworkPortsSerializer.toSlime(ports, object.setArray("networkPorts")));
orchestrator.apply(new HostName(node.hostname()))
.ifPresent(info -> {
object.setBool("allowedToBeDown", info.status().isSuspended());
info.suspendedSince().ifPresent(since -> object.setLong("suspendedSinceMillis", since.toEpochMilli()));
});
});
object.setLong("rebootGeneration", node.status().reboot().wanted());
object.setLong("currentRebootGeneration", node.status().reboot().current());
node.status().osVersion().current().ifPresent(version -> object.setString("currentOsVersion", version.toFullString()));
node.status().osVersion().wanted().ifPresent(version -> object.setString("wantedOsVersion", version.toFullString()));
node.status().firmwareVerifiedAt().ifPresent(instant -> object.setLong("currentFirmwareCheck", instant.toEpochMilli()));
if (node.type().isDockerHost())
nodeRepository.firmwareChecks().requiredAfter().ifPresent(after -> object.setLong("wantedFirmwareCheck", after.toEpochMilli()));
node.status().vespaVersion().ifPresent(version -> object.setString("vespaVersion", version.toFullString()));
currentDockerImage(node).ifPresent(dockerImage -> object.setString("currentDockerImage", dockerImage.asString()));
object.setLong("failCount", node.status().failCount());
object.setBool("wantToRetire", node.status().wantToRetire());
object.setBool("wantToDeprovision", node.status().wantToDeprovision());
toSlime(node.history(), object.setArray("history"));
ipAddressesToSlime(node.ipAddresses(), object.setArray("ipAddresses"));
ipAddressesToSlime(node.ipAddressPool().asSet(), object.setArray("additionalIpAddresses"));
node.reports().toSlime(object, "reports");
node.modelName().ifPresent(modelName -> object.setString("modelName", modelName));
} | class NodesResponse extends HttpResponse {
/** The responses this can create */
public enum ResponseType { nodeList, stateList, nodesInStateList, singleNode }
/** The request url minus parameters, with a trailing slash added if missing */
private final String parentUrl;
/** The parent url of nodes */
private final String nodeParentUrl;
private final NodeFilter filter;
private final boolean recursive;
private final Function<HostName, Optional<HostInfo>> orchestrator;
private final NodeRepository nodeRepository;
private final Slime slime;
private final NodeSerializer serializer = new NodeSerializer();
public NodesResponse(ResponseType responseType, HttpRequest request,
Orchestrator orchestrator, NodeRepository nodeRepository) {
super(200);
this.parentUrl = toParentUrl(request);
this.nodeParentUrl = toNodeParentUrl(request);
filter = NodesApiHandler.toNodeFilter(request);
this.recursive = request.getBooleanProperty("recursive");
this.orchestrator = orchestrator.getHostResolver();
this.nodeRepository = nodeRepository;
slime = new Slime();
Cursor root = slime.setObject();
switch (responseType) {
case nodeList: nodesToSlime(root); break;
case stateList : statesToSlime(root); break;
case nodesInStateList: nodesToSlime(serializer.stateFrom(lastElement(parentUrl)), root); break;
case singleNode : nodeToSlime(lastElement(parentUrl), root); break;
default: throw new IllegalArgumentException();
}
}
private String toParentUrl(HttpRequest request) {
URI uri = request.getUri();
String parentUrl = uri.getScheme() + ":
if ( ! parentUrl.endsWith("/"))
parentUrl = parentUrl + "/";
return parentUrl;
}
private String toNodeParentUrl(HttpRequest request) {
URI uri = request.getUri();
return uri.getScheme() + ":
}
@Override
public void render(OutputStream stream) throws IOException {
new JsonFormat(true).encode(stream, slime);
}
@Override
public String getContentType() {
return "application/json";
}
private void statesToSlime(Cursor root) {
Cursor states = root.setObject("states");
for (Node.State state : Node.State.values())
toSlime(state, states.setObject(serializer.toString(state)));
}
private void toSlime(Node.State state, Cursor object) {
object.setString("url", parentUrl + serializer.toString(state));
if (recursive)
nodesToSlime(state, object);
}
/** Outputs the nodes in the given state to a node array */
private void nodesToSlime(Node.State state, Cursor parentObject) {
Cursor nodeArray = parentObject.setArray("nodes");
for (NodeType type : NodeType.values())
toSlime(nodeRepository.getNodes(type, state), nodeArray);
}
/** Outputs all the nodes to a node array */
private void nodesToSlime(Cursor parentObject) {
Cursor nodeArray = parentObject.setArray("nodes");
toSlime(nodeRepository.getNodes(), nodeArray);
}
private void toSlime(List<Node> nodes, Cursor array) {
for (Node node : nodes) {
if ( ! filter.matches(node)) continue;
toSlime(node, recursive, array.addObject());
}
}
private void nodeToSlime(String hostname, Cursor object) {
Node node = nodeRepository.getNode(hostname).orElseThrow(() ->
new NotFoundException("No node with hostname '" + hostname + "'"));
toSlime(node, true, object);
}
private void toSlime(ApplicationId id, Cursor object) {
object.setString("tenant", id.tenant().value());
object.setString("application", id.application().value());
object.setString("instance", id.instance().value());
}
private void toSlime(ClusterMembership membership, Cursor object) {
object.setString("clustertype", membership.cluster().type().name());
object.setString("clusterid", membership.cluster().id().value());
object.setString("group", String.valueOf(membership.cluster().group().get().index()));
object.setLong("index", membership.index());
object.setBool("retired", membership.retired());
}
private void toSlime(History history, Cursor array) {
for (History.Event event : history.events()) {
Cursor object = array.addObject();
object.setString("event", event.type().name());
object.setLong("at", event.at().toEpochMilli());
object.setString("agent", event.agent().name());
}
}
private void toSlime(NodeResources resources, Cursor object) {
object.setDouble("vcpu", resources.vcpu());
object.setDouble("memoryGb", resources.memoryGb());
object.setDouble("diskGb", resources.diskGb());
object.setDouble("bandwidthGbps", resources.bandwidthGbps());
object.setString("diskSpeed", serializer.toString(resources.diskSpeed()));
object.setString("storageType", serializer.toString(resources.storageType()));
}
private Optional<DockerImage> currentDockerImage(Node node) {
return node.status().dockerImage()
.or(() -> Optional.of(node)
.filter(n -> n.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
.flatMap(n -> n.status().vespaVersion()
.map(version -> nodeRepository.dockerImage(n).withTag(version))));
}
private void ipAddressesToSlime(Set<String> ipAddresses, Cursor array) {
ipAddresses.forEach(array::addString);
}
private String lastElement(String path) {
if (path.endsWith("/"))
path = path.substring(0, path.length()-1);
int lastSlash = path.lastIndexOf("/");
if (lastSlash < 0) return path;
return path.substring(lastSlash+1);
}
} | class NodesResponse extends HttpResponse {
/** The responses this can create */
public enum ResponseType { nodeList, stateList, nodesInStateList, singleNode }
/** The request url minus parameters, with a trailing slash added if missing */
private final String parentUrl;
/** The parent url of nodes */
private final String nodeParentUrl;
private final NodeFilter filter;
private final boolean recursive;
private final Function<HostName, Optional<HostInfo>> orchestrator;
private final NodeRepository nodeRepository;
private final Slime slime;
private final NodeSerializer serializer = new NodeSerializer();
public NodesResponse(ResponseType responseType, HttpRequest request,
Orchestrator orchestrator, NodeRepository nodeRepository) {
super(200);
this.parentUrl = toParentUrl(request);
this.nodeParentUrl = toNodeParentUrl(request);
filter = NodesApiHandler.toNodeFilter(request);
this.recursive = request.getBooleanProperty("recursive");
this.orchestrator = orchestrator.getHostResolver();
this.nodeRepository = nodeRepository;
slime = new Slime();
Cursor root = slime.setObject();
switch (responseType) {
case nodeList: nodesToSlime(root); break;
case stateList : statesToSlime(root); break;
case nodesInStateList: nodesToSlime(serializer.stateFrom(lastElement(parentUrl)), root); break;
case singleNode : nodeToSlime(lastElement(parentUrl), root); break;
default: throw new IllegalArgumentException();
}
}
private String toParentUrl(HttpRequest request) {
URI uri = request.getUri();
String parentUrl = uri.getScheme() + ":
if ( ! parentUrl.endsWith("/"))
parentUrl = parentUrl + "/";
return parentUrl;
}
private String toNodeParentUrl(HttpRequest request) {
URI uri = request.getUri();
return uri.getScheme() + ":
}
@Override
public void render(OutputStream stream) throws IOException {
new JsonFormat(true).encode(stream, slime);
}
@Override
public String getContentType() {
return "application/json";
}
private void statesToSlime(Cursor root) {
Cursor states = root.setObject("states");
for (Node.State state : Node.State.values())
toSlime(state, states.setObject(serializer.toString(state)));
}
private void toSlime(Node.State state, Cursor object) {
object.setString("url", parentUrl + serializer.toString(state));
if (recursive)
nodesToSlime(state, object);
}
/** Outputs the nodes in the given state to a node array */
private void nodesToSlime(Node.State state, Cursor parentObject) {
Cursor nodeArray = parentObject.setArray("nodes");
for (NodeType type : NodeType.values())
toSlime(nodeRepository.getNodes(type, state), nodeArray);
}
/** Outputs all the nodes to a node array */
private void nodesToSlime(Cursor parentObject) {
Cursor nodeArray = parentObject.setArray("nodes");
toSlime(nodeRepository.getNodes(), nodeArray);
}
private void toSlime(List<Node> nodes, Cursor array) {
for (Node node : nodes) {
if ( ! filter.matches(node)) continue;
toSlime(node, recursive, array.addObject());
}
}
private void nodeToSlime(String hostname, Cursor object) {
Node node = nodeRepository.getNode(hostname).orElseThrow(() ->
new NotFoundException("No node with hostname '" + hostname + "'"));
toSlime(node, true, object);
}
private void toSlime(ApplicationId id, Cursor object) {
object.setString("tenant", id.tenant().value());
object.setString("application", id.application().value());
object.setString("instance", id.instance().value());
}
private void toSlime(ClusterMembership membership, Cursor object) {
object.setString("clustertype", membership.cluster().type().name());
object.setString("clusterid", membership.cluster().id().value());
object.setString("group", String.valueOf(membership.cluster().group().get().index()));
object.setLong("index", membership.index());
object.setBool("retired", membership.retired());
}
private void toSlime(History history, Cursor array) {
for (History.Event event : history.events()) {
Cursor object = array.addObject();
object.setString("event", event.type().name());
object.setLong("at", event.at().toEpochMilli());
object.setString("agent", event.agent().name());
}
}
private void toSlime(NodeResources resources, Cursor object) {
object.setDouble("vcpu", resources.vcpu());
object.setDouble("memoryGb", resources.memoryGb());
object.setDouble("diskGb", resources.diskGb());
object.setDouble("bandwidthGbps", resources.bandwidthGbps());
object.setString("diskSpeed", serializer.toString(resources.diskSpeed()));
object.setString("storageType", serializer.toString(resources.storageType()));
}
private Optional<DockerImage> currentDockerImage(Node node) {
return node.status().dockerImage()
.or(() -> Optional.of(node)
.filter(n -> n.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
.flatMap(n -> n.status().vespaVersion()
.map(version -> nodeRepository.dockerImage(n).withTag(version))));
}
private void ipAddressesToSlime(Set<String> ipAddresses, Cursor array) {
ipAddresses.forEach(array::addString);
}
private String lastElement(String path) {
if (path.endsWith("/"))
path = path.substring(0, path.length()-1);
int lastSlash = path.lastIndexOf("/");
if (lastSlash < 0) return path;
return path.substring(lastSlash+1);
}
} |
For consistency you should use append to avoid the '+' entirely here to. | private void validateFail(String output, SearchCluster sc, String sdName, DeployLogger deployLogger) {
StringBuilder errMsg = new StringBuilder("For search cluster '" + sc.getClusterName() + "', search definition '" + sdName + "': error in rank setup. Details:\n");
for (String line : output.split("\n")) {
if (line.startsWith("debug\t")) continue;
try {
LogMessage logMessage = LogMessage.parseNativeFormat(line);
errMsg.append(logMessage.getLevel()).append(": ").append(logMessage.getPayload()).append("\n");
} catch (InvalidLogFormatException e) {
errMsg.append(line).append("\n");
}
}
if (ignoreValidationErrors) {
deployLogger.log(LogLevel.WARNING, errMsg + "(Continuing since ignoreValidationErrors flag is set.)");
} else {
throw new IllegalArgumentException(errMsg.toString());
}
} | StringBuilder errMsg = new StringBuilder("For search cluster '" + sc.getClusterName() + "', search definition '" + sdName + "': error in rank setup. Details:\n"); | private void validateFail(String output, SearchCluster sc, String sdName, DeployLogger deployLogger) {
StringBuilder errMsg = new StringBuilder("For search cluster '").append(sc.getClusterName()).append("', ")
.append("search definition '").append(sdName).append("': error in rank setup. Details:\n");
for (String line : output.split("\n")) {
if (line.startsWith("debug\t")) continue;
try {
LogMessage logMessage = LogMessage.parseNativeFormat(line);
errMsg.append(logMessage.getLevel()).append(": ").append(logMessage.getPayload()).append("\n");
} catch (InvalidLogFormatException e) {
errMsg.append(line).append("\n");
}
}
if (ignoreValidationErrors) {
deployLogger.log(LogLevel.WARNING, errMsg.append("(Continuing since ignoreValidationErrors flag is set.)").toString());
} else {
throw new IllegalArgumentException(errMsg.toString());
}
} | class RankSetupValidator extends Validator {
private static final Logger log = Logger.getLogger(RankSetupValidator.class.getName());
private static final String binaryName = "vespa-verify-ranksetup-bin ";
private final boolean ignoreValidationErrors;
public RankSetupValidator(boolean ignoreValidationErrors) {
this.ignoreValidationErrors = ignoreValidationErrors;
}
@Override
public void validate(VespaModel model, DeployState deployState) {
File cfgDir = null;
try {
cfgDir = Files.createTempDirectory("verify-ranksetup." +
deployState.getProperties().applicationId().toFullString() +
".")
.toFile();
for (AbstractSearchCluster cluster : model.getSearchClusters()) {
if (cluster.isRealtime()) {
IndexedSearchCluster sc = (IndexedSearchCluster) cluster;
String clusterDir = cfgDir.getAbsolutePath() + "/" + sc.getClusterName() + "/";
for (DocumentDatabase docDb : sc.getDocumentDbs()) {
final String name = docDb.getDerivedConfiguration().getSearch().getName();
String searchDir = clusterDir + name + "/";
writeConfigs(searchDir, docDb);
if ( ! validate("dir:" + searchDir, sc, name, deployState.getDeployLogger(), cfgDir)) {
return;
}
}
}
}
} catch (IOException e) {
throw new RuntimeException(e);
} finally {
if (cfgDir != null)
deleteTempDir(cfgDir);
}
}
private boolean validate(String configId, SearchCluster searchCluster, String sdName, DeployLogger deployLogger, File tempDir) {
Instant start = Instant.now();
try {
boolean ret = execValidate(configId, searchCluster, sdName, deployLogger);
if (!ret) {
deleteTempDir(tempDir);
}
log.log(LogLevel.DEBUG, String.format("Validating %s for %s, %s took %s ms",
sdName,
searchCluster,
configId,
Duration.between(start, Instant.now()).toMillis()));
return ret;
} catch (IllegalArgumentException e) {
deleteTempDir(tempDir);
throw e;
}
}
private void deleteTempDir(File dir) {
IOUtils.recursiveDeleteDir(dir);
}
private void writeConfigs(String dir, AbstractConfigProducer<?> producer) throws IOException {
RankProfilesConfig.Builder rpcb = new RankProfilesConfig.Builder();
((RankProfilesConfig.Producer) producer).getConfig(rpcb);
RankProfilesConfig rpc = new RankProfilesConfig(rpcb);
writeConfig(dir, RankProfilesConfig.getDefName() + ".cfg", rpc);
IndexschemaConfig.Builder iscb = new IndexschemaConfig.Builder();
((IndexschemaConfig.Producer) producer).getConfig(iscb);
IndexschemaConfig isc = new IndexschemaConfig(iscb);
writeConfig(dir, IndexschemaConfig.getDefName() + ".cfg", isc);
AttributesConfig.Builder acb = new AttributesConfig.Builder();
((AttributesConfig.Producer) producer).getConfig(acb);
AttributesConfig ac = new AttributesConfig(acb);
writeConfig(dir, AttributesConfig.getDefName() + ".cfg", ac);
RankingConstantsConfig.Builder rccb = new RankingConstantsConfig.Builder();
((RankingConstantsConfig.Producer) producer).getConfig(rccb);
RankingConstantsConfig rcc = new RankingConstantsConfig(rccb);
writeConfig(dir, RankingConstantsConfig.getDefName() + ".cfg", rcc);
ImportedFieldsConfig.Builder ifcb = new ImportedFieldsConfig.Builder();
((ImportedFieldsConfig.Producer) producer).getConfig(ifcb);
ImportedFieldsConfig ifc = new ImportedFieldsConfig(ifcb);
writeConfig(dir, ImportedFieldsConfig.getDefName() + ".cfg", ifc);
}
private static void writeConfig(String dir, String configName, ConfigInstance config) throws IOException {
IOUtils.writeFile(dir + configName, StringUtilities.implodeMultiline(ConfigInstance.serialize(config)), false);
}
private boolean execValidate(String configId, SearchCluster sc, String sdName, DeployLogger deployLogger) {
String job = String.format("%s %s", binaryName, configId);
ProcessExecuter executer = new ProcessExecuter();
try {
Pair<Integer, String> ret = executer.exec(job);
if (ret.getFirst() != 0) {
validateFail(ret.getSecond(), sc, sdName, deployLogger);
}
} catch (IOException e) {
validateWarn(e, deployLogger);
return false;
}
return true;
}
private void validateWarn(Exception e, DeployLogger deployLogger) {
String msg = "Unable to execute '"+ binaryName + "', validation of rank expressions will only take place when you start Vespa: " +
Exceptions.toMessageString(e);
deployLogger.log(LogLevel.WARNING, msg);
}
} | class RankSetupValidator extends Validator {
private static final Logger log = Logger.getLogger(RankSetupValidator.class.getName());
private static final String binaryName = "vespa-verify-ranksetup-bin ";
private final boolean ignoreValidationErrors;
public RankSetupValidator(boolean ignoreValidationErrors) {
this.ignoreValidationErrors = ignoreValidationErrors;
}
@Override
public void validate(VespaModel model, DeployState deployState) {
File cfgDir = null;
try {
cfgDir = Files.createTempDirectory("verify-ranksetup." +
deployState.getProperties().applicationId().toFullString() +
".")
.toFile();
for (AbstractSearchCluster cluster : model.getSearchClusters()) {
if (cluster.isRealtime()) {
IndexedSearchCluster sc = (IndexedSearchCluster) cluster;
String clusterDir = cfgDir.getAbsolutePath() + "/" + sc.getClusterName() + "/";
for (DocumentDatabase docDb : sc.getDocumentDbs()) {
final String name = docDb.getDerivedConfiguration().getSearch().getName();
String searchDir = clusterDir + name + "/";
writeConfigs(searchDir, docDb);
if ( ! validate("dir:" + searchDir, sc, name, deployState.getDeployLogger(), cfgDir)) {
return;
}
}
}
}
} catch (IOException e) {
throw new RuntimeException(e);
} finally {
if (cfgDir != null)
deleteTempDir(cfgDir);
}
}
private boolean validate(String configId, SearchCluster searchCluster, String sdName, DeployLogger deployLogger, File tempDir) {
Instant start = Instant.now();
try {
boolean ret = execValidate(configId, searchCluster, sdName, deployLogger);
if (!ret) {
deleteTempDir(tempDir);
}
log.log(LogLevel.DEBUG, String.format("Validating %s for %s, %s took %s ms",
sdName,
searchCluster,
configId,
Duration.between(start, Instant.now()).toMillis()));
return ret;
} catch (IllegalArgumentException e) {
deleteTempDir(tempDir);
throw e;
}
}
private void deleteTempDir(File dir) {
IOUtils.recursiveDeleteDir(dir);
}
private void writeConfigs(String dir, AbstractConfigProducer<?> producer) throws IOException {
RankProfilesConfig.Builder rpcb = new RankProfilesConfig.Builder();
((RankProfilesConfig.Producer) producer).getConfig(rpcb);
RankProfilesConfig rpc = new RankProfilesConfig(rpcb);
writeConfig(dir, RankProfilesConfig.getDefName() + ".cfg", rpc);
IndexschemaConfig.Builder iscb = new IndexschemaConfig.Builder();
((IndexschemaConfig.Producer) producer).getConfig(iscb);
IndexschemaConfig isc = new IndexschemaConfig(iscb);
writeConfig(dir, IndexschemaConfig.getDefName() + ".cfg", isc);
AttributesConfig.Builder acb = new AttributesConfig.Builder();
((AttributesConfig.Producer) producer).getConfig(acb);
AttributesConfig ac = new AttributesConfig(acb);
writeConfig(dir, AttributesConfig.getDefName() + ".cfg", ac);
RankingConstantsConfig.Builder rccb = new RankingConstantsConfig.Builder();
((RankingConstantsConfig.Producer) producer).getConfig(rccb);
RankingConstantsConfig rcc = new RankingConstantsConfig(rccb);
writeConfig(dir, RankingConstantsConfig.getDefName() + ".cfg", rcc);
ImportedFieldsConfig.Builder ifcb = new ImportedFieldsConfig.Builder();
((ImportedFieldsConfig.Producer) producer).getConfig(ifcb);
ImportedFieldsConfig ifc = new ImportedFieldsConfig(ifcb);
writeConfig(dir, ImportedFieldsConfig.getDefName() + ".cfg", ifc);
}
private static void writeConfig(String dir, String configName, ConfigInstance config) throws IOException {
IOUtils.writeFile(dir + configName, StringUtilities.implodeMultiline(ConfigInstance.serialize(config)), false);
}
private boolean execValidate(String configId, SearchCluster sc, String sdName, DeployLogger deployLogger) {
String job = String.format("%s %s", binaryName, configId);
ProcessExecuter executer = new ProcessExecuter();
try {
Pair<Integer, String> ret = executer.exec(job);
if (ret.getFirst() != 0) {
validateFail(ret.getSecond(), sc, sdName, deployLogger);
}
} catch (IOException e) {
validateWarn(e, deployLogger);
return false;
}
return true;
}
private void validateWarn(Exception e, DeployLogger deployLogger) {
String msg = "Unable to execute '"+ binaryName + "', validation of rank expressions will only take place when you start Vespa: " +
Exceptions.toMessageString(e);
deployLogger.log(LogLevel.WARNING, msg);
}
} |
is -> it? | public void relevant_information_from_deprovisioned_hosts_are_merged_into_readded_host() {
NodeRepositoryTester tester = new NodeRepositoryTester();
Instant testStart = tester.nodeRepository().clock().instant();
tester.clock().advance(Duration.ofSeconds(1));
tester.addNode("id1", "host1", "default", NodeType.host);
tester.addNode("id2", "host2", "default", NodeType.host);
assertFalse(tester.nodeRepository().getNode("host1").get().history().hasEventAfter(History.Event.Type.deprovisioned, testStart));
Node host1 = tester.nodeRepository().getNode("host1").get();
host1 = host1.withWantToRetire(true, Agent.system, tester.nodeRepository().clock().instant());
host1 = host1.with(host1.status().withWantToDeprovision(true));
host1 = host1.withFirmwareVerifiedAt(tester.clock().instant());
host1 = host1.with(host1.status().withIncreasedFailCount());
host1 = host1.with(host1.reports().withReport(Report.basicReport("id", Report.Type.HARD_FAIL, tester.clock().instant(), "Test report")));
tester.nodeRepository().write(host1, tester.nodeRepository().lock(host1));
tester.nodeRepository().removeRecursively("host1");
host1 = tester.nodeRepository().getNode("host1").get();
assertEquals(Node.State.deprovisioned, host1.state());
assertTrue(host1.history().hasEventAfter(History.Event.Type.deprovisioned, testStart));
tester.addNode("id2", "host1", "default", NodeType.host);
host1 = tester.nodeRepository().getNode("host1").get();
assertEquals("This is the newly added node", "id2", host1.id());
assertFalse("The old 'host1' is removed",
tester.nodeRepository().getNode("host1", Node.State.deprovisioned).isPresent());
assertFalse("Not transferred from deprovisioned host", host1.status().wantToRetire());
assertFalse("Not transferred from deprovisioned host", host1.status().wantToDeprovision());
assertTrue("Transferred from deprovisioned host", host1.history().hasEventAfter(History.Event.Type.deprovisioned, testStart));
assertTrue("Transferred from deprovisioned host", host1.status().firmwareVerifiedAt().isPresent());
assertEquals("Transferred from deprovisioned host", 1, host1.status().failCount());
assertEquals("Transferred from deprovisioned host", 1, host1.reports().getReports().size());
} | public void relevant_information_from_deprovisioned_hosts_are_merged_into_readded_host() {
NodeRepositoryTester tester = new NodeRepositoryTester();
Instant testStart = tester.nodeRepository().clock().instant();
tester.clock().advance(Duration.ofSeconds(1));
tester.addNode("id1", "host1", "default", NodeType.host);
tester.addNode("id2", "host2", "default", NodeType.host);
assertFalse(tester.nodeRepository().getNode("host1").get().history().hasEventAfter(History.Event.Type.deprovisioned, testStart));
Node host1 = tester.nodeRepository().getNode("host1").get();
host1 = host1.withWantToRetire(true, Agent.system, tester.nodeRepository().clock().instant());
host1 = host1.with(host1.status().withWantToDeprovision(true));
host1 = host1.withFirmwareVerifiedAt(tester.clock().instant());
host1 = host1.with(host1.status().withIncreasedFailCount());
host1 = host1.with(host1.reports().withReport(Report.basicReport("id", Report.Type.HARD_FAIL, tester.clock().instant(), "Test report")));
tester.nodeRepository().write(host1, tester.nodeRepository().lock(host1));
tester.nodeRepository().removeRecursively("host1");
host1 = tester.nodeRepository().getNode("host1").get();
assertEquals(Node.State.deprovisioned, host1.state());
assertTrue(host1.history().hasEventAfter(History.Event.Type.deprovisioned, testStart));
tester.addNode("id2", "host1", "default", NodeType.host);
host1 = tester.nodeRepository().getNode("host1").get();
assertEquals("This is the newly added node", "id2", host1.id());
assertFalse("The old 'host1' is removed",
tester.nodeRepository().getNode("host1", Node.State.deprovisioned).isPresent());
assertFalse("Not transferred from deprovisioned host", host1.status().wantToRetire());
assertFalse("Not transferred from deprovisioned host", host1.status().wantToDeprovision());
assertTrue("Transferred from deprovisioned host", host1.history().hasEventAfter(History.Event.Type.deprovisioned, testStart));
assertTrue("Transferred from deprovisioned host", host1.status().firmwareVerifiedAt().isPresent());
assertEquals("Transferred from deprovisioned host", 1, host1.status().failCount());
assertEquals("Transferred from deprovisioned host", 1, host1.reports().getReports().size());
} | class NodeRepositoryTest {
@Test
// Basic lifecycle: added nodes become visible in the repository, and a parked
// node disappears from the node count after removeRecursively.
public void nodeRepositoryTest() {
NodeRepositoryTester tester = new NodeRepositoryTester();
// The repository starts out empty.
assertEquals(0, tester.nodeRepository().getNodes().size());
tester.addNode("id1", "host1", "default", NodeType.tenant);
tester.addNode("id2", "host2", "default", NodeType.tenant);
tester.addNode("id3", "host3", "default", NodeType.tenant);
assertEquals(3, tester.nodeRepository().getNodes().size());
// Park host2 first, then remove it; only the two other nodes remain.
tester.nodeRepository().park("host2", true, Agent.system, "Parking to unit test")
tester.nodeRepository().removeRecursively("host2");
assertEquals(2, tester.nodeRepository().getNodes().size());
}
@Test
// Removing a docker container node directly must be rejected while it is in
// its initial (provisioned) state, and succeed once the node is set ready.
public void only_allow_docker_containers_remove_in_ready() {
NodeRepositoryTester tester = new NodeRepositoryTester();
tester.addNode("id1", "host1", "docker", NodeType.tenant);
try {
tester.nodeRepository().removeRecursively("host1");
fail("Should not be able to delete docker container node by itself in state provisioned");
} catch (IllegalArgumentException ignored) {
// Expected: removal is rejected before the node is ready.
}
tester.nodeRepository().setReady("host1", Agent.system, getClass().getSimpleName());
tester.nodeRepository().removeRecursively("host1");
}
@Test
// markNodeAvailableForNewAllocation on dirty nodes: a non-docker tenant node
// and a docker config node transition back to ready, while a docker tenant
// node is removed from the repository entirely.
public void only_remove_tenant_docker_containers_for_new_allocations() {
NodeRepositoryTester tester = new NodeRepositoryTester();
tester.addNode("host1", "host1", "default", NodeType.tenant);
tester.addNode("host2", "host2", "docker", NodeType.tenant);
tester.addNode("cfg1", "cfg1", "docker", NodeType.config);
// All three nodes start from state dirty.
tester.setNodeState("host1", Node.State.dirty);
tester.setNodeState("host2", Node.State.dirty);
tester.setNodeState("cfg1", Node.State.dirty);
tester.nodeRepository().markNodeAvailableForNewAllocation("host1", Agent.system, getClass().getSimpleName());
assertEquals(Node.State.ready, tester.nodeRepository().getNode("host1").get().state());
// The docker tenant container is deleted rather than readied.
tester.nodeRepository().markNodeAvailableForNewAllocation("host2", Agent.system, getClass().getSimpleName());
assertFalse(tester.nodeRepository().getNode("host2").isPresent());
tester.nodeRepository().markNodeAvailableForNewAllocation("cfg1", Agent.system, getClass().getSimpleName());
assertEquals(Node.State.ready, tester.nodeRepository().getNode("cfg1").get().state());
}
@Test
// A dirty node carrying a HARD_FAIL report must be rejected when marked
// available for new allocation (the exception carries the report's text),
// while a node without such a report becomes ready as usual.
public void fail_readying_with_hard_fail() {
NodeRepositoryTester tester = new NodeRepositoryTester();
tester.addNode("host1", "host1", "default", NodeType.tenant);
tester.addNode("host2", "host2", "default", NodeType.tenant);
tester.setNodeState("host1", Node.State.dirty);
tester.setNodeState("host2", Node.State.dirty);
// Attach a HARD_FAIL report to host2 and persist it.
Node node2 = tester.nodeRepository().getNode("host2").orElseThrow();
var reportsBuilder = new Reports.Builder(node2.reports());
reportsBuilder.setReport(Report.basicReport("reportId", Report.Type.HARD_FAIL, Instant.EPOCH, "hardware failure"));
node2 = node2.with(reportsBuilder.build());
tester.nodeRepository().write(node2, () -> {});
// host1 has no hard-fail report, so readying succeeds.
tester.nodeRepository().markNodeAvailableForNewAllocation("host1", Agent.system, getClass().getSimpleName());
assertEquals(Node.State.ready, tester.nodeRepository().getNode("host1").get().state());
try {
tester.nodeRepository().markNodeAvailableForNewAllocation("host2", Agent.system, getClass().getSimpleName());
fail();
} catch (IllegalArgumentException e) {
// The rejection message includes the report's description.
assertThat(e.getMessage(), containsString("hardware failure"));
}
}
@Test
// A host node can only be removed when none of its children are active;
// successful removal leaves the host in state deprovisioned instead of
// deleting it outright.
public void delete_host_only_after_all_the_children_have_been_deleted() {
NodeRepositoryTester tester = new NodeRepositoryTester();
tester.addNode("id1", "host1", "default", NodeType.host);
tester.addNode("id2", "host2", "default", NodeType.host);
tester.addNode("node10", "node10", "host1", "docker", NodeType.tenant);
tester.addNode("node11", "node11", "host1", "docker", NodeType.tenant);
tester.addNode("node12", "node12", "host1", "docker", NodeType.tenant);
tester.addNode("node20", "node20", "host2", "docker", NodeType.tenant);
assertEquals(6, tester.nodeRepository().getNodes().size());
tester.setNodeState("node11", Node.State.active);
try {
// host1 still has an active child (node11), so removal must be rejected.
tester.nodeRepository().removeRecursively("host1");
fail("Should not be able to delete host node, one of the children is in state active");
} catch (IllegalArgumentException ignored) {
}
assertEquals(6, tester.nodeRepository().getNodes().size());
// host2's only child is removable: child node20 goes away (6 -> 5 nodes)
// and host2 itself is kept in state deprovisioned.
tester.nodeRepository().removeRecursively("host2");
assertEquals(5, tester.nodeRepository().getNodes().size());
assertEquals(Node.State.deprovisioned, tester.nodeRepository().getNode("host2").get().state());
// Clear host1's blocking children, after which removal succeeds.
tester.nodeRepository().fail("node11", Agent.system, getClass().getSimpleName());
tester.nodeRepository().setReady("node12", Agent.system, getClass().getSimpleName());
tester.nodeRepository().removeRecursively("node12");
assertEquals(4, tester.nodeRepository().getNodes().size());
tester.nodeRepository().removeRecursively("host1");
assertEquals(Node.State.deprovisioned, tester.nodeRepository().getNode("host1").get().state());
}
@Test
// A host can only be dirtied recursively when every one of its children can
// be dirtied as well: host2 (only a failed child) succeeds, while host1
// (ready/active children) is rejected and its subtree is left untouched.
// Fix: the @Test annotation was duplicated; org.junit.Test is not repeatable,
// so the second occurrence is a compile error and has been removed.
public void dirty_host_only_if_we_can_dirty_children() {
    NodeRepositoryTester tester = new NodeRepositoryTester();
    tester.addNode("id1", "host1", "default", NodeType.host);
    tester.addNode("id2", "host2", "default", NodeType.host);
    tester.addNode("node10", "node10", "host1", "docker", NodeType.tenant);
    tester.addNode("node11", "node11", "host1", "docker", NodeType.tenant);
    tester.addNode("node12", "node12", "host1", "docker", NodeType.tenant);
    tester.addNode("node20", "node20", "host2", "docker", NodeType.tenant);
    tester.setNodeState("node11", Node.State.ready);
    tester.setNodeState("node12", Node.State.active);
    tester.setNodeState("node20", Node.State.failed);
    assertEquals(6, tester.nodeRepository().getNodes().size());
    // host2's only child is failed, so the whole subtree can be dirtied.
    tester.nodeRepository().dirtyRecursively("host2", Agent.system, NodeRepositoryTest.class.getSimpleName());
    assertEquals(asSet("host2", "node20"), filterNodes(tester, node -> node.state() == Node.State.dirty));
    try {
        // host1 has children in ready/active, which cannot be dirtied.
        tester.nodeRepository().dirtyRecursively("host1", Agent.system, NodeRepositoryTest.class.getSimpleName());
        fail("Should not be able to dirty host1");
    } catch (IllegalArgumentException ignored) { }
    // The rejected attempt must not have changed any node states.
    assertEquals(asSet("host2", "node20"), filterNodes(tester, node -> node.state() == Node.State.dirty));
}
/** Collects the given elements into a mutable set (duplicates collapse). */
private static Set<String> asSet(String... elements) {
    return Arrays.stream(elements).collect(Collectors.toSet());
}
/** Returns the hostnames of the repository's nodes that satisfy the given predicate. */
private static Set<String> filterNodes(NodeRepositoryTester tester, Predicate<Node> filter) {
    Set<String> hostnames = new HashSet<>();
    for (Node node : tester.nodeRepository().getNodes()) {
        if (filter.test(node))
            hostnames.add(node.hostname());
    }
    return hostnames;
}
} | class NodeRepositoryTest {
@Test
public void nodeRepositoryTest() {
NodeRepositoryTester tester = new NodeRepositoryTester();
assertEquals(0, tester.nodeRepository().getNodes().size());
tester.addNode("id1", "host1", "default", NodeType.tenant);
tester.addNode("id2", "host2", "default", NodeType.tenant);
tester.addNode("id3", "host3", "default", NodeType.tenant);
assertEquals(3, tester.nodeRepository().getNodes().size());
tester.nodeRepository().park("host2", true, Agent.system, "Parking to unit test");
tester.nodeRepository().removeRecursively("host2");
assertEquals(2, tester.nodeRepository().getNodes().size());
}
@Test
public void only_allow_docker_containers_remove_in_ready() {
NodeRepositoryTester tester = new NodeRepositoryTester();
tester.addNode("id1", "host1", "docker", NodeType.tenant);
try {
tester.nodeRepository().removeRecursively("host1");
fail("Should not be able to delete docker container node by itself in state provisioned");
} catch (IllegalArgumentException ignored) {
}
tester.nodeRepository().setReady("host1", Agent.system, getClass().getSimpleName());
tester.nodeRepository().removeRecursively("host1");
}
@Test
public void only_remove_tenant_docker_containers_for_new_allocations() {
NodeRepositoryTester tester = new NodeRepositoryTester();
tester.addNode("host1", "host1", "default", NodeType.tenant);
tester.addNode("host2", "host2", "docker", NodeType.tenant);
tester.addNode("cfg1", "cfg1", "docker", NodeType.config);
tester.setNodeState("host1", Node.State.dirty);
tester.setNodeState("host2", Node.State.dirty);
tester.setNodeState("cfg1", Node.State.dirty);
tester.nodeRepository().markNodeAvailableForNewAllocation("host1", Agent.system, getClass().getSimpleName());
assertEquals(Node.State.ready, tester.nodeRepository().getNode("host1").get().state());
tester.nodeRepository().markNodeAvailableForNewAllocation("host2", Agent.system, getClass().getSimpleName());
assertFalse(tester.nodeRepository().getNode("host2").isPresent());
tester.nodeRepository().markNodeAvailableForNewAllocation("cfg1", Agent.system, getClass().getSimpleName());
assertEquals(Node.State.ready, tester.nodeRepository().getNode("cfg1").get().state());
}
@Test
public void fail_readying_with_hard_fail() {
NodeRepositoryTester tester = new NodeRepositoryTester();
tester.addNode("host1", "host1", "default", NodeType.tenant);
tester.addNode("host2", "host2", "default", NodeType.tenant);
tester.setNodeState("host1", Node.State.dirty);
tester.setNodeState("host2", Node.State.dirty);
Node node2 = tester.nodeRepository().getNode("host2").orElseThrow();
var reportsBuilder = new Reports.Builder(node2.reports());
reportsBuilder.setReport(Report.basicReport("reportId", Report.Type.HARD_FAIL, Instant.EPOCH, "hardware failure"));
node2 = node2.with(reportsBuilder.build());
tester.nodeRepository().write(node2, () -> {});
tester.nodeRepository().markNodeAvailableForNewAllocation("host1", Agent.system, getClass().getSimpleName());
assertEquals(Node.State.ready, tester.nodeRepository().getNode("host1").get().state());
try {
tester.nodeRepository().markNodeAvailableForNewAllocation("host2", Agent.system, getClass().getSimpleName());
fail();
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), containsString("hardware failure"));
}
}
@Test
public void delete_host_only_after_all_the_children_have_been_deleted() {
NodeRepositoryTester tester = new NodeRepositoryTester();
tester.addNode("id1", "host1", "default", NodeType.host);
tester.addNode("id2", "host2", "default", NodeType.host);
tester.addNode("node10", "node10", "host1", "docker", NodeType.tenant);
tester.addNode("node11", "node11", "host1", "docker", NodeType.tenant);
tester.addNode("node12", "node12", "host1", "docker", NodeType.tenant);
tester.addNode("node20", "node20", "host2", "docker", NodeType.tenant);
assertEquals(6, tester.nodeRepository().getNodes().size());
tester.setNodeState("node11", Node.State.active);
try {
tester.nodeRepository().removeRecursively("host1");
fail("Should not be able to delete host node, one of the children is in state active");
} catch (IllegalArgumentException ignored) {
}
assertEquals(6, tester.nodeRepository().getNodes().size());
tester.nodeRepository().removeRecursively("host2");
assertEquals(5, tester.nodeRepository().getNodes().size());
assertEquals(Node.State.deprovisioned, tester.nodeRepository().getNode("host2").get().state());
tester.nodeRepository().fail("node11", Agent.system, getClass().getSimpleName());
tester.nodeRepository().setReady("node12", Agent.system, getClass().getSimpleName());
tester.nodeRepository().removeRecursively("node12");
assertEquals(4, tester.nodeRepository().getNodes().size());
tester.nodeRepository().removeRecursively("host1");
assertEquals(Node.State.deprovisioned, tester.nodeRepository().getNode("host1").get().state());
}
@Test
// A host can only be dirtied recursively when every one of its children can
// be dirtied as well: host2 (only a failed child) succeeds, while host1
// (ready/active children) is rejected and its subtree is left untouched.
// Fix: the @Test annotation was duplicated; org.junit.Test is not repeatable,
// so the second occurrence is a compile error and has been removed.
public void dirty_host_only_if_we_can_dirty_children() {
    NodeRepositoryTester tester = new NodeRepositoryTester();
    tester.addNode("id1", "host1", "default", NodeType.host);
    tester.addNode("id2", "host2", "default", NodeType.host);
    tester.addNode("node10", "node10", "host1", "docker", NodeType.tenant);
    tester.addNode("node11", "node11", "host1", "docker", NodeType.tenant);
    tester.addNode("node12", "node12", "host1", "docker", NodeType.tenant);
    tester.addNode("node20", "node20", "host2", "docker", NodeType.tenant);
    tester.setNodeState("node11", Node.State.ready);
    tester.setNodeState("node12", Node.State.active);
    tester.setNodeState("node20", Node.State.failed);
    assertEquals(6, tester.nodeRepository().getNodes().size());
    // host2's only child is failed, so the whole subtree can be dirtied.
    tester.nodeRepository().dirtyRecursively("host2", Agent.system, NodeRepositoryTest.class.getSimpleName());
    assertEquals(asSet("host2", "node20"), filterNodes(tester, node -> node.state() == Node.State.dirty));
    try {
        // host1 has children in ready/active, which cannot be dirtied.
        tester.nodeRepository().dirtyRecursively("host1", Agent.system, NodeRepositoryTest.class.getSimpleName());
        fail("Should not be able to dirty host1");
    } catch (IllegalArgumentException ignored) { }
    // The rejected attempt must not have changed any node states.
    assertEquals(asSet("host2", "node20"), filterNodes(tester, node -> node.state() == Node.State.dirty));
}
private static Set<String> asSet(String... elements) {
return new HashSet<>(Arrays.asList(elements));
}
private static Set<String> filterNodes(NodeRepositoryTester tester, Predicate<Node> filter) {
return tester.nodeRepository()
.getNodes().stream()
.filter(filter)
.map(Node::hostname)
.collect(Collectors.toSet());
}
} | |
Yes, apparently IntelliJ didn't fix all this when I accepted the suggested change from String to StringBuilder. Will fix | private void validateFail(String output, SearchCluster sc, String sdName, DeployLogger deployLogger) {
    // Aggregates verify-ranksetup output into one error message and either logs it
    // (when ignoreValidationErrors is set) or throws IllegalArgumentException.
    // Fix: build the whole message with StringBuilder.append chains; the previous
    // version still used String '+' concatenation both inside the StringBuilder
    // constructor and when logging (errMsg + "..."), defeating the point of the
    // String -> StringBuilder change.
    StringBuilder errMsg = new StringBuilder("For search cluster '").append(sc.getClusterName()).append("', ")
            .append("search definition '").append(sdName).append("': error in rank setup. Details:\n");
    for (String line : output.split("\n")) {
        if (line.startsWith("debug\t")) continue; // drop debug-level noise
        try {
            LogMessage logMessage = LogMessage.parseNativeFormat(line);
            errMsg.append(logMessage.getLevel()).append(": ").append(logMessage.getPayload()).append("\n");
        } catch (InvalidLogFormatException e) {
            // Not native log format: keep the raw line.
            errMsg.append(line).append("\n");
        }
    }
    if (ignoreValidationErrors) {
        deployLogger.log(LogLevel.WARNING, errMsg.append("(Continuing since ignoreValidationErrors flag is set.)").toString());
    } else {
        throw new IllegalArgumentException(errMsg.toString());
    }
} | StringBuilder errMsg = new StringBuilder("For search cluster '" + sc.getClusterName() + "', search definition '" + sdName + "': error in rank setup. Details:\n"); | private void validateFail(String output, SearchCluster sc, String sdName, DeployLogger deployLogger) {
// Aggregate all verify-ranksetup output into one message, built with
// StringBuilder.append throughout (no String '+' concatenation).
StringBuilder errMsg = new StringBuilder("For search cluster '").append(sc.getClusterName()).append("', ")
.append("search definition '").append(sdName).append("': error in rank setup. Details:\n");
for (String line : output.split("\n")) {
// Drop debug-level noise.
if (line.startsWith("debug\t")) continue;
try {
LogMessage logMessage = LogMessage.parseNativeFormat(line);
errMsg.append(logMessage.getLevel()).append(": ").append(logMessage.getPayload()).append("\n");
} catch (InvalidLogFormatException e) {
// Not native log format: keep the raw line.
errMsg.append(line).append("\n");
}
}
if (ignoreValidationErrors) {
// Demote to a warning when the deployment chooses to ignore validation errors.
deployLogger.log(LogLevel.WARNING, errMsg.append("(Continuing since ignoreValidationErrors flag is set.)").toString());
} else {
throw new IllegalArgumentException(errMsg.toString());
}
} | class RankSetupValidator extends Validator {
private static final Logger log = Logger.getLogger(RankSetupValidator.class.getName());
private static final String binaryName = "vespa-verify-ranksetup-bin ";
private final boolean ignoreValidationErrors;
public RankSetupValidator(boolean ignoreValidationErrors) {
this.ignoreValidationErrors = ignoreValidationErrors;
}
@Override
public void validate(VespaModel model, DeployState deployState) {
File cfgDir = null;
try {
cfgDir = Files.createTempDirectory("verify-ranksetup." +
deployState.getProperties().applicationId().toFullString() +
".")
.toFile();
for (AbstractSearchCluster cluster : model.getSearchClusters()) {
if (cluster.isRealtime()) {
IndexedSearchCluster sc = (IndexedSearchCluster) cluster;
String clusterDir = cfgDir.getAbsolutePath() + "/" + sc.getClusterName() + "/";
for (DocumentDatabase docDb : sc.getDocumentDbs()) {
final String name = docDb.getDerivedConfiguration().getSearch().getName();
String searchDir = clusterDir + name + "/";
writeConfigs(searchDir, docDb);
if ( ! validate("dir:" + searchDir, sc, name, deployState.getDeployLogger(), cfgDir)) {
return;
}
}
}
}
} catch (IOException e) {
throw new RuntimeException(e);
} finally {
if (cfgDir != null)
deleteTempDir(cfgDir);
}
}
private boolean validate(String configId, SearchCluster searchCluster, String sdName, DeployLogger deployLogger, File tempDir) {
Instant start = Instant.now();
try {
boolean ret = execValidate(configId, searchCluster, sdName, deployLogger);
if (!ret) {
deleteTempDir(tempDir);
}
log.log(LogLevel.DEBUG, String.format("Validating %s for %s, %s took %s ms",
sdName,
searchCluster,
configId,
Duration.between(start, Instant.now()).toMillis()));
return ret;
} catch (IllegalArgumentException e) {
deleteTempDir(tempDir);
throw e;
}
}
private void deleteTempDir(File dir) {
IOUtils.recursiveDeleteDir(dir);
}
private void writeConfigs(String dir, AbstractConfigProducer<?> producer) throws IOException {
RankProfilesConfig.Builder rpcb = new RankProfilesConfig.Builder();
((RankProfilesConfig.Producer) producer).getConfig(rpcb);
RankProfilesConfig rpc = new RankProfilesConfig(rpcb);
writeConfig(dir, RankProfilesConfig.getDefName() + ".cfg", rpc);
IndexschemaConfig.Builder iscb = new IndexschemaConfig.Builder();
((IndexschemaConfig.Producer) producer).getConfig(iscb);
IndexschemaConfig isc = new IndexschemaConfig(iscb);
writeConfig(dir, IndexschemaConfig.getDefName() + ".cfg", isc);
AttributesConfig.Builder acb = new AttributesConfig.Builder();
((AttributesConfig.Producer) producer).getConfig(acb);
AttributesConfig ac = new AttributesConfig(acb);
writeConfig(dir, AttributesConfig.getDefName() + ".cfg", ac);
RankingConstantsConfig.Builder rccb = new RankingConstantsConfig.Builder();
((RankingConstantsConfig.Producer) producer).getConfig(rccb);
RankingConstantsConfig rcc = new RankingConstantsConfig(rccb);
writeConfig(dir, RankingConstantsConfig.getDefName() + ".cfg", rcc);
ImportedFieldsConfig.Builder ifcb = new ImportedFieldsConfig.Builder();
((ImportedFieldsConfig.Producer) producer).getConfig(ifcb);
ImportedFieldsConfig ifc = new ImportedFieldsConfig(ifcb);
writeConfig(dir, ImportedFieldsConfig.getDefName() + ".cfg", ifc);
}
private static void writeConfig(String dir, String configName, ConfigInstance config) throws IOException {
IOUtils.writeFile(dir + configName, StringUtilities.implodeMultiline(ConfigInstance.serialize(config)), false);
}
// Runs the external verify-ranksetup binary against the given config id.
// Returns true when the binary could be executed (regardless of validation
// outcome); a non-zero exit status is turned into validateFail, and failure
// to launch the binary at all is reported as a warning and returns false.
private boolean execValidate(String configId, SearchCluster sc, String sdName, DeployLogger deployLogger) {
String job = String.format("%s %s", binaryName, configId);
ProcessExecuter executer = new ProcessExecuter();
try {
Pair<Integer, String> ret = executer.exec(job);
// Non-zero exit: hand the captured output to validateFail (may throw).
if (ret.getFirst() != 0) {
validateFail(ret.getSecond(), sc, sdName, deployLogger);
}
} catch (IOException e) {
// Could not launch the binary at all; warn and skip validation.
validateWarn(e, deployLogger);
return false;
}
return true;
}
// Logs a warning that the verify-ranksetup binary could not be run, so rank
// expression validation is deferred until Vespa is started.
private void validateWarn(Exception e, DeployLogger deployLogger) {
String msg = "Unable to execute '"+ binaryName + "', validation of rank expressions will only take place when you start Vespa: " +
Exceptions.toMessageString(e);
deployLogger.log(LogLevel.WARNING, msg);
}
} | class RankSetupValidator extends Validator {
private static final Logger log = Logger.getLogger(RankSetupValidator.class.getName());
private static final String binaryName = "vespa-verify-ranksetup-bin ";
private final boolean ignoreValidationErrors;
public RankSetupValidator(boolean ignoreValidationErrors) {
this.ignoreValidationErrors = ignoreValidationErrors;
}
@Override
public void validate(VespaModel model, DeployState deployState) {
File cfgDir = null;
try {
cfgDir = Files.createTempDirectory("verify-ranksetup." +
deployState.getProperties().applicationId().toFullString() +
".")
.toFile();
for (AbstractSearchCluster cluster : model.getSearchClusters()) {
if (cluster.isRealtime()) {
IndexedSearchCluster sc = (IndexedSearchCluster) cluster;
String clusterDir = cfgDir.getAbsolutePath() + "/" + sc.getClusterName() + "/";
for (DocumentDatabase docDb : sc.getDocumentDbs()) {
final String name = docDb.getDerivedConfiguration().getSearch().getName();
String searchDir = clusterDir + name + "/";
writeConfigs(searchDir, docDb);
if ( ! validate("dir:" + searchDir, sc, name, deployState.getDeployLogger(), cfgDir)) {
return;
}
}
}
}
} catch (IOException e) {
throw new RuntimeException(e);
} finally {
if (cfgDir != null)
deleteTempDir(cfgDir);
}
}
private boolean validate(String configId, SearchCluster searchCluster, String sdName, DeployLogger deployLogger, File tempDir) {
Instant start = Instant.now();
try {
boolean ret = execValidate(configId, searchCluster, sdName, deployLogger);
if (!ret) {
deleteTempDir(tempDir);
}
log.log(LogLevel.DEBUG, String.format("Validating %s for %s, %s took %s ms",
sdName,
searchCluster,
configId,
Duration.between(start, Instant.now()).toMillis()));
return ret;
} catch (IllegalArgumentException e) {
deleteTempDir(tempDir);
throw e;
}
}
private void deleteTempDir(File dir) {
IOUtils.recursiveDeleteDir(dir);
}
private void writeConfigs(String dir, AbstractConfigProducer<?> producer) throws IOException {
RankProfilesConfig.Builder rpcb = new RankProfilesConfig.Builder();
((RankProfilesConfig.Producer) producer).getConfig(rpcb);
RankProfilesConfig rpc = new RankProfilesConfig(rpcb);
writeConfig(dir, RankProfilesConfig.getDefName() + ".cfg", rpc);
IndexschemaConfig.Builder iscb = new IndexschemaConfig.Builder();
((IndexschemaConfig.Producer) producer).getConfig(iscb);
IndexschemaConfig isc = new IndexschemaConfig(iscb);
writeConfig(dir, IndexschemaConfig.getDefName() + ".cfg", isc);
AttributesConfig.Builder acb = new AttributesConfig.Builder();
((AttributesConfig.Producer) producer).getConfig(acb);
AttributesConfig ac = new AttributesConfig(acb);
writeConfig(dir, AttributesConfig.getDefName() + ".cfg", ac);
RankingConstantsConfig.Builder rccb = new RankingConstantsConfig.Builder();
((RankingConstantsConfig.Producer) producer).getConfig(rccb);
RankingConstantsConfig rcc = new RankingConstantsConfig(rccb);
writeConfig(dir, RankingConstantsConfig.getDefName() + ".cfg", rcc);
ImportedFieldsConfig.Builder ifcb = new ImportedFieldsConfig.Builder();
((ImportedFieldsConfig.Producer) producer).getConfig(ifcb);
ImportedFieldsConfig ifc = new ImportedFieldsConfig(ifcb);
writeConfig(dir, ImportedFieldsConfig.getDefName() + ".cfg", ifc);
}
private static void writeConfig(String dir, String configName, ConfigInstance config) throws IOException {
IOUtils.writeFile(dir + configName, StringUtilities.implodeMultiline(ConfigInstance.serialize(config)), false);
}
private boolean execValidate(String configId, SearchCluster sc, String sdName, DeployLogger deployLogger) {
String job = String.format("%s %s", binaryName, configId);
ProcessExecuter executer = new ProcessExecuter();
try {
Pair<Integer, String> ret = executer.exec(job);
if (ret.getFirst() != 0) {
validateFail(ret.getSecond(), sc, sdName, deployLogger);
}
} catch (IOException e) {
validateWarn(e, deployLogger);
return false;
}
return true;
}
private void validateWarn(Exception e, DeployLogger deployLogger) {
String msg = "Unable to execute '"+ binaryName + "', validation of rank expressions will only take place when you start Vespa: " +
Exceptions.toMessageString(e);
deployLogger.log(LogLevel.WARNING, msg);
}
} |
I think you can just compare the Optionals directly ```suggestion assertEquals(expectedHost.dockerImageRepo(), deserializedHost.dockerImageRepo()); ``` | private void assertAllocatedHosts(AllocatedHosts expectedHosts, NodeFlavors configuredFlavors) throws IOException {
    // Round-trips the hosts through JSON and verifies that every field of each
    // host survives serialization.
    AllocatedHosts deserializedHosts = fromJson(toJson(expectedHosts), Optional.of(configuredFlavors));
    assertEquals(expectedHosts, deserializedHosts);
    for (HostSpec expectedHost : expectedHosts.getHosts()) {
        HostSpec deserializedHost = requireHost(expectedHost.hostname(), deserializedHosts);
        assertEquals(expectedHost.hostname(), deserializedHost.hostname());
        assertEquals(expectedHost.membership(), deserializedHost.membership());
        assertEquals(expectedHost.flavor(), deserializedHost.flavor());
        assertEquals(expectedHost.version(), deserializedHost.version());
        assertEquals(expectedHost.networkPorts(), deserializedHost.networkPorts());
        assertEquals(expectedHost.aliases(), deserializedHost.aliases());
        assertEquals(expectedHost.requestedResources(), deserializedHost.requestedResources());
        // Fix: compare the Optionals directly; the previous orElse("") made
        // Optional.empty() and Optional.of("") indistinguishable.
        assertEquals(expectedHost.dockerImageRepo(), deserializedHost.dockerImageRepo());
    }
} | assertEquals(expectedHost.dockerImageRepo().orElse(""), deserializedHost.dockerImageRepo().orElse("")); | private void assertAllocatedHosts(AllocatedHosts expectedHosts, NodeFlavors configuredFlavors) throws IOException {
AllocatedHosts deserializedHosts = fromJson(toJson(expectedHosts), Optional.of(configuredFlavors));
assertEquals(expectedHosts, deserializedHosts);
for (HostSpec expectedHost : expectedHosts.getHosts()) {
HostSpec deserializedHost = requireHost(expectedHost.hostname(), deserializedHosts);
assertEquals(expectedHost.hostname(), deserializedHost.hostname());
assertEquals(expectedHost.membership(), deserializedHost.membership());
assertEquals(expectedHost.flavor(), deserializedHost.flavor());
assertEquals(expectedHost.version(), deserializedHost.version());
assertEquals(expectedHost.networkPorts(), deserializedHost.networkPorts());
assertEquals(expectedHost.aliases(), deserializedHost.aliases());
assertEquals(expectedHost.requestedResources(), deserializedHost.requestedResources());
assertEquals(expectedHost.dockerImageRepo(), deserializedHost.dockerImageRepo());
}
} | class AllocatedHostsSerializerTest {
@Test
public void testAllocatedHostsSerialization() throws IOException {
NodeFlavors configuredFlavors = configuredFlavorsFrom("C/12/45/100", 12, 45, 100, 50, Flavor.Type.BARE_METAL);
Set<HostSpec> hosts = new LinkedHashSet<>();
hosts.add(new HostSpec("empty",
Optional.empty()));
hosts.add(new HostSpec("with-aliases",
List.of("alias1", "alias2")));
hosts.add(new HostSpec("allocated",
Optional.of(ClusterMembership.from("container/test/0/0", Version.fromString("6.73.1"),
Optional.of("docker.foo.com:4443/vespa/bar")))));
hosts.add(new HostSpec("flavor-from-resources-1",
Collections.emptyList(), new Flavor(new NodeResources(0.5, 3.1, 4, 1))));
hosts.add(new HostSpec("flavor-from-resources-2",
Collections.emptyList(),
Optional.of(new Flavor(new NodeResources(0.5, 3.1, 4, 1, NodeResources.DiskSpeed.slow))),
Optional.empty(),
Optional.empty(),
Optional.empty(),
Optional.of(new NodeResources(0.5, 3.1, 4, 1, NodeResources.DiskSpeed.any))));
hosts.add(new HostSpec("configured-flavor",
Collections.emptyList(), configuredFlavors.getFlavorOrThrow("C/12/45/100")));
hosts.add(new HostSpec("with-version",
Collections.emptyList(), Optional.empty(), Optional.empty(), Optional.of(Version.fromString("3.4.5"))));
hosts.add(new HostSpec("with-ports",
Collections.emptyList(), Optional.empty(), Optional.empty(), Optional.empty(),
Optional.of(new NetworkPorts(List.of(new NetworkPorts.Allocation(1234, "service1", "configId1", "suffix1"),
new NetworkPorts.Allocation(4567, "service2", "configId2", "suffix2"))))));
assertAllocatedHosts(AllocatedHosts.withHosts(hosts), configuredFlavors);
}
private HostSpec requireHost(String hostname, AllocatedHosts hosts) {
for (HostSpec host : hosts.getHosts())
if (host.hostname().equals(hostname))
return host;
throw new IllegalArgumentException("No host " + hostname + " is present");
}
private NodeFlavors configuredFlavorsFrom(String flavorName, double cpu, double mem, double disk, double bandwidth, Flavor.Type type) {
FlavorsConfig.Builder b = new FlavorsConfig.Builder();
FlavorsConfig.Flavor.Builder flavor = new FlavorsConfig.Flavor.Builder();
flavor.name(flavorName);
flavor.minDiskAvailableGb(disk);
flavor.minCpuCores(cpu);
flavor.minMainMemoryAvailableGb(mem);
flavor.bandwidth(bandwidth);
flavor.environment(type.name());
b.flavor(flavor);
return new NodeFlavors(b.build());
}
} | class AllocatedHostsSerializerTest {
@Test
public void testAllocatedHostsSerialization() throws IOException {
NodeFlavors configuredFlavors = configuredFlavorsFrom("C/12/45/100", 12, 45, 100, 50, Flavor.Type.BARE_METAL);
Set<HostSpec> hosts = new LinkedHashSet<>();
hosts.add(new HostSpec("empty",
Optional.empty()));
hosts.add(new HostSpec("with-aliases",
List.of("alias1", "alias2")));
hosts.add(new HostSpec("allocated",
Optional.of(ClusterMembership.from("container/test/0/0", Version.fromString("6.73.1"),
Optional.of("docker.foo.com:4443/vespa/bar")))));
hosts.add(new HostSpec("flavor-from-resources-1",
Collections.emptyList(), new Flavor(new NodeResources(0.5, 3.1, 4, 1))));
hosts.add(new HostSpec("flavor-from-resources-2",
Collections.emptyList(),
Optional.of(new Flavor(new NodeResources(0.5, 3.1, 4, 1, NodeResources.DiskSpeed.slow))),
Optional.empty(),
Optional.empty(),
Optional.empty(),
Optional.of(new NodeResources(0.5, 3.1, 4, 1, NodeResources.DiskSpeed.any))));
hosts.add(new HostSpec("configured-flavor",
Collections.emptyList(), configuredFlavors.getFlavorOrThrow("C/12/45/100")));
hosts.add(new HostSpec("with-version",
Collections.emptyList(), Optional.empty(), Optional.empty(), Optional.of(Version.fromString("3.4.5"))));
hosts.add(new HostSpec("with-ports",
Collections.emptyList(), Optional.empty(), Optional.empty(), Optional.empty(),
Optional.of(new NetworkPorts(List.of(new NetworkPorts.Allocation(1234, "service1", "configId1", "suffix1"),
new NetworkPorts.Allocation(4567, "service2", "configId2", "suffix2"))))));
assertAllocatedHosts(AllocatedHosts.withHosts(hosts), configuredFlavors);
}
private HostSpec requireHost(String hostname, AllocatedHosts hosts) {
for (HostSpec host : hosts.getHosts())
if (host.hostname().equals(hostname))
return host;
throw new IllegalArgumentException("No host " + hostname + " is present");
}
private NodeFlavors configuredFlavorsFrom(String flavorName, double cpu, double mem, double disk, double bandwidth, Flavor.Type type) {
FlavorsConfig.Builder b = new FlavorsConfig.Builder();
FlavorsConfig.Flavor.Builder flavor = new FlavorsConfig.Flavor.Builder();
flavor.name(flavorName);
flavor.minDiskAvailableGb(disk);
flavor.minCpuCores(cpu);
flavor.minMainMemoryAvailableGb(mem);
flavor.bandwidth(bandwidth);
flavor.environment(type.name());
b.flavor(flavor);
return new NodeFlavors(b.build());
}
} |
Why did this happen? | public void testNodeMetricsDb() {
ManualClock clock = new ManualClock();
NodeMetricsDb db = new NodeMetricsDb();
List<NodeMetrics.MetricValue> values = new ArrayList<>();
for (int i = 0; i < 40; i++) {
values.add(new NodeMetrics.MetricValue("host0", "cpu.util", clock.instant().getEpochSecond(), 0.9f));
clock.advance(Duration.ofHours(1));
}
db.add(values);
assertEquals(29, db.getWindow(clock.instant().minus(Duration.ofHours(30)), Resource.cpu, List.of("host0")).measurementCount());
assertEquals( 0, db.getWindow(clock.instant().minus(Duration.ofHours(30)), Resource.memory, List.of("host0")).measurementCount());
db.gc(clock);
assertEquals(23, db.getWindow(clock.instant().minus(Duration.ofHours(30)), Resource.cpu, List.of("host0")).measurementCount());
assertEquals( 0, db.getWindow(clock.instant().minus(Duration.ofHours(30)), Resource.memory, List.of("host0")).measurementCount());
} | assertEquals(29, db.getWindow(clock.instant().minus(Duration.ofHours(30)), Resource.cpu, List.of("host0")).measurementCount()); | public void testNodeMetricsDb() {
ManualClock clock = new ManualClock();
NodeMetricsDb db = new NodeMetricsDb();
List<NodeMetrics.MetricValue> values = new ArrayList<>();
for (int i = 0; i < 40; i++) {
values.add(new NodeMetrics.MetricValue("host0", "cpu.util", clock.instant().getEpochSecond(), 0.9f));
clock.advance(Duration.ofHours(1));
}
db.add(values);
assertEquals(29, db.getWindow(clock.instant().minus(Duration.ofHours(30)), Resource.cpu, List.of("host0")).measurementCount());
assertEquals( 0, db.getWindow(clock.instant().minus(Duration.ofHours(30)), Resource.memory, List.of("host0")).measurementCount());
db.gc(clock);
assertEquals(23, db.getWindow(clock.instant().minus(Duration.ofHours(30)), Resource.cpu, List.of("host0")).measurementCount());
assertEquals( 0, db.getWindow(clock.instant().minus(Duration.ofHours(30)), Resource.memory, List.of("host0")).measurementCount());
} | class NodeMetricsDbTest {
@Test
} | class NodeMetricsDbTest {
@Test
} |
We're now going through an API which uses seconds which round the current time to the nearest thousand, which causes the window to end up right at the boundary of the 30th value. | public void testNodeMetricsDb() {
// Record one cpu.util sample per hour for 40 hours, all for the same host.
ManualClock clock = new ManualClock();
NodeMetricsDb db = new NodeMetricsDb();
List<NodeMetrics.MetricValue> values = new ArrayList<>();
for (int i = 0; i < 40; i++) {
values.add(new NodeMetrics.MetricValue("host0", "cpu.util", clock.instant().getEpochSecond(), 0.9f));
clock.advance(Duration.ofHours(1));
}
db.add(values);
// A 30-hour window contains 29 cpu samples (presumably the window start falls
// exactly on a sample timestamp, excluding it — TODO confirm), and no memory
// samples since none were ever added.
assertEquals(29, db.getWindow(clock.instant().minus(Duration.ofHours(30)), Resource.cpu, List.of("host0")).measurementCount());
assertEquals( 0, db.getWindow(clock.instant().minus(Duration.ofHours(30)), Resource.memory, List.of("host0")).measurementCount());
// After gc the same window shrinks to 23 cpu samples (gc appears to evict older data).
db.gc(clock);
assertEquals(23, db.getWindow(clock.instant().minus(Duration.ofHours(30)), Resource.cpu, List.of("host0")).measurementCount());
assertEquals( 0, db.getWindow(clock.instant().minus(Duration.ofHours(30)), Resource.memory, List.of("host0")).measurementCount());
} | assertEquals(29, db.getWindow(clock.instant().minus(Duration.ofHours(30)), Resource.cpu, List.of("host0")).measurementCount()); | public void testNodeMetricsDb() {
// Record one cpu.util sample per hour for 40 hours, all for the same host.
ManualClock clock = new ManualClock();
NodeMetricsDb db = new NodeMetricsDb();
List<NodeMetrics.MetricValue> values = new ArrayList<>();
for (int i = 0; i < 40; i++) {
values.add(new NodeMetrics.MetricValue("host0", "cpu.util", clock.instant().getEpochSecond(), 0.9f));
clock.advance(Duration.ofHours(1));
}
db.add(values);
// A 30-hour window contains 29 cpu samples (presumably the window start falls
// exactly on a sample timestamp, excluding it — TODO confirm), and no memory
// samples since none were ever added.
assertEquals(29, db.getWindow(clock.instant().minus(Duration.ofHours(30)), Resource.cpu, List.of("host0")).measurementCount());
assertEquals( 0, db.getWindow(clock.instant().minus(Duration.ofHours(30)), Resource.memory, List.of("host0")).measurementCount());
// After gc the same window shrinks to 23 cpu samples (gc appears to evict older data).
db.gc(clock);
assertEquals(23, db.getWindow(clock.instant().minus(Duration.ofHours(30)), Resource.cpu, List.of("host0")).measurementCount());
assertEquals( 0, db.getWindow(clock.instant().minus(Duration.ofHours(30)), Resource.memory, List.of("host0")).measurementCount());
} | class NodeMetricsDbTest {
@Test
} | class NodeMetricsDbTest {
@Test
} |
Oh, so there was 1/1000 chance this test would fail before? If it ran exactly on the second? | public void testNodeMetricsDb() {
// Record one cpu.util sample per hour for 40 hours, all for the same host.
ManualClock clock = new ManualClock();
NodeMetricsDb db = new NodeMetricsDb();
List<NodeMetrics.MetricValue> values = new ArrayList<>();
for (int i = 0; i < 40; i++) {
values.add(new NodeMetrics.MetricValue("host0", "cpu.util", clock.instant().getEpochSecond(), 0.9f));
clock.advance(Duration.ofHours(1));
}
db.add(values);
// A 30-hour window contains 29 cpu samples (presumably the window start falls
// exactly on a sample timestamp, excluding it — TODO confirm), and no memory
// samples since none were ever added.
assertEquals(29, db.getWindow(clock.instant().minus(Duration.ofHours(30)), Resource.cpu, List.of("host0")).measurementCount());
assertEquals( 0, db.getWindow(clock.instant().minus(Duration.ofHours(30)), Resource.memory, List.of("host0")).measurementCount());
// After gc the same window shrinks to 23 cpu samples (gc appears to evict older data).
db.gc(clock);
assertEquals(23, db.getWindow(clock.instant().minus(Duration.ofHours(30)), Resource.cpu, List.of("host0")).measurementCount());
assertEquals( 0, db.getWindow(clock.instant().minus(Duration.ofHours(30)), Resource.memory, List.of("host0")).measurementCount());
} | assertEquals(29, db.getWindow(clock.instant().minus(Duration.ofHours(30)), Resource.cpu, List.of("host0")).measurementCount()); | public void testNodeMetricsDb() {
// Record one cpu.util sample per hour for 40 hours, all for the same host.
ManualClock clock = new ManualClock();
NodeMetricsDb db = new NodeMetricsDb();
List<NodeMetrics.MetricValue> values = new ArrayList<>();
for (int i = 0; i < 40; i++) {
values.add(new NodeMetrics.MetricValue("host0", "cpu.util", clock.instant().getEpochSecond(), 0.9f));
clock.advance(Duration.ofHours(1));
}
db.add(values);
// A 30-hour window contains 29 cpu samples (presumably the window start falls
// exactly on a sample timestamp, excluding it — TODO confirm), and no memory
// samples since none were ever added.
assertEquals(29, db.getWindow(clock.instant().minus(Duration.ofHours(30)), Resource.cpu, List.of("host0")).measurementCount());
assertEquals( 0, db.getWindow(clock.instant().minus(Duration.ofHours(30)), Resource.memory, List.of("host0")).measurementCount());
// After gc the same window shrinks to 23 cpu samples (gc appears to evict older data).
db.gc(clock);
assertEquals(23, db.getWindow(clock.instant().minus(Duration.ofHours(30)), Resource.cpu, List.of("host0")).measurementCount());
assertEquals( 0, db.getWindow(clock.instant().minus(Duration.ofHours(30)), Resource.memory, List.of("host0")).measurementCount());
} | class NodeMetricsDbTest {
@Test
} | class NodeMetricsDbTest {
@Test
} |
Yes, I think so. "Problem" here is I don't really care precisely how many ends up in the window .. | public void testNodeMetricsDb() {
// Record one cpu.util sample per hour for 40 hours, all for the same host.
ManualClock clock = new ManualClock();
NodeMetricsDb db = new NodeMetricsDb();
List<NodeMetrics.MetricValue> values = new ArrayList<>();
for (int i = 0; i < 40; i++) {
values.add(new NodeMetrics.MetricValue("host0", "cpu.util", clock.instant().getEpochSecond(), 0.9f));
clock.advance(Duration.ofHours(1));
}
db.add(values);
// A 30-hour window contains 29 cpu samples (presumably the window start falls
// exactly on a sample timestamp, excluding it — TODO confirm), and no memory
// samples since none were ever added.
assertEquals(29, db.getWindow(clock.instant().minus(Duration.ofHours(30)), Resource.cpu, List.of("host0")).measurementCount());
assertEquals( 0, db.getWindow(clock.instant().minus(Duration.ofHours(30)), Resource.memory, List.of("host0")).measurementCount());
// After gc the same window shrinks to 23 cpu samples (gc appears to evict older data).
db.gc(clock);
assertEquals(23, db.getWindow(clock.instant().minus(Duration.ofHours(30)), Resource.cpu, List.of("host0")).measurementCount());
assertEquals( 0, db.getWindow(clock.instant().minus(Duration.ofHours(30)), Resource.memory, List.of("host0")).measurementCount());
} | assertEquals(29, db.getWindow(clock.instant().minus(Duration.ofHours(30)), Resource.cpu, List.of("host0")).measurementCount()); | public void testNodeMetricsDb() {
// Record one cpu.util sample per hour for 40 hours, all for the same host.
ManualClock clock = new ManualClock();
NodeMetricsDb db = new NodeMetricsDb();
List<NodeMetrics.MetricValue> values = new ArrayList<>();
for (int i = 0; i < 40; i++) {
values.add(new NodeMetrics.MetricValue("host0", "cpu.util", clock.instant().getEpochSecond(), 0.9f));
clock.advance(Duration.ofHours(1));
}
db.add(values);
// A 30-hour window contains 29 cpu samples (presumably the window start falls
// exactly on a sample timestamp, excluding it — TODO confirm), and no memory
// samples since none were ever added.
assertEquals(29, db.getWindow(clock.instant().minus(Duration.ofHours(30)), Resource.cpu, List.of("host0")).measurementCount());
assertEquals( 0, db.getWindow(clock.instant().minus(Duration.ofHours(30)), Resource.memory, List.of("host0")).measurementCount());
// After gc the same window shrinks to 23 cpu samples (gc appears to evict older data).
db.gc(clock);
assertEquals(23, db.getWindow(clock.instant().minus(Duration.ofHours(30)), Resource.cpu, List.of("host0")).measurementCount());
assertEquals( 0, db.getWindow(clock.instant().minus(Duration.ofHours(30)), Resource.memory, List.of("host0")).measurementCount());
} | class NodeMetricsDbTest {
@Test
} | class NodeMetricsDbTest {
@Test
} |
```suggestion target.ifPresent(t -> log.info("Autoscale: " + application + " " + clusterType + " " + clusterId + ``` | private void autoscale(ApplicationId application, List<Node> applicationNodes) {
// Autoscales each cluster of the application while holding a maintenance deployment.
try (MaintenanceDeployment deployment = new MaintenanceDeployment(application, deployer, nodeRepository())) {
    // Skip applications for which a valid maintenance deployment cannot be obtained.
    if ( ! deployment.isValid()) return;
    nodesByCluster(applicationNodes).forEach((clusterId, clusterNodes) -> {
        Optional<AllocatableClusterResources> target = autoscaler.autoscale(clusterNodes);
        // Rate limit: log a suggestion at most once per hour per (application, cluster).
        Instant lastLogTime = lastLogged.get(new Pair<>(application, clusterId));
        if (lastLogTime == null || lastLogTime.isBefore(nodeRepository().clock().instant().minus(Duration.ofHours(1)))) {
            int currentGroups = (int) clusterNodes.stream().map(node -> node.allocation().get().membership().cluster().group()).distinct().count();
            ClusterSpec.Type clusterType = clusterNodes.get(0).allocation().get().membership().cluster().type();
            // Fix: a space was missing between the application id and the cluster type,
            // producing a mashed-together log line.
            // NOTE(review): consider skipping the lastLogged update when target is empty.
            target.ifPresent(t -> log.info("Autoscale: " + application + " " + clusterType + " " + clusterId +
                                           " from " + toString(clusterNodes.size(), currentGroups, clusterNodes.get(0).flavor().resources()) +
                                           " to " + toString(t.nodes(), t.groups(), t.advertisedResources())));
            lastLogged.put(new Pair<>(application, clusterId), nodeRepository().clock().instant());
        }
    });
}
} | target.ifPresent(t -> log.info("Autoscale: " + application + clusterType + " " + clusterId + | private void autoscale(ApplicationId application, List<Node> applicationNodes) {
// Autoscales each cluster of the application while holding a maintenance deployment.
try (MaintenanceDeployment deployment = new MaintenanceDeployment(application, deployer, nodeRepository())) {
// Skip applications for which a valid maintenance deployment cannot be obtained.
if ( ! deployment.isValid()) return;
nodesByCluster(applicationNodes).forEach((clusterId, clusterNodes) -> autoscale(application, clusterId, clusterNodes));
}
} | class AutoscalingMaintainer extends Maintainer {
private final Autoscaler autoscaler;
private final Deployer deployer;
// Last time an autoscaling suggestion was logged, per (application, cluster); used to rate-limit logging.
private final Map<Pair<ApplicationId, ClusterSpec.Id>, Instant> lastLogged = new HashMap<>();
public AutoscalingMaintainer(NodeRepository nodeRepository,
HostResourcesCalculator hostResourcesCalculator,
NodeMetricsDb metricsDb,
Deployer deployer,
Duration interval) {
super(nodeRepository, interval);
this.autoscaler = new Autoscaler(hostResourcesCalculator, metricsDb, nodeRepository);
this.deployer = deployer;
}
@Override
protected void maintain() {
// Autoscaling is only attempted in production zones.
if ( ! nodeRepository().zone().environment().isProduction()) return;
activeNodesByApplication().forEach((applicationId, nodes) -> autoscale(applicationId, nodes))
;
// Renders "<nodes> [in <groups> groups] * <resources> (total: [...])" for log messages.
private String toString(int nodes, int groups, NodeResources resources) {
return nodes +
(groups > 1 ? " in " + groups + " groups " : " ") +
" * " + resources +
" (total: " + "[vcpu: " + nodes * resources.vcpu() + ", memory: " + nodes * resources.memoryGb() + " Gb, disk " + nodes * resources.diskGb() + " Gb])";
}
// Groups an application's nodes by the cluster they are allocated to.
private Map<ClusterSpec.Id, List<Node>> nodesByCluster(List<Node> applicationNodes) {
return applicationNodes.stream().collect(Collectors.groupingBy(n -> n.allocation().get().membership().cluster().id()));
}
} | class AutoscalingMaintainer extends Maintainer {
private final Autoscaler autoscaler;
private final Deployer deployer;
// Last time an autoscaling suggestion was logged, per (application, cluster); used to rate-limit logging.
private final Map<Pair<ApplicationId, ClusterSpec.Id>, Instant> lastLogged = new HashMap<>();
public AutoscalingMaintainer(NodeRepository nodeRepository,
HostResourcesCalculator hostResourcesCalculator,
NodeMetricsDb metricsDb,
Deployer deployer,
Duration interval) {
super(nodeRepository, interval);
this.autoscaler = new Autoscaler(hostResourcesCalculator, metricsDb, nodeRepository);
this.deployer = deployer;
}
@Override
protected void maintain() {
// Autoscaling is only attempted in production zones.
if ( ! nodeRepository().zone().environment().isProduction()) return;
activeNodesByApplication().forEach((applicationId, nodes) -> autoscale(applicationId, nodes));
}
// Computes the autoscaling target for one cluster and logs it at most once per hour.
private void autoscale(ApplicationId application, ClusterSpec.Id clusterId, List<Node> clusterNodes) {
Optional<AllocatableClusterResources> target = autoscaler.autoscale(clusterNodes);
// No target produced for this cluster — nothing to log.
if (target.isEmpty()) return;
Instant lastLogTime = lastLogged.get(new Pair<>(application, clusterId));
// Rate limit: at most one log line per hour per (application, cluster).
if (lastLogTime != null && lastLogTime.isAfter(nodeRepository().clock().instant().minus(Duration.ofHours(1)))) return;
int currentGroups = (int) clusterNodes.stream().map(node -> node.allocation().get().membership().cluster().group()).distinct().count();
ClusterSpec.Type clusterType = clusterNodes.get(0).allocation().get().membership().cluster().type();
log.info("Autoscale: " + application + " " + clusterType + " " + clusterId +
" from " + toString(clusterNodes.size(), currentGroups, clusterNodes.get(0).flavor().resources()) +
" to " + toString(target.get().nodes(), target.get().groups(), target.get().advertisedResources()));
lastLogged.put(new Pair<>(application, clusterId), nodeRepository().clock().instant());
}
// Renders "<nodes> [in <groups> groups] * <resources> (total: [...])" for log messages.
private String toString(int nodes, int groups, NodeResources resources) {
return nodes +
(groups > 1 ? " in " + groups + " groups " : " ") +
" * " + resources +
" (total: " + "[vcpu: " + nodes * resources.vcpu() + ", memory: " + nodes * resources.memoryGb() + " Gb, disk " + nodes * resources.diskGb() + " Gb])";
}
// Groups an application's nodes by the cluster they are allocated to.
private Map<ClusterSpec.Id, List<Node>> nodesByCluster(List<Node> applicationNodes) {
return applicationNodes.stream().collect(Collectors.groupingBy(n -> n.allocation().get().membership().cluster().id()));
}
} |
Probably want to skip all of this if target is empty? | private void autoscale(ApplicationId application, List<Node> applicationNodes) {
// Autoscales each cluster of the application while holding a maintenance deployment.
try (MaintenanceDeployment deployment = new MaintenanceDeployment(application, deployer, nodeRepository())) {
    // Skip applications for which a valid maintenance deployment cannot be obtained.
    if ( ! deployment.isValid()) return;
    nodesByCluster(applicationNodes).forEach((clusterId, clusterNodes) -> {
        Optional<AllocatableClusterResources> target = autoscaler.autoscale(clusterNodes);
        // Rate limit: log a suggestion at most once per hour per (application, cluster).
        Instant lastLogTime = lastLogged.get(new Pair<>(application, clusterId));
        if (lastLogTime == null || lastLogTime.isBefore(nodeRepository().clock().instant().minus(Duration.ofHours(1)))) {
            int currentGroups = (int) clusterNodes.stream().map(node -> node.allocation().get().membership().cluster().group()).distinct().count();
            ClusterSpec.Type clusterType = clusterNodes.get(0).allocation().get().membership().cluster().type();
            // Fix: a space was missing between the application id and the cluster type,
            // producing a mashed-together log line.
            // NOTE(review): consider skipping the lastLogged update when target is empty.
            target.ifPresent(t -> log.info("Autoscale: " + application + " " + clusterType + " " + clusterId +
                                           " from " + toString(clusterNodes.size(), currentGroups, clusterNodes.get(0).flavor().resources()) +
                                           " to " + toString(t.nodes(), t.groups(), t.advertisedResources())));
            lastLogged.put(new Pair<>(application, clusterId), nodeRepository().clock().instant());
        }
    });
}
} | if (lastLogTime == null || lastLogTime.isBefore(nodeRepository().clock().instant().minus(Duration.ofHours(1)))) { | private void autoscale(ApplicationId application, List<Node> applicationNodes) {
// Autoscales each cluster of the application while holding a maintenance deployment.
try (MaintenanceDeployment deployment = new MaintenanceDeployment(application, deployer, nodeRepository())) {
// Skip applications for which a valid maintenance deployment cannot be obtained.
if ( ! deployment.isValid()) return;
nodesByCluster(applicationNodes).forEach((clusterId, clusterNodes) -> autoscale(application, clusterId, clusterNodes));
}
} | class AutoscalingMaintainer extends Maintainer {
private final Autoscaler autoscaler;
private final Deployer deployer;
// Last time an autoscaling suggestion was logged, per (application, cluster); used to rate-limit logging.
private final Map<Pair<ApplicationId, ClusterSpec.Id>, Instant> lastLogged = new HashMap<>();
public AutoscalingMaintainer(NodeRepository nodeRepository,
HostResourcesCalculator hostResourcesCalculator,
NodeMetricsDb metricsDb,
Deployer deployer,
Duration interval) {
super(nodeRepository, interval);
this.autoscaler = new Autoscaler(hostResourcesCalculator, metricsDb, nodeRepository);
this.deployer = deployer;
}
@Override
protected void maintain() {
// Autoscaling is only attempted in production zones.
if ( ! nodeRepository().zone().environment().isProduction()) return;
activeNodesByApplication().forEach((applicationId, nodes) -> autoscale(applicationId, nodes));
}
// Renders "<nodes> [in <groups> groups] * <resources> (total: [...])" for log messages.
private String toString(int nodes, int groups, NodeResources resources) {
return nodes +
(groups > 1 ? " in " + groups + " groups " : " ") +
" * " + resources +
" (total: " + "[vcpu: " + nodes * resources.vcpu() + ", memory: " + nodes * resources.memoryGb() + " Gb, disk " + nodes * resources.diskGb() + " Gb])";
}
// Groups an application's nodes by the cluster they are allocated to.
private Map<ClusterSpec.Id, List<Node>> nodesByCluster(List<Node> applicationNodes) {
return applicationNodes.stream().collect(Collectors.groupingBy(n -> n.allocation().get().membership().cluster().id()));
}
} | class AutoscalingMaintainer extends Maintainer {
private final Autoscaler autoscaler;
private final Deployer deployer;
// Last time an autoscaling suggestion was logged, per (application, cluster); used to rate-limit logging.
private final Map<Pair<ApplicationId, ClusterSpec.Id>, Instant> lastLogged = new HashMap<>();
public AutoscalingMaintainer(NodeRepository nodeRepository,
HostResourcesCalculator hostResourcesCalculator,
NodeMetricsDb metricsDb,
Deployer deployer,
Duration interval) {
super(nodeRepository, interval);
this.autoscaler = new Autoscaler(hostResourcesCalculator, metricsDb, nodeRepository);
this.deployer = deployer;
}
@Override
protected void maintain() {
// Autoscaling is only attempted in production zones.
if ( ! nodeRepository().zone().environment().isProduction()) return;
activeNodesByApplication().forEach((applicationId, nodes) -> autoscale(applicationId, nodes));
}
// Computes the autoscaling target for one cluster and logs it at most once per hour.
private void autoscale(ApplicationId application, ClusterSpec.Id clusterId, List<Node> clusterNodes) {
Optional<AllocatableClusterResources> target = autoscaler.autoscale(clusterNodes);
// No target produced for this cluster — nothing to log.
if (target.isEmpty()) return;
Instant lastLogTime = lastLogged.get(new Pair<>(application, clusterId));
// Rate limit: at most one log line per hour per (application, cluster).
if (lastLogTime != null && lastLogTime.isAfter(nodeRepository().clock().instant().minus(Duration.ofHours(1)))) return;
int currentGroups = (int) clusterNodes.stream().map(node -> node.allocation().get().membership().cluster().group()).distinct().count();
ClusterSpec.Type clusterType = clusterNodes.get(0).allocation().get().membership().cluster().type();
log.info("Autoscale: " + application + " " + clusterType + " " + clusterId +
" from " + toString(clusterNodes.size(), currentGroups, clusterNodes.get(0).flavor().resources()) +
" to " + toString(target.get().nodes(), target.get().groups(), target.get().advertisedResources()));
lastLogged.put(new Pair<>(application, clusterId), nodeRepository().clock().instant());
}
// Renders "<nodes> [in <groups> groups] * <resources> (total: [...])" for log messages.
private String toString(int nodes, int groups, NodeResources resources) {
return nodes +
(groups > 1 ? " in " + groups + " groups " : " ") +
" * " + resources +
" (total: " + "[vcpu: " + nodes * resources.vcpu() + ", memory: " + nodes * resources.memoryGb() + " Gb, disk " + nodes * resources.diskGb() + " Gb])";
}
// Groups an application's nodes by the cluster they are allocated to.
private Map<ClusterSpec.Id, List<Node>> nodesByCluster(List<Node> applicationNodes) {
return applicationNodes.stream().collect(Collectors.groupingBy(n -> n.allocation().get().membership().cluster().id()));
}
} |
It's slightly more messy, but ok | private void autoscale(ApplicationId application, List<Node> applicationNodes) {
// Autoscales each cluster of the application while holding a maintenance deployment.
try (MaintenanceDeployment deployment = new MaintenanceDeployment(application, deployer, nodeRepository())) {
    // Skip applications for which a valid maintenance deployment cannot be obtained.
    if ( ! deployment.isValid()) return;
    nodesByCluster(applicationNodes).forEach((clusterId, clusterNodes) -> {
        Optional<AllocatableClusterResources> target = autoscaler.autoscale(clusterNodes);
        // Rate limit: log a suggestion at most once per hour per (application, cluster).
        Instant lastLogTime = lastLogged.get(new Pair<>(application, clusterId));
        if (lastLogTime == null || lastLogTime.isBefore(nodeRepository().clock().instant().minus(Duration.ofHours(1)))) {
            int currentGroups = (int) clusterNodes.stream().map(node -> node.allocation().get().membership().cluster().group()).distinct().count();
            ClusterSpec.Type clusterType = clusterNodes.get(0).allocation().get().membership().cluster().type();
            // Fix: a space was missing between the application id and the cluster type,
            // producing a mashed-together log line.
            // NOTE(review): consider skipping the lastLogged update when target is empty.
            target.ifPresent(t -> log.info("Autoscale: " + application + " " + clusterType + " " + clusterId +
                                           " from " + toString(clusterNodes.size(), currentGroups, clusterNodes.get(0).flavor().resources()) +
                                           " to " + toString(t.nodes(), t.groups(), t.advertisedResources())));
            lastLogged.put(new Pair<>(application, clusterId), nodeRepository().clock().instant());
        }
    });
}
} | if (lastLogTime == null || lastLogTime.isBefore(nodeRepository().clock().instant().minus(Duration.ofHours(1)))) { | private void autoscale(ApplicationId application, List<Node> applicationNodes) {
// Autoscales each cluster of the application while holding a maintenance deployment.
try (MaintenanceDeployment deployment = new MaintenanceDeployment(application, deployer, nodeRepository())) {
// Skip applications for which a valid maintenance deployment cannot be obtained.
if ( ! deployment.isValid()) return;
nodesByCluster(applicationNodes).forEach((clusterId, clusterNodes) -> autoscale(application, clusterId, clusterNodes));
}
} | class AutoscalingMaintainer extends Maintainer {
private final Autoscaler autoscaler;
private final Deployer deployer;
// Last time an autoscaling suggestion was logged, per (application, cluster); used to rate-limit logging.
private final Map<Pair<ApplicationId, ClusterSpec.Id>, Instant> lastLogged = new HashMap<>();
public AutoscalingMaintainer(NodeRepository nodeRepository,
HostResourcesCalculator hostResourcesCalculator,
NodeMetricsDb metricsDb,
Deployer deployer,
Duration interval) {
super(nodeRepository, interval);
this.autoscaler = new Autoscaler(hostResourcesCalculator, metricsDb, nodeRepository);
this.deployer = deployer;
}
@Override
protected void maintain() {
// Autoscaling is only attempted in production zones.
if ( ! nodeRepository().zone().environment().isProduction()) return;
activeNodesByApplication().forEach((applicationId, nodes) -> autoscale(applicationId, nodes));
}
// Renders "<nodes> [in <groups> groups] * <resources> (total: [...])" for log messages.
private String toString(int nodes, int groups, NodeResources resources) {
return nodes +
(groups > 1 ? " in " + groups + " groups " : " ") +
" * " + resources +
" (total: " + "[vcpu: " + nodes * resources.vcpu() + ", memory: " + nodes * resources.memoryGb() + " Gb, disk " + nodes * resources.diskGb() + " Gb])";
}
// Groups an application's nodes by the cluster they are allocated to.
private Map<ClusterSpec.Id, List<Node>> nodesByCluster(List<Node> applicationNodes) {
return applicationNodes.stream().collect(Collectors.groupingBy(n -> n.allocation().get().membership().cluster().id()));
}
} | class AutoscalingMaintainer extends Maintainer {
private final Autoscaler autoscaler;
private final Deployer deployer;
// Last time an autoscaling suggestion was logged, per (application, cluster); used to rate-limit logging.
private final Map<Pair<ApplicationId, ClusterSpec.Id>, Instant> lastLogged = new HashMap<>();
public AutoscalingMaintainer(NodeRepository nodeRepository,
HostResourcesCalculator hostResourcesCalculator,
NodeMetricsDb metricsDb,
Deployer deployer,
Duration interval) {
super(nodeRepository, interval);
this.autoscaler = new Autoscaler(hostResourcesCalculator, metricsDb, nodeRepository);
this.deployer = deployer;
}
@Override
protected void maintain() {
// Autoscaling is only attempted in production zones.
if ( ! nodeRepository().zone().environment().isProduction()) return;
activeNodesByApplication().forEach((applicationId, nodes) -> autoscale(applicationId, nodes));
}
// Computes the autoscaling target for one cluster and logs it at most once per hour.
private void autoscale(ApplicationId application, ClusterSpec.Id clusterId, List<Node> clusterNodes) {
Optional<AllocatableClusterResources> target = autoscaler.autoscale(clusterNodes);
// No target produced for this cluster — nothing to log.
if (target.isEmpty()) return;
Instant lastLogTime = lastLogged.get(new Pair<>(application, clusterId));
// Rate limit: at most one log line per hour per (application, cluster).
if (lastLogTime != null && lastLogTime.isAfter(nodeRepository().clock().instant().minus(Duration.ofHours(1)))) return;
int currentGroups = (int) clusterNodes.stream().map(node -> node.allocation().get().membership().cluster().group()).distinct().count();
ClusterSpec.Type clusterType = clusterNodes.get(0).allocation().get().membership().cluster().type();
log.info("Autoscale: " + application + " " + clusterType + " " + clusterId +
" from " + toString(clusterNodes.size(), currentGroups, clusterNodes.get(0).flavor().resources()) +
" to " + toString(target.get().nodes(), target.get().groups(), target.get().advertisedResources()));
lastLogged.put(new Pair<>(application, clusterId), nodeRepository().clock().instant());
}
// Renders "<nodes> [in <groups> groups] * <resources> (total: [...])" for log messages.
private String toString(int nodes, int groups, NodeResources resources) {
return nodes +
(groups > 1 ? " in " + groups + " groups " : " ") +
" * " + resources +
" (total: " + "[vcpu: " + nodes * resources.vcpu() + ", memory: " + nodes * resources.memoryGb() + " Gb, disk " + nodes * resources.diskGb() + " Gb])";
}
// Groups an application's nodes by the cluster they are allocated to.
private Map<ClusterSpec.Id, List<Node>> nodesByCluster(List<Node> applicationNodes) {
return applicationNodes.stream().collect(Collectors.groupingBy(n -> n.allocation().get().membership().cluster().id()));
}
} |
will this fail the first redeploy (after config server is upgraded) ? or will this only be evaluated when application is deployed with the new version? | private static AthenzDomain getAccessControlDomain(DeployState deployState, Element accessControlElem) {
// The Athenz domain supplied by the deploy call, if any.
AthenzDomain tenantDomain = deployState.getProperties().athenzDomain().orElse(null);
// The (deprecated) 'domain' attribute on <access-control>, if present.
AthenzDomain explicitDomain = XmlHelper.getOptionalAttribute(accessControlElem, "domain")
        .map(AthenzDomain::from)
        .orElse(null);
if (tenantDomain == null) {
    // Neither source provided a domain: cannot configure access control.
    if (explicitDomain == null) {
        throw new IllegalStateException("No Athenz domain provided for 'access-control'");
    }
    deployState.getDeployLogger().log(Level.WARNING, "Athenz tenant is not provided by deploy call. This will soon be handled as failure.");
}
if (explicitDomain != null) {
    // Fix: fail only when an explicit domain is given AND it differs from the tenant
    // domain. The previous check threw when the domains were EQUAL (inverted condition).
    // Also fix: the format arguments were swapped relative to the message placeholders.
    if (tenantDomain != null && ! explicitDomain.equals(tenantDomain)) {
        throw new IllegalArgumentException(
                String.format("Domain in access-control ('%s') does not match tenant domain ('%s')", explicitDomain, tenantDomain));
    }
    deployState.getDeployLogger().log(Level.WARNING, "Domain in 'access-control' is deprecated and will be removed soon");
}
// Prefer the tenant domain; fall back to the explicit one (at least one is non-null here).
return tenantDomain != null ? tenantDomain : explicitDomain;
} | AthenzDomain tenantDomain = deployState.getProperties().athenzDomain().orElse(null); | private static AthenzDomain getAccessControlDomain(DeployState deployState, Element accessControlElem) {
// The Athenz domain supplied by the deploy call, if any.
AthenzDomain tenantDomain = deployState.getProperties().athenzDomain().orElse(null);
// The (deprecated) 'domain' attribute on <access-control>, if present.
AthenzDomain explicitDomain = XmlHelper.getOptionalAttribute(accessControlElem, "domain")
.map(AthenzDomain::from)
.orElse(null);
if (tenantDomain == null) {
// Neither source provided a domain: cannot configure access control.
if (explicitDomain == null) {
throw new IllegalStateException("No Athenz domain provided for 'access-control'");
}
deployState.getDeployLogger().log(Level.WARNING, "Athenz tenant is not provided by deploy call. This will soon be handled as failure.");
}
if (explicitDomain != null) {
// When both domains are known they must agree.
// NOTE(review): the format arguments look swapped relative to the message placeholders — verify.
if (tenantDomain != null && !explicitDomain.equals(tenantDomain)) {
throw new IllegalArgumentException(
String.format("Domain in access-control ('%s') does not match tenant domain ('%s')", tenantDomain, explicitDomain));
}
deployState.getDeployLogger().log(Level.WARNING, "Domain in 'access-control' is deprecated and will be removed soon");
}
// Prefer the tenant domain; fall back to the explicit one.
return tenantDomain != null ? tenantDomain : explicitDomain;
} | class HttpBuilder extends VespaDomBuilder.DomConfigProducerBuilder<Http> {
@Override
// Builds the Http model from the <http> element: filter chains, filter bindings,
// optional access control, and the http server(s).
protected Http doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element spec) {
FilterChains filterChains;
List<Binding> bindings = new ArrayList<>();
AccessControl accessControl = null;
Element filteringElem = XML.getChild(spec, "filtering");
if (filteringElem != null) {
filterChains = new FilterChainsBuilder().build(deployState, ancestor, filteringElem);
bindings = readFilterBindings(filteringElem, deployState.getDeployLogger());
Element accessControlElem = XML.getChild(filteringElem, "access-control");
if (accessControlElem != null) {
// Access control contributes its own bindings plus an empty dedicated filter chain.
accessControl = buildAccessControl(deployState, ancestor, accessControlElem);
bindings.addAll(accessControl.getBindings());
filterChains.add(new Chain<>(FilterChains.emptyChainSpec(ACCESS_CONTROL_CHAIN_ID)));
}
} else {
// No <filtering> element: create an empty chains container.
filterChains = new FilterChainsBuilder().newChainsInstance(ancestor);
}
Http http = new Http(bindings, accessControl);
http.setFilterChains(filterChains);
buildHttpServers(deployState, ancestor, http, spec);
return http;
}
// Assembles the AccessControl model from an <access-control> element.
private AccessControl buildAccessControl(DeployState deployState, AbstractConfigProducer ancestor, Element accessControlElem) {
AthenzDomain domain = getAccessControlDomain(deployState, accessControlElem);
AccessControl.Builder builder = new AccessControl.Builder(domain.value(), deployState.getDeployLogger());
// Attach the enclosing container cluster's handlers, when one exists.
getContainerCluster(ancestor).ifPresent(builder::setHandlers);
// Optional 'read'/'write' attributes toggle the corresponding protection.
XmlHelper.getOptionalAttribute(accessControlElem, "read").ifPresent(
readAttr -> builder.readEnabled(Boolean.valueOf(readAttr)));
XmlHelper.getOptionalAttribute(accessControlElem, "write").ifPresent(
writeAttr -> builder.writeEnabled(Boolean.valueOf(writeAttr)));
// Bindings listed under <exclude> are exempted from access control.
Element excludeElem = XML.getChild(accessControlElem, "exclude");
if (excludeElem != null) {
XML.getChildren(excludeElem, "binding").stream()
.map(XML::getValue)
.forEach(builder::excludeBinding);
}
return builder.build();
}
// Walks up the config producer ancestry until an ApplicationContainerCluster is
// found; returns empty when the root is reached without finding one.
private static Optional<ApplicationContainerCluster> getContainerCluster(AbstractConfigProducer configProducer) {
    AbstractConfigProducer candidate = configProducer;
    while (!(candidate instanceof ApplicationContainerCluster)) {
        candidate = candidate.getParent();
        if (candidate == null)
            return Optional.empty();
    }
    return Optional.of((ApplicationContainerCluster) candidate);
}
// Collects the filter chain bindings declared under all request-chain and
// response-chain children of the <filtering> element.
private List<Binding> readFilterBindings(Element filteringSpec, DeployLogger logger) {
    List<Binding> bindings = new ArrayList<>();
    for (Element chainElem : XML.getChildren(filteringSpec)) {
        String tag = chainElem.getTagName();
        if (!tag.equals("request-chain") && !tag.equals("response-chain")) continue;
        ComponentSpecification chainId = XmlHelper.getIdRef(chainElem);
        for (Element bindingElem : XML.getChildren(chainElem, "binding"))
            bindings.add(Binding.create(chainId, XML.getValue(bindingElem), logger));
    }
    return bindings;
}
// Builds the Jetty http server model for this <http> element and attaches it to the Http model.
private void buildHttpServers(DeployState deployState, AbstractConfigProducer ancestor, Http http, Element spec) {
http.setHttpServer(new JettyHttpServerBuilder().build(deployState, ancestor, spec));
}
// Resolves the port for an http server element, falling back to the default
// Vespa web service port when none is configured. In hosted Vespa only the
// standard base port is accepted unless the element is marked required.
static int readPort(ModelElement spec, boolean isHosted, DeployLogger logger) {
    Integer configuredPort = spec.integerAttribute("port");
    if (configuredPort == null) {
        return Defaults.getDefaults().vespaWebServicePort();
    }
    if (configuredPort < 0) {
        throw new IllegalArgumentException("Invalid port " + configuredPort);
    }
    int legalPortInHostedVespa = Container.BASEPORT;
    if (isHosted && configuredPort != legalPortInHostedVespa && ! spec.booleanAttribute("required", false)) {
        throw new IllegalArgumentException("Illegal port " + configuredPort + " in http server '" +
                                           spec.stringAttribute("id") + "'" +
                                           ": Port must be set to " + legalPortInHostedVespa);
    }
    return configuredPort;
}
} | class HttpBuilder extends VespaDomBuilder.DomConfigProducerBuilder<Http> {
@Override
// Builds the Http model from the <http> element: filter chains, filter bindings,
// optional access control, and the http server(s).
protected Http doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element spec) {
FilterChains filterChains;
List<Binding> bindings = new ArrayList<>();
AccessControl accessControl = null;
Element filteringElem = XML.getChild(spec, "filtering");
if (filteringElem != null) {
filterChains = new FilterChainsBuilder().build(deployState, ancestor, filteringElem);
bindings = readFilterBindings(filteringElem, deployState.getDeployLogger());
Element accessControlElem = XML.getChild(filteringElem, "access-control");
if (accessControlElem != null) {
// Access control contributes its own bindings plus an empty dedicated filter chain.
accessControl = buildAccessControl(deployState, ancestor, accessControlElem);
bindings.addAll(accessControl.getBindings());
filterChains.add(new Chain<>(FilterChains.emptyChainSpec(ACCESS_CONTROL_CHAIN_ID)));
}
} else {
// No <filtering> element: create an empty chains container.
filterChains = new FilterChainsBuilder().newChainsInstance(ancestor);
}
Http http = new Http(bindings, accessControl);
http.setFilterChains(filterChains);
buildHttpServers(deployState, ancestor, http, spec);
return http;
}
// Assembles the AccessControl model from an <access-control> element.
private AccessControl buildAccessControl(DeployState deployState, AbstractConfigProducer ancestor, Element accessControlElem) {
AthenzDomain domain = getAccessControlDomain(deployState, accessControlElem);
AccessControl.Builder builder = new AccessControl.Builder(domain.value(), deployState.getDeployLogger());
// Attach the enclosing container cluster's handlers, when one exists.
getContainerCluster(ancestor).ifPresent(builder::setHandlers);
// Optional 'read'/'write' attributes toggle the corresponding protection.
XmlHelper.getOptionalAttribute(accessControlElem, "read").ifPresent(
readAttr -> builder.readEnabled(Boolean.valueOf(readAttr)));
XmlHelper.getOptionalAttribute(accessControlElem, "write").ifPresent(
writeAttr -> builder.writeEnabled(Boolean.valueOf(writeAttr)));
// Bindings listed under <exclude> are exempted from access control.
Element excludeElem = XML.getChild(accessControlElem, "exclude");
if (excludeElem != null) {
XML.getChildren(excludeElem, "binding").stream()
.map(XML::getValue)
.forEach(builder::excludeBinding);
}
return builder.build();
}
// Walks up the config producer ancestry until an ApplicationContainerCluster is
// found; returns empty when the root is reached without finding one.
private static Optional<ApplicationContainerCluster> getContainerCluster(AbstractConfigProducer configProducer) {
    AbstractConfigProducer candidate = configProducer;
    while (!(candidate instanceof ApplicationContainerCluster)) {
        candidate = candidate.getParent();
        if (candidate == null)
            return Optional.empty();
    }
    return Optional.of((ApplicationContainerCluster) candidate);
}
// Collects the filter chain bindings declared under all request-chain and
// response-chain children of the <filtering> element.
private List<Binding> readFilterBindings(Element filteringSpec, DeployLogger logger) {
    List<Binding> bindings = new ArrayList<>();
    for (Element chainElem : XML.getChildren(filteringSpec)) {
        String tag = chainElem.getTagName();
        if (!tag.equals("request-chain") && !tag.equals("response-chain")) continue;
        ComponentSpecification chainId = XmlHelper.getIdRef(chainElem);
        for (Element bindingElem : XML.getChildren(chainElem, "binding"))
            bindings.add(Binding.create(chainId, XML.getValue(bindingElem), logger));
    }
    return bindings;
}
// Builds the Jetty http server model for this <http> element and attaches it to the Http model.
private void buildHttpServers(DeployState deployState, AbstractConfigProducer ancestor, Http http, Element spec) {
http.setHttpServer(new JettyHttpServerBuilder().build(deployState, ancestor, spec));
}
// Resolves the port for an http server element, falling back to the default
// Vespa web service port when none is configured. In hosted Vespa only the
// standard base port is accepted unless the element is marked required.
static int readPort(ModelElement spec, boolean isHosted, DeployLogger logger) {
    Integer configuredPort = spec.integerAttribute("port");
    if (configuredPort == null) {
        return Defaults.getDefaults().vespaWebServicePort();
    }
    if (configuredPort < 0) {
        throw new IllegalArgumentException("Invalid port " + configuredPort);
    }
    int legalPortInHostedVespa = Container.BASEPORT;
    if (isHosted && configuredPort != legalPortInHostedVespa && ! spec.booleanAttribute("required", false)) {
        throw new IllegalArgumentException("Illegal port " + configuredPort + " in http server '" +
                                           spec.stringAttribute("id") + "'" +
                                           ": Port must be set to " + legalPortInHostedVespa);
    }
    return configuredPort;
}
} |
Only if `domain` attribute is missing from `access-control`. This attribute has been mandatory up to now, so it should not be the case. | private static AthenzDomain getAccessControlDomain(DeployState deployState, Element accessControlElem) {
AthenzDomain tenantDomain = deployState.getProperties().athenzDomain().orElse(null);
AthenzDomain explicitDomain = XmlHelper.getOptionalAttribute(accessControlElem, "domain")
.map(AthenzDomain::from)
.orElse(null);
if (tenantDomain == null) {
if (explicitDomain == null) {
throw new IllegalStateException("No Athenz domain provided for 'access-control'");
}
deployState.getDeployLogger().log(Level.WARNING, "Athenz tenant is not provided by deploy call. This will soon be handled as failure.");
}
if (explicitDomain != null) {
if (explicitDomain.equals(tenantDomain)) {
throw new IllegalArgumentException(
String.format("Domain in access-control ('%s') does not match tenant domain ('%s')", tenantDomain, explicitDomain));
}
deployState.getDeployLogger().log(Level.WARNING, "Domain in 'access-control' is deprecated and will be removed soon");
}
return tenantDomain != null ? tenantDomain : explicitDomain;
} | AthenzDomain tenantDomain = deployState.getProperties().athenzDomain().orElse(null); | private static AthenzDomain getAccessControlDomain(DeployState deployState, Element accessControlElem) {
AthenzDomain tenantDomain = deployState.getProperties().athenzDomain().orElse(null);
AthenzDomain explicitDomain = XmlHelper.getOptionalAttribute(accessControlElem, "domain")
.map(AthenzDomain::from)
.orElse(null);
if (tenantDomain == null) {
if (explicitDomain == null) {
throw new IllegalStateException("No Athenz domain provided for 'access-control'");
}
deployState.getDeployLogger().log(Level.WARNING, "Athenz tenant is not provided by deploy call. This will soon be handled as failure.");
}
if (explicitDomain != null) {
if (tenantDomain != null && !explicitDomain.equals(tenantDomain)) {
throw new IllegalArgumentException(
String.format("Domain in access-control ('%s') does not match tenant domain ('%s')", tenantDomain, explicitDomain));
}
deployState.getDeployLogger().log(Level.WARNING, "Domain in 'access-control' is deprecated and will be removed soon");
}
return tenantDomain != null ? tenantDomain : explicitDomain;
} | class HttpBuilder extends VespaDomBuilder.DomConfigProducerBuilder<Http> {
@Override
protected Http doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element spec) {
FilterChains filterChains;
List<Binding> bindings = new ArrayList<>();
AccessControl accessControl = null;
Element filteringElem = XML.getChild(spec, "filtering");
if (filteringElem != null) {
filterChains = new FilterChainsBuilder().build(deployState, ancestor, filteringElem);
bindings = readFilterBindings(filteringElem, deployState.getDeployLogger());
Element accessControlElem = XML.getChild(filteringElem, "access-control");
if (accessControlElem != null) {
accessControl = buildAccessControl(deployState, ancestor, accessControlElem);
bindings.addAll(accessControl.getBindings());
filterChains.add(new Chain<>(FilterChains.emptyChainSpec(ACCESS_CONTROL_CHAIN_ID)));
}
} else {
filterChains = new FilterChainsBuilder().newChainsInstance(ancestor);
}
Http http = new Http(bindings, accessControl);
http.setFilterChains(filterChains);
buildHttpServers(deployState, ancestor, http, spec);
return http;
}
private AccessControl buildAccessControl(DeployState deployState, AbstractConfigProducer ancestor, Element accessControlElem) {
AthenzDomain domain = getAccessControlDomain(deployState, accessControlElem);
AccessControl.Builder builder = new AccessControl.Builder(domain.value(), deployState.getDeployLogger());
getContainerCluster(ancestor).ifPresent(builder::setHandlers);
XmlHelper.getOptionalAttribute(accessControlElem, "read").ifPresent(
readAttr -> builder.readEnabled(Boolean.valueOf(readAttr)));
XmlHelper.getOptionalAttribute(accessControlElem, "write").ifPresent(
writeAttr -> builder.writeEnabled(Boolean.valueOf(writeAttr)));
Element excludeElem = XML.getChild(accessControlElem, "exclude");
if (excludeElem != null) {
XML.getChildren(excludeElem, "binding").stream()
.map(XML::getValue)
.forEach(builder::excludeBinding);
}
return builder.build();
}
private static Optional<ApplicationContainerCluster> getContainerCluster(AbstractConfigProducer configProducer) {
AbstractConfigProducer currentProducer = configProducer;
while (! ApplicationContainerCluster.class.isAssignableFrom(currentProducer.getClass())) {
currentProducer = currentProducer.getParent();
if (currentProducer == null)
return Optional.empty();
}
return Optional.of((ApplicationContainerCluster) currentProducer);
}
private List<Binding> readFilterBindings(Element filteringSpec, DeployLogger logger) {
List<Binding> result = new ArrayList<>();
for (Element child: XML.getChildren(filteringSpec)) {
String tagName = child.getTagName();
if ((tagName.equals("request-chain") || tagName.equals("response-chain"))) {
ComponentSpecification chainId = XmlHelper.getIdRef(child);
for (Element bindingSpec: XML.getChildren(child, "binding")) {
String binding = XML.getValue(bindingSpec);
result.add(Binding.create(chainId, binding, logger));
}
}
}
return result;
}
private void buildHttpServers(DeployState deployState, AbstractConfigProducer ancestor, Http http, Element spec) {
http.setHttpServer(new JettyHttpServerBuilder().build(deployState, ancestor, spec));
}
static int readPort(ModelElement spec, boolean isHosted, DeployLogger logger) {
Integer port = spec.integerAttribute("port");
if (port == null)
return Defaults.getDefaults().vespaWebServicePort();
if (port < 0)
throw new IllegalArgumentException("Invalid port " + port);
int legalPortInHostedVespa = Container.BASEPORT;
if (isHosted && port != legalPortInHostedVespa && ! spec.booleanAttribute("required", false)) {
throw new IllegalArgumentException("Illegal port " + port + " in http server '" +
spec.stringAttribute("id") + "'" +
": Port must be set to " + legalPortInHostedVespa);
}
return port;
}
} | class HttpBuilder extends VespaDomBuilder.DomConfigProducerBuilder<Http> {
@Override
protected Http doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element spec) {
FilterChains filterChains;
List<Binding> bindings = new ArrayList<>();
AccessControl accessControl = null;
Element filteringElem = XML.getChild(spec, "filtering");
if (filteringElem != null) {
filterChains = new FilterChainsBuilder().build(deployState, ancestor, filteringElem);
bindings = readFilterBindings(filteringElem, deployState.getDeployLogger());
Element accessControlElem = XML.getChild(filteringElem, "access-control");
if (accessControlElem != null) {
accessControl = buildAccessControl(deployState, ancestor, accessControlElem);
bindings.addAll(accessControl.getBindings());
filterChains.add(new Chain<>(FilterChains.emptyChainSpec(ACCESS_CONTROL_CHAIN_ID)));
}
} else {
filterChains = new FilterChainsBuilder().newChainsInstance(ancestor);
}
Http http = new Http(bindings, accessControl);
http.setFilterChains(filterChains);
buildHttpServers(deployState, ancestor, http, spec);
return http;
}
private AccessControl buildAccessControl(DeployState deployState, AbstractConfigProducer ancestor, Element accessControlElem) {
AthenzDomain domain = getAccessControlDomain(deployState, accessControlElem);
AccessControl.Builder builder = new AccessControl.Builder(domain.value(), deployState.getDeployLogger());
getContainerCluster(ancestor).ifPresent(builder::setHandlers);
XmlHelper.getOptionalAttribute(accessControlElem, "read").ifPresent(
readAttr -> builder.readEnabled(Boolean.valueOf(readAttr)));
XmlHelper.getOptionalAttribute(accessControlElem, "write").ifPresent(
writeAttr -> builder.writeEnabled(Boolean.valueOf(writeAttr)));
Element excludeElem = XML.getChild(accessControlElem, "exclude");
if (excludeElem != null) {
XML.getChildren(excludeElem, "binding").stream()
.map(XML::getValue)
.forEach(builder::excludeBinding);
}
return builder.build();
}
private static Optional<ApplicationContainerCluster> getContainerCluster(AbstractConfigProducer configProducer) {
AbstractConfigProducer currentProducer = configProducer;
while (! ApplicationContainerCluster.class.isAssignableFrom(currentProducer.getClass())) {
currentProducer = currentProducer.getParent();
if (currentProducer == null)
return Optional.empty();
}
return Optional.of((ApplicationContainerCluster) currentProducer);
}
private List<Binding> readFilterBindings(Element filteringSpec, DeployLogger logger) {
List<Binding> result = new ArrayList<>();
for (Element child: XML.getChildren(filteringSpec)) {
String tagName = child.getTagName();
if ((tagName.equals("request-chain") || tagName.equals("response-chain"))) {
ComponentSpecification chainId = XmlHelper.getIdRef(child);
for (Element bindingSpec: XML.getChildren(child, "binding")) {
String binding = XML.getValue(bindingSpec);
result.add(Binding.create(chainId, binding, logger));
}
}
}
return result;
}
private void buildHttpServers(DeployState deployState, AbstractConfigProducer ancestor, Http http, Element spec) {
http.setHttpServer(new JettyHttpServerBuilder().build(deployState, ancestor, spec));
}
static int readPort(ModelElement spec, boolean isHosted, DeployLogger logger) {
Integer port = spec.integerAttribute("port");
if (port == null)
return Defaults.getDefaults().vespaWebServicePort();
if (port < 0)
throw new IllegalArgumentException("Invalid port " + port);
int legalPortInHostedVespa = Container.BASEPORT;
if (isHosted && port != legalPortInHostedVespa && ! spec.booleanAttribute("required", false)) {
throw new IllegalArgumentException("Illegal port " + port + " in http server '" +
spec.stringAttribute("id") + "'" +
": Port must be set to " + legalPortInHostedVespa);
}
return port;
}
} |
Thanks | public void relevant_information_from_deprovisioned_hosts_are_merged_into_readded_host() {
NodeRepositoryTester tester = new NodeRepositoryTester();
Instant testStart = tester.nodeRepository().clock().instant();
tester.clock().advance(Duration.ofSeconds(1));
tester.addNode("id1", "host1", "default", NodeType.host);
tester.addNode("id2", "host2", "default", NodeType.host);
assertFalse(tester.nodeRepository().getNode("host1").get().history().hasEventAfter(History.Event.Type.deprovisioned, testStart));
Node host1 = tester.nodeRepository().getNode("host1").get();
host1 = host1.withWantToRetire(true, Agent.system, tester.nodeRepository().clock().instant());
host1 = host1.with(host1.status().withWantToDeprovision(true));
host1 = host1.withFirmwareVerifiedAt(tester.clock().instant());
host1 = host1.with(host1.status().withIncreasedFailCount());
host1 = host1.with(host1.reports().withReport(Report.basicReport("id", Report.Type.HARD_FAIL, tester.clock().instant(), "Test report")));
tester.nodeRepository().write(host1, tester.nodeRepository().lock(host1));
tester.nodeRepository().removeRecursively("host1");
host1 = tester.nodeRepository().getNode("host1").get();
assertEquals(Node.State.deprovisioned, host1.state());
assertTrue(host1.history().hasEventAfter(History.Event.Type.deprovisioned, testStart));
tester.addNode("id2", "host1", "default", NodeType.host);
host1 = tester.nodeRepository().getNode("host1").get();
assertEquals("This is the newly added node", "id2", host1.id());
assertFalse("The old 'host1' is removed",
tester.nodeRepository().getNode("host1", Node.State.deprovisioned).isPresent());
assertFalse("Not transferred from deprovisioned host", host1.status().wantToRetire());
assertFalse("Not transferred from deprovisioned host", host1.status().wantToDeprovision());
assertTrue("Transferred from deprovisioned host", host1.history().hasEventAfter(History.Event.Type.deprovisioned, testStart));
assertTrue("Transferred from deprovisioned host", host1.status().firmwareVerifiedAt().isPresent());
assertEquals("Transferred from deprovisioned host", 1, host1.status().failCount());
assertEquals("Transferred from deprovisioned host", 1, host1.reports().getReports().size());
} | public void relevant_information_from_deprovisioned_hosts_are_merged_into_readded_host() {
NodeRepositoryTester tester = new NodeRepositoryTester();
Instant testStart = tester.nodeRepository().clock().instant();
tester.clock().advance(Duration.ofSeconds(1));
tester.addNode("id1", "host1", "default", NodeType.host);
tester.addNode("id2", "host2", "default", NodeType.host);
assertFalse(tester.nodeRepository().getNode("host1").get().history().hasEventAfter(History.Event.Type.deprovisioned, testStart));
Node host1 = tester.nodeRepository().getNode("host1").get();
host1 = host1.withWantToRetire(true, Agent.system, tester.nodeRepository().clock().instant());
host1 = host1.with(host1.status().withWantToDeprovision(true));
host1 = host1.withFirmwareVerifiedAt(tester.clock().instant());
host1 = host1.with(host1.status().withIncreasedFailCount());
host1 = host1.with(host1.reports().withReport(Report.basicReport("id", Report.Type.HARD_FAIL, tester.clock().instant(), "Test report")));
tester.nodeRepository().write(host1, tester.nodeRepository().lock(host1));
tester.nodeRepository().removeRecursively("host1");
host1 = tester.nodeRepository().getNode("host1").get();
assertEquals(Node.State.deprovisioned, host1.state());
assertTrue(host1.history().hasEventAfter(History.Event.Type.deprovisioned, testStart));
tester.addNode("id2", "host1", "default", NodeType.host);
host1 = tester.nodeRepository().getNode("host1").get();
assertEquals("This is the newly added node", "id2", host1.id());
assertFalse("The old 'host1' is removed",
tester.nodeRepository().getNode("host1", Node.State.deprovisioned).isPresent());
assertFalse("Not transferred from deprovisioned host", host1.status().wantToRetire());
assertFalse("Not transferred from deprovisioned host", host1.status().wantToDeprovision());
assertTrue("Transferred from deprovisioned host", host1.history().hasEventAfter(History.Event.Type.deprovisioned, testStart));
assertTrue("Transferred from deprovisioned host", host1.status().firmwareVerifiedAt().isPresent());
assertEquals("Transferred from deprovisioned host", 1, host1.status().failCount());
assertEquals("Transferred from deprovisioned host", 1, host1.reports().getReports().size());
} | class NodeRepositoryTest {
@Test
public void nodeRepositoryTest() {
NodeRepositoryTester tester = new NodeRepositoryTester();
assertEquals(0, tester.nodeRepository().getNodes().size());
tester.addNode("id1", "host1", "default", NodeType.tenant);
tester.addNode("id2", "host2", "default", NodeType.tenant);
tester.addNode("id3", "host3", "default", NodeType.tenant);
assertEquals(3, tester.nodeRepository().getNodes().size());
tester.nodeRepository().park("host2", true, Agent.system, "Parking to unit test");
tester.nodeRepository().removeRecursively("host2");
assertEquals(2, tester.nodeRepository().getNodes().size());
}
@Test
public void only_allow_docker_containers_remove_in_ready() {
NodeRepositoryTester tester = new NodeRepositoryTester();
tester.addNode("id1", "host1", "docker", NodeType.tenant);
try {
tester.nodeRepository().removeRecursively("host1");
fail("Should not be able to delete docker container node by itself in state provisioned");
} catch (IllegalArgumentException ignored) {
}
tester.nodeRepository().setReady("host1", Agent.system, getClass().getSimpleName());
tester.nodeRepository().removeRecursively("host1");
}
@Test
public void only_remove_tenant_docker_containers_for_new_allocations() {
NodeRepositoryTester tester = new NodeRepositoryTester();
tester.addNode("host1", "host1", "default", NodeType.tenant);
tester.addNode("host2", "host2", "docker", NodeType.tenant);
tester.addNode("cfg1", "cfg1", "docker", NodeType.config);
tester.setNodeState("host1", Node.State.dirty);
tester.setNodeState("host2", Node.State.dirty);
tester.setNodeState("cfg1", Node.State.dirty);
tester.nodeRepository().markNodeAvailableForNewAllocation("host1", Agent.system, getClass().getSimpleName());
assertEquals(Node.State.ready, tester.nodeRepository().getNode("host1").get().state());
tester.nodeRepository().markNodeAvailableForNewAllocation("host2", Agent.system, getClass().getSimpleName());
assertFalse(tester.nodeRepository().getNode("host2").isPresent());
tester.nodeRepository().markNodeAvailableForNewAllocation("cfg1", Agent.system, getClass().getSimpleName());
assertEquals(Node.State.ready, tester.nodeRepository().getNode("cfg1").get().state());
}
@Test
public void fail_readying_with_hard_fail() {
NodeRepositoryTester tester = new NodeRepositoryTester();
tester.addNode("host1", "host1", "default", NodeType.tenant);
tester.addNode("host2", "host2", "default", NodeType.tenant);
tester.setNodeState("host1", Node.State.dirty);
tester.setNodeState("host2", Node.State.dirty);
Node node2 = tester.nodeRepository().getNode("host2").orElseThrow();
var reportsBuilder = new Reports.Builder(node2.reports());
reportsBuilder.setReport(Report.basicReport("reportId", Report.Type.HARD_FAIL, Instant.EPOCH, "hardware failure"));
node2 = node2.with(reportsBuilder.build());
tester.nodeRepository().write(node2, () -> {});
tester.nodeRepository().markNodeAvailableForNewAllocation("host1", Agent.system, getClass().getSimpleName());
assertEquals(Node.State.ready, tester.nodeRepository().getNode("host1").get().state());
try {
tester.nodeRepository().markNodeAvailableForNewAllocation("host2", Agent.system, getClass().getSimpleName());
fail();
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), containsString("hardware failure"));
}
}
@Test
public void delete_host_only_after_all_the_children_have_been_deleted() {
NodeRepositoryTester tester = new NodeRepositoryTester();
tester.addNode("id1", "host1", "default", NodeType.host);
tester.addNode("id2", "host2", "default", NodeType.host);
tester.addNode("node10", "node10", "host1", "docker", NodeType.tenant);
tester.addNode("node11", "node11", "host1", "docker", NodeType.tenant);
tester.addNode("node12", "node12", "host1", "docker", NodeType.tenant);
tester.addNode("node20", "node20", "host2", "docker", NodeType.tenant);
assertEquals(6, tester.nodeRepository().getNodes().size());
tester.setNodeState("node11", Node.State.active);
try {
tester.nodeRepository().removeRecursively("host1");
fail("Should not be able to delete host node, one of the children is in state active");
} catch (IllegalArgumentException ignored) {
}
assertEquals(6, tester.nodeRepository().getNodes().size());
tester.nodeRepository().removeRecursively("host2");
assertEquals(5, tester.nodeRepository().getNodes().size());
assertEquals(Node.State.deprovisioned, tester.nodeRepository().getNode("host2").get().state());
tester.nodeRepository().fail("node11", Agent.system, getClass().getSimpleName());
tester.nodeRepository().setReady("node12", Agent.system, getClass().getSimpleName());
tester.nodeRepository().removeRecursively("node12");
assertEquals(4, tester.nodeRepository().getNodes().size());
tester.nodeRepository().removeRecursively("host1");
assertEquals(Node.State.deprovisioned, tester.nodeRepository().getNode("host1").get().state());
}
@Test
@Test
public void dirty_host_only_if_we_can_dirty_children() {
NodeRepositoryTester tester = new NodeRepositoryTester();
tester.addNode("id1", "host1", "default", NodeType.host);
tester.addNode("id2", "host2", "default", NodeType.host);
tester.addNode("node10", "node10", "host1", "docker", NodeType.tenant);
tester.addNode("node11", "node11", "host1", "docker", NodeType.tenant);
tester.addNode("node12", "node12", "host1", "docker", NodeType.tenant);
tester.addNode("node20", "node20", "host2", "docker", NodeType.tenant);
tester.setNodeState("node11", Node.State.ready);
tester.setNodeState("node12", Node.State.active);
tester.setNodeState("node20", Node.State.failed);
assertEquals(6, tester.nodeRepository().getNodes().size());
tester.nodeRepository().dirtyRecursively("host2", Agent.system, NodeRepositoryTest.class.getSimpleName());
assertEquals(asSet("host2", "node20"), filterNodes(tester, node -> node.state() == Node.State.dirty));
try {
tester.nodeRepository().dirtyRecursively("host1", Agent.system, NodeRepositoryTest.class.getSimpleName());
fail("Should not be able to dirty host1");
} catch (IllegalArgumentException ignored) { }
assertEquals(asSet("host2", "node20"), filterNodes(tester, node -> node.state() == Node.State.dirty));
}
private static Set<String> asSet(String... elements) {
return new HashSet<>(Arrays.asList(elements));
}
private static Set<String> filterNodes(NodeRepositoryTester tester, Predicate<Node> filter) {
return tester.nodeRepository()
.getNodes().stream()
.filter(filter)
.map(Node::hostname)
.collect(Collectors.toSet());
}
} | class NodeRepositoryTest {
@Test
public void nodeRepositoryTest() {
NodeRepositoryTester tester = new NodeRepositoryTester();
assertEquals(0, tester.nodeRepository().getNodes().size());
tester.addNode("id1", "host1", "default", NodeType.tenant);
tester.addNode("id2", "host2", "default", NodeType.tenant);
tester.addNode("id3", "host3", "default", NodeType.tenant);
assertEquals(3, tester.nodeRepository().getNodes().size());
tester.nodeRepository().park("host2", true, Agent.system, "Parking to unit test");
tester.nodeRepository().removeRecursively("host2");
assertEquals(2, tester.nodeRepository().getNodes().size());
}
@Test
public void only_allow_docker_containers_remove_in_ready() {
NodeRepositoryTester tester = new NodeRepositoryTester();
tester.addNode("id1", "host1", "docker", NodeType.tenant);
try {
tester.nodeRepository().removeRecursively("host1");
fail("Should not be able to delete docker container node by itself in state provisioned");
} catch (IllegalArgumentException ignored) {
}
tester.nodeRepository().setReady("host1", Agent.system, getClass().getSimpleName());
tester.nodeRepository().removeRecursively("host1");
}
@Test
public void only_remove_tenant_docker_containers_for_new_allocations() {
NodeRepositoryTester tester = new NodeRepositoryTester();
tester.addNode("host1", "host1", "default", NodeType.tenant);
tester.addNode("host2", "host2", "docker", NodeType.tenant);
tester.addNode("cfg1", "cfg1", "docker", NodeType.config);
tester.setNodeState("host1", Node.State.dirty);
tester.setNodeState("host2", Node.State.dirty);
tester.setNodeState("cfg1", Node.State.dirty);
tester.nodeRepository().markNodeAvailableForNewAllocation("host1", Agent.system, getClass().getSimpleName());
assertEquals(Node.State.ready, tester.nodeRepository().getNode("host1").get().state());
tester.nodeRepository().markNodeAvailableForNewAllocation("host2", Agent.system, getClass().getSimpleName());
assertFalse(tester.nodeRepository().getNode("host2").isPresent());
tester.nodeRepository().markNodeAvailableForNewAllocation("cfg1", Agent.system, getClass().getSimpleName());
assertEquals(Node.State.ready, tester.nodeRepository().getNode("cfg1").get().state());
}
@Test
public void fail_readying_with_hard_fail() {
NodeRepositoryTester tester = new NodeRepositoryTester();
tester.addNode("host1", "host1", "default", NodeType.tenant);
tester.addNode("host2", "host2", "default", NodeType.tenant);
tester.setNodeState("host1", Node.State.dirty);
tester.setNodeState("host2", Node.State.dirty);
Node node2 = tester.nodeRepository().getNode("host2").orElseThrow();
var reportsBuilder = new Reports.Builder(node2.reports());
reportsBuilder.setReport(Report.basicReport("reportId", Report.Type.HARD_FAIL, Instant.EPOCH, "hardware failure"));
node2 = node2.with(reportsBuilder.build());
tester.nodeRepository().write(node2, () -> {});
tester.nodeRepository().markNodeAvailableForNewAllocation("host1", Agent.system, getClass().getSimpleName());
assertEquals(Node.State.ready, tester.nodeRepository().getNode("host1").get().state());
try {
tester.nodeRepository().markNodeAvailableForNewAllocation("host2", Agent.system, getClass().getSimpleName());
fail();
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), containsString("hardware failure"));
}
}
@Test
public void delete_host_only_after_all_the_children_have_been_deleted() {
NodeRepositoryTester tester = new NodeRepositoryTester();
tester.addNode("id1", "host1", "default", NodeType.host);
tester.addNode("id2", "host2", "default", NodeType.host);
tester.addNode("node10", "node10", "host1", "docker", NodeType.tenant);
tester.addNode("node11", "node11", "host1", "docker", NodeType.tenant);
tester.addNode("node12", "node12", "host1", "docker", NodeType.tenant);
tester.addNode("node20", "node20", "host2", "docker", NodeType.tenant);
assertEquals(6, tester.nodeRepository().getNodes().size());
tester.setNodeState("node11", Node.State.active);
try {
tester.nodeRepository().removeRecursively("host1");
fail("Should not be able to delete host node, one of the children is in state active");
} catch (IllegalArgumentException ignored) {
}
assertEquals(6, tester.nodeRepository().getNodes().size());
tester.nodeRepository().removeRecursively("host2");
assertEquals(5, tester.nodeRepository().getNodes().size());
assertEquals(Node.State.deprovisioned, tester.nodeRepository().getNode("host2").get().state());
tester.nodeRepository().fail("node11", Agent.system, getClass().getSimpleName());
tester.nodeRepository().setReady("node12", Agent.system, getClass().getSimpleName());
tester.nodeRepository().removeRecursively("node12");
assertEquals(4, tester.nodeRepository().getNodes().size());
tester.nodeRepository().removeRecursively("host1");
assertEquals(Node.State.deprovisioned, tester.nodeRepository().getNode("host1").get().state());
}
@Test
@Test
public void dirty_host_only_if_we_can_dirty_children() {
NodeRepositoryTester tester = new NodeRepositoryTester();
tester.addNode("id1", "host1", "default", NodeType.host);
tester.addNode("id2", "host2", "default", NodeType.host);
tester.addNode("node10", "node10", "host1", "docker", NodeType.tenant);
tester.addNode("node11", "node11", "host1", "docker", NodeType.tenant);
tester.addNode("node12", "node12", "host1", "docker", NodeType.tenant);
tester.addNode("node20", "node20", "host2", "docker", NodeType.tenant);
tester.setNodeState("node11", Node.State.ready);
tester.setNodeState("node12", Node.State.active);
tester.setNodeState("node20", Node.State.failed);
assertEquals(6, tester.nodeRepository().getNodes().size());
tester.nodeRepository().dirtyRecursively("host2", Agent.system, NodeRepositoryTest.class.getSimpleName());
assertEquals(asSet("host2", "node20"), filterNodes(tester, node -> node.state() == Node.State.dirty));
try {
tester.nodeRepository().dirtyRecursively("host1", Agent.system, NodeRepositoryTest.class.getSimpleName());
fail("Should not be able to dirty host1");
} catch (IllegalArgumentException ignored) { }
assertEquals(asSet("host2", "node20"), filterNodes(tester, node -> node.state() == Node.State.dirty));
}
private static Set<String> asSet(String... elements) {
return new HashSet<>(Arrays.asList(elements));
}
private static Set<String> filterNodes(NodeRepositoryTester tester, Predicate<Node> filter) {
return tester.nodeRepository()
.getNodes().stream()
.filter(filter)
.map(Node::hostname)
.collect(Collectors.toSet());
}
} | |
I realize the deserializer handles empty string, but consider just to serializing dockerImageRepo if it's not set. | private void toSlime(Allocation allocation, Cursor object) {
toSlime(allocation.requestedResources(), object.setObject(requestedResourcesKey));
object.setString(tenantIdKey, allocation.owner().tenant().value());
object.setString(applicationIdKey, allocation.owner().application().value());
object.setString(instanceIdKey, allocation.owner().instance().value());
object.setString(serviceIdKey, allocation.membership().stringValue());
object.setLong(restartGenerationKey, allocation.restartGeneration().wanted());
object.setLong(currentRestartGenerationKey, allocation.restartGeneration().current());
object.setBool(removableKey, allocation.isRemovable());
object.setString(wantedVespaVersionKey, allocation.membership().cluster().vespaVersion().toString());
object.setString(wantedDockerImageRepoKey, allocation.membership().cluster().dockerImageRepo().orElse(""));
allocation.networkPorts().ifPresent(ports -> NetworkPortsSerializer.toSlime(ports, object.setArray(networkPortsKey)));
} | object.setString(wantedDockerImageRepoKey, allocation.membership().cluster().dockerImageRepo().orElse("")); | private void toSlime(Allocation allocation, Cursor object) {
toSlime(allocation.requestedResources(), object.setObject(requestedResourcesKey));
object.setString(tenantIdKey, allocation.owner().tenant().value());
object.setString(applicationIdKey, allocation.owner().application().value());
object.setString(instanceIdKey, allocation.owner().instance().value());
object.setString(serviceIdKey, allocation.membership().stringValue());
object.setLong(restartGenerationKey, allocation.restartGeneration().wanted());
object.setLong(currentRestartGenerationKey, allocation.restartGeneration().current());
object.setBool(removableKey, allocation.isRemovable());
object.setString(wantedVespaVersionKey, allocation.membership().cluster().vespaVersion().toString());
allocation.membership().cluster().dockerImageRepo().ifPresent(repo -> object.setString(wantedDockerImageRepoKey, repo));
allocation.networkPorts().ifPresent(ports -> NetworkPortsSerializer.toSlime(ports, object.setArray(networkPortsKey)));
} | class NodeSerializer {
/** The configured node flavors */
private final NodeFlavors flavors;
private static final String hostnameKey = "hostname";
private static final String ipAddressesKey = "ipAddresses";
private static final String ipAddressPoolKey = "additionalIpAddresses";
private static final String idKey = "openStackId";
private static final String parentHostnameKey = "parentHostname";
private static final String historyKey = "history";
private static final String instanceKey = "instance";
// Serialization keys of the persisted JSON form of a node. These string values are part of
// the stored data format: renaming one is a data migration, not a simple refactoring.
private static final String rebootGenerationKey = "rebootGeneration";
private static final String currentRebootGenerationKey = "currentRebootGeneration";
private static final String vespaVersionKey = "vespaVersion";
private static final String currentDockerImageKey = "currentDockerImage";
private static final String failCountKey = "failCount";
// NOTE(review): serialized as "type" on the node object; historyEventTypeKey below uses the
// same string, but at the history-event level, so the two do not collide.
private static final String nodeTypeKey = "type";
private static final String wantToRetireKey = "wantToRetire";
private static final String wantToDeprovisionKey = "wantToDeprovision";
private static final String osVersionKey = "osVersion";
private static final String wantedOsVersionKey = "wantedOsVersion";
private static final String firmwareCheckKey = "firmwareCheck";
private static final String reportsKey = "reports";
private static final String modelNameKey = "modelName";
private static final String reservedToKey = "reservedTo";
// Flavor/resources keys
private static final String flavorKey = "flavor";
private static final String resourcesKey = "resources";
private static final String vcpuKey = "vcpu";
private static final String memoryKey = "memory";
private static final String diskKey = "disk";
private static final String bandwidthKey = "bandwidth";
private static final String diskSpeedKey = "diskSpeed";
private static final String storageTypeKey = "storageType";
// Allocation (owner/membership) keys
private static final String tenantIdKey = "tenantId";
private static final String applicationIdKey = "applicationId";
private static final String instanceIdKey = "instanceId";
private static final String serviceIdKey = "serviceId";
private static final String requestedResourcesKey = "requestedResources";
private static final String restartGenerationKey = "restartGeneration";
private static final String currentRestartGenerationKey = "currentRestartGeneration";
private static final String removableKey = "removable";
private static final String wantedVespaVersionKey = "wantedVespaVersion";
private static final String wantedDockerImageRepoKey = "wantedDockerImageRepo";
// History event keys
private static final String historyEventTypeKey = "type";
private static final String atKey = "at";
private static final String agentKey = "agent";
private static final String networkPortsKey = "networkPorts";
/** Creates a serializer which resolves named flavors through the given {@link NodeFlavors}. */
public NodeSerializer(NodeFlavors flavors) {
this.flavors = flavors;
}
/**
 * Serializes the given node to its persisted JSON form.
 *
 * @throws RuntimeException wrapping the {@link IOException} if JSON encoding fails
 */
public byte[] toJson(Node node) {
Slime root = new Slime();
toSlime(node, root.setObject());
// Only the JSON encoding step can throw IOException; building the slime tree cannot.
try {
return SlimeUtils.toJsonBytes(root);
}
catch (IOException e) {
throw new RuntimeException("Serialization of " + node + " to json failed", e);
}
}
/**
 * Serializes all persisted fields of the given node into the given object cursor.
 * The node's state is not serialized here; it is supplied separately on deserialization
 * (see fromJson) — presumably derived from where the node is stored; confirm.
 */
private void toSlime(Node node, Cursor object) {
object.setString(hostnameKey, node.hostname());
toSlime(node.ipConfig().primary(), object.setArray(ipAddressesKey), IP.Config::require);
toSlime(node.ipConfig().pool().asSet(), object.setArray(ipAddressPoolKey), UnaryOperator.identity() /* Pool already holds a validated address list */);
object.setString(idKey, node.id());
node.parentHostname().ifPresent(hostname -> object.setString(parentHostnameKey, hostname));
toSlime(node.flavor(), object);
object.setLong(rebootGenerationKey, node.status().reboot().wanted());
object.setLong(currentRebootGenerationKey, node.status().reboot().current());
node.status().vespaVersion().ifPresent(version -> object.setString(vespaVersionKey, version.toString()));
node.status().dockerImage().ifPresent(image -> object.setString(currentDockerImageKey, image.asString()));
object.setLong(failCountKey, node.status().failCount());
object.setBool(wantToRetireKey, node.status().wantToRetire());
object.setBool(wantToDeprovisionKey, node.status().wantToDeprovision());
node.allocation().ifPresent(allocation -> toSlime(allocation, object.setObject(instanceKey)));
toSlime(node.history(), object.setArray(historyKey));
object.setString(nodeTypeKey, toString(node.type()));
node.status().osVersion().current().ifPresent(version -> object.setString(osVersionKey, version.toString()));
// NOTE(review): wanted OS version is written with toFullString() while the current one uses
// toString() — confirm this asymmetry is intentional.
node.status().osVersion().wanted().ifPresent(version -> object.setString(wantedOsVersionKey, version.toFullString()));
node.status().firmwareVerifiedAt().ifPresent(instant -> object.setLong(firmwareCheckKey, instant.toEpochMilli()));
node.reports().toSlime(object, reportsKey);
node.modelName().ifPresent(modelName -> object.setString(modelNameKey, modelName));
node.reservedTo().ifPresent(tenant -> object.setString(reservedToKey, tenant.value()));
}
/** Serializes the flavor: configured flavors by name (plus any disk override), others as explicit resources. */
private void toSlime(Flavor flavor, Cursor object) {
if (flavor.isConfigured()) {
object.setString(flavorKey, flavor.name());
if (flavor.flavorOverrides().isPresent()) {
// Only a disk-size override is persisted for configured flavors.
Cursor resourcesObject = object.setObject(resourcesKey);
flavor.flavorOverrides().get().diskGb().ifPresent(diskGb -> resourcesObject.setDouble(diskKey, diskGb));
}
}
else {
toSlime(flavor.resources(), object.setObject(resourcesKey));
}
}
/** Serializes node resources; must round-trip with resourcesFromSlime. */
private void toSlime(NodeResources resources, Cursor resourcesObject) {
resourcesObject.setDouble(vcpuKey, resources.vcpu());
resourcesObject.setDouble(memoryKey, resources.memoryGb());
resourcesObject.setDouble(diskKey, resources.diskGb());
resourcesObject.setDouble(bandwidthKey, resources.bandwidthGbps());
resourcesObject.setString(diskSpeedKey, diskSpeedToString(resources.diskSpeed()));
resourcesObject.setString(storageTypeKey, storageTypeToString(resources.storageType()));
}
/** Serializes the history as an array of event objects. */
private void toSlime(History history, Cursor array) {
for (History.Event event : history.events())
toSlime(event, array.addObject());
}
/** Serializes one history event; must round-trip with eventFromSlime. */
private void toSlime(History.Event event, Cursor object) {
object.setString(historyEventTypeKey, toString(event.type()));
object.setLong(atKey, event.at().toEpochMilli());
object.setString(agentKey, toString(event.agent()));
}
/** Runs the addresses through the given validator, then writes them sorted in natural IP order. */
private void toSlime(Set<String> ipAddresses, Cursor array, UnaryOperator<Set<String>> validator) {
validator.apply(ipAddresses).stream().sorted(IP.NATURAL_ORDER).forEach(array::addString);
}
/**
 * Deserializes a node from its JSON form. The node's state is passed in by the caller:
 * it is kept outside the serialized data.
 */
public Node fromJson(Node.State state, byte[] data) {
return nodeFromSlime(state, SlimeUtils.jsonToSlime(data).get());
}
private Node nodeFromSlime(Node.State state, Inspector object) {
// Read the flavor first: the allocation's requested resources default to the flavor's resources.
Flavor flavor = flavorFromSlime(object);
return new Node(object.field(idKey).asString(),
new IP.Config(ipAddressesFromSlime(object, ipAddressesKey),
ipAddressesFromSlime(object, ipAddressPoolKey)),
object.field(hostnameKey).asString(),
parentHostnameFromSlime(object),
flavor,
statusFromSlime(object),
state,
allocationFromSlime(flavor.resources(), object.field(instanceKey)),
historyFromSlime(object.field(historyKey)),
nodeTypeFromString(object.field(nodeTypeKey).asString()),
Reports.fromSlime(object.field(reportsKey)),
modelNameFromSlime(object),
reservedToFromSlime(object.field(reservedToKey)));
}
/** Deserializes the node's status; optional fields absent in the data become empty Optionals. */
private Status statusFromSlime(Inspector object) {
return new Status(generationFromSlime(object, rebootGenerationKey, currentRebootGenerationKey),
versionFromSlime(object.field(vespaVersionKey)),
dockerImageFromSlime(object.field(currentDockerImageKey)),
// failCount is persisted as a long but modeled as an int
(int) object.field(failCountKey).asLong(),
object.field(wantToRetireKey).asBool(),
object.field(wantToDeprovisionKey).asBool(),
new OsVersion(versionFromSlime(object.field(osVersionKey)),
versionFromSlime(object.field(wantedOsVersionKey))),
instantFromSlime(object.field(firmwareCheckKey)));
}
/**
 * Deserializes the node's flavor: either a configured flavor referenced by name
 * (optionally with a disk-size override under "resources"), or explicit resources.
 *
 * @throws IllegalArgumentException if the data contains neither a flavor name nor resources
 */
private Flavor flavorFromSlime(Inspector object) {
Inspector resources = object.field(resourcesKey);
if (object.field(flavorKey).valid()) {
Flavor flavor = flavors.getFlavorOrThrow(object.field(flavorKey).asString());
if ( ! resources.valid()) return flavor;
return flavor.with(FlavorOverrides.ofDisk(resources.field(diskKey).asDouble()));
}
// Fix: fail with a descriptive message instead of an unchecked Optional.get() surfacing
// a bare NoSuchElementException when both "flavor" and "resources" are missing.
return new Flavor(resourcesFromSlime(resources)
.orElseThrow(() -> new IllegalArgumentException("Node data has neither a flavor nor resources")));
}
/** Returns the node resources, or empty if the resources object is absent. */
private Optional<NodeResources> resourcesFromSlime(Inspector resources) {
if ( ! resources.valid()) return Optional.empty();
return Optional.of(new NodeResources(resources.field(vcpuKey).asDouble(),
resources.field(memoryKey).asDouble(),
resources.field(diskKey).asDouble(),
resources.field(bandwidthKey).asDouble(),
diskSpeedFromSlime(resources.field(diskSpeedKey)),
storageTypeFromSlime(resources.field(storageTypeKey))));
}
/** Returns the allocation, or empty if this node is unallocated. */
private Optional<Allocation> allocationFromSlime(NodeResources assignedResources, Inspector object) {
if ( ! object.valid()) return Optional.empty();
return Optional.of(new Allocation(applicationIdFromSlime(object),
clusterMembershipFromSlime(object),
// Data without requestedResources falls back to the assigned (flavor) resources —
// presumably written before this field existed; confirm.
resourcesFromSlime(object.field(requestedResourcesKey)).orElse(assignedResources),
generationFromSlime(object, restartGenerationKey, currentRestartGenerationKey),
object.field(removableKey).asBool(),
NetworkPortsSerializer.fromSlime(object.field(networkPortsKey))));
}
/** Reads the owning application id from its three serialized parts. */
private ApplicationId applicationIdFromSlime(Inspector object) {
return ApplicationId.from(TenantName.from(object.field(tenantIdKey).asString()),
ApplicationName.from(object.field(applicationIdKey).asString()),
InstanceName.from(object.field(instanceIdKey).asString()));
}
/** Deserializes the history, dropping events for which eventFromSlime returns null. */
private History historyFromSlime(Inspector array) {
List<History.Event> events = new ArrayList<>();
array.traverse((ArrayTraverser) (int i, Inspector item) -> {
History.Event event = eventFromSlime(item);
if (event != null)
events.add(event);
});
return new History(events);
}
/**
 * Deserializes one history event, or returns null if its type should be ignored.
 * NOTE(review): with the current eventTypeFromString, which throws for unknown names
 * instead of returning null, the null path here is dead code.
 */
private History.Event eventFromSlime(Inspector object) {
History.Event.Type type = eventTypeFromString(object.field(historyEventTypeKey).asString());
if (type == null) return null;
Instant at = Instant.ofEpochMilli(object.field(atKey).asLong());
Agent agent = eventAgentFromSlime(object.field(agentKey));
return new History.Event(type, agent, at);
}
/** Reads a wanted/current generation pair from the two given field names. */
private Generation generationFromSlime(Inspector object, String wantedField, String currentField) {
Inspector current = object.field(currentField);
return new Generation(object.field(wantedField).asLong(), current.asLong());
}
/**
 * Deserializes the cluster membership of an allocation.
 *
 * @throws IllegalArgumentException if the mandatory wanted Vespa version is missing
 */
private ClusterMembership clusterMembershipFromSlime(Inspector object) {
// Fix: a missing wantedVespaVersion previously surfaced as a bare NoSuchElementException
// from an unchecked Optional.get(); throw with a message naming the missing field instead.
return ClusterMembership.from(object.field(serviceIdKey).asString(),
versionFromSlime(object.field(wantedVespaVersionKey))
.orElseThrow(() -> new IllegalArgumentException("Serialized allocation is missing " + wantedVespaVersionKey)),
dockerImageRepoFromSlime(object.field(wantedDockerImageRepoKey)));
}
/** Returns the version in the given field, or empty if the field is absent. */
private Optional<Version> versionFromSlime(Inspector object) {
if ( ! object.valid()) return Optional.empty();
return Optional.of(Version.fromString(object.asString()));
}
/** Returns the docker image repo, or empty if the field is absent or blank. */
private Optional<String> dockerImageRepoFromSlime(Inspector object) {
if ( ! object.valid() || object.asString().isEmpty()) return Optional.empty();
return Optional.of(object.asString());
}
/** Returns the docker image, or empty if the field is absent. */
private Optional<DockerImage> dockerImageFromSlime(Inspector object) {
if ( ! object.valid()) return Optional.empty();
return Optional.of(DockerImage.fromString(object.asString()));
}
/** Returns the instant stored as epoch millis, or empty if the field is absent. */
private Optional<Instant> instantFromSlime(Inspector object) {
if ( ! object.valid())
return Optional.empty();
return Optional.of(Instant.ofEpochMilli(object.asLong()));
}
/** Returns the parent hostname, or empty if this node has no parent. */
private Optional<String> parentHostnameFromSlime(Inspector object) {
if (object.field(parentHostnameKey).valid())
return Optional.of(object.field(parentHostnameKey).asString());
else
return Optional.empty();
}
/** Reads the string array under the given key into an immutable set. */
private Set<String> ipAddressesFromSlime(Inspector object, String key) {
ImmutableSet.Builder<String> ipAddresses = ImmutableSet.builder();
object.field(key).traverse((ArrayTraverser) (i, item) -> ipAddresses.add(item.asString()));
return ipAddresses.build();
}
/** Returns the model name, or empty if the field is absent. */
private Optional<String> modelNameFromSlime(Inspector object) {
if (object.field(modelNameKey).valid()) {
return Optional.of(object.field(modelNameKey).asString());
}
return Optional.empty();
}
/** Returns the tenant this host is reserved to, or empty; rejects non-string values explicitly. */
private Optional<TenantName> reservedToFromSlime(Inspector object) {
if (! object.valid()) return Optional.empty();
if (object.type() != Type.STRING)
throw new IllegalArgumentException("Expected 'reservedTo' to be a string but is " + object);
return Optional.of(TenantName.from(object.asString()));
}
/**
 * Returns the event type for the given serialized name.
 * Must stay in sync with toString(History.Event.Type).
 *
 * NOTE(review): this previously documented "null if this event type should be ignored",
 * but the method always throws for unknown names, so the null handling in eventFromSlime
 * is currently dead code.
 *
 * @throws IllegalArgumentException for an unknown event type name
 */
private History.Event.Type eventTypeFromString(String eventTypeString) {
switch (eventTypeString) {
case "provisioned" : return History.Event.Type.provisioned;
case "deprovisioned" : return History.Event.Type.deprovisioned;
case "readied" : return History.Event.Type.readied;
case "reserved" : return History.Event.Type.reserved;
case "activated" : return History.Event.Type.activated;
case "wantToRetire": return History.Event.Type.wantToRetire;
case "retired" : return History.Event.Type.retired;
case "deactivated" : return History.Event.Type.deactivated;
case "parked" : return History.Event.Type.parked;
case "failed" : return History.Event.Type.failed;
case "deallocated" : return History.Event.Type.deallocated;
case "down" : return History.Event.Type.down;
case "requested" : return History.Event.Type.requested;
case "rebooted" : return History.Event.Type.rebooted;
case "osUpgraded" : return History.Event.Type.osUpgraded;
case "firmwareVerified" : return History.Event.Type.firmwareVerified;
}
throw new IllegalArgumentException("Unknown node event type '" + eventTypeString + "'");
}
/** Serialized name of a history event type; must round-trip with eventTypeFromString. */
private String toString(History.Event.Type nodeEventType) {
switch (nodeEventType) {
case provisioned : return "provisioned";
case deprovisioned : return "deprovisioned";
case readied : return "readied";
case reserved : return "reserved";
case activated : return "activated";
case wantToRetire: return "wantToRetire";
case retired : return "retired";
case deactivated : return "deactivated";
case parked : return "parked";
case failed : return "failed";
case deallocated : return "deallocated";
case down : return "down";
case requested: return "requested";
case rebooted: return "rebooted";
case osUpgraded: return "osUpgraded";
case firmwareVerified: return "firmwareVerified";
}
// Unreachable for the cases above; guards against adding an enum value without a serialized form.
throw new IllegalArgumentException("Serialized form of '" + nodeEventType + "' not defined");
}
/** Maps a serialized agent name to the enum; throws for unknown names. */
private Agent eventAgentFromSlime(Inspector eventAgentField) {
switch (eventAgentField.asString()) {
case "operator" : return Agent.operator;
case "application" : return Agent.application;
case "system" : return Agent.system;
case "NodeFailer" : return Agent.NodeFailer;
case "Rebalancer" : return Agent.Rebalancer;
case "DirtyExpirer" : return Agent.DirtyExpirer;
case "FailedExpirer" : return Agent.FailedExpirer;
case "InactiveExpirer" : return Agent.InactiveExpirer;
case "ProvisionedExpirer" : return Agent.ProvisionedExpirer;
case "ReservationExpirer" : return Agent.ReservationExpirer;
case "DynamicProvisioningMaintainer" : return Agent.DynamicProvisioningMaintainer;
}
throw new IllegalArgumentException("Unknown node event agent '" + eventAgentField.asString() + "'");
}
/** Serialized name of an agent; must round-trip with eventAgentFromSlime. */
private String toString(Agent agent) {
switch (agent) {
case operator : return "operator";
case application : return "application";
case system : return "system";
case NodeFailer : return "NodeFailer";
case Rebalancer : return "Rebalancer";
case DirtyExpirer : return "DirtyExpirer";
case FailedExpirer : return "FailedExpirer";
case InactiveExpirer : return "InactiveExpirer";
case ProvisionedExpirer : return "ProvisionedExpirer";
case ReservationExpirer : return "ReservationExpirer";
case DynamicProvisioningMaintainer : return "DynamicProvisioningMaintainer";
}
throw new IllegalArgumentException("Serialized form of '" + agent + "' not defined");
}
/** Maps a serialized node type name to the enum; throws for unknown names. */
static NodeType nodeTypeFromString(String typeString) {
switch (typeString) {
case "tenant": return NodeType.tenant;
case "host": return NodeType.host;
case "proxy": return NodeType.proxy;
case "proxyhost": return NodeType.proxyhost;
case "config": return NodeType.config;
case "confighost": return NodeType.confighost;
case "controller": return NodeType.controller;
case "controllerhost": return NodeType.controllerhost;
case "devhost": return NodeType.devhost;
default : throw new IllegalArgumentException("Unknown node type '" + typeString + "'");
}
}
/** Serialized name of a node type; must round-trip with nodeTypeFromString. */
static String toString(NodeType type) {
switch (type) {
case tenant: return "tenant";
case host: return "host";
case proxy: return "proxy";
case proxyhost: return "proxyhost";
case config: return "config";
case confighost: return "confighost";
case controller: return "controller";
case controllerhost: return "controllerhost";
case devhost: return "devhost";
}
throw new IllegalArgumentException("Serialized form of '" + type + "' not defined");
}
/** Maps a serialized disk speed value; throws for unknown (or absent) values. */
private static NodeResources.DiskSpeed diskSpeedFromSlime(Inspector diskSpeed) {
switch (diskSpeed.asString()) {
case "fast" : return NodeResources.DiskSpeed.fast;
case "slow" : return NodeResources.DiskSpeed.slow;
case "any" : return NodeResources.DiskSpeed.any;
default: throw new IllegalStateException("Illegal disk-speed value '" + diskSpeed.asString() + "'");
}
}
/** Serialized name of a disk speed; must round-trip with diskSpeedFromSlime. */
private static String diskSpeedToString(NodeResources.DiskSpeed diskSpeed) {
switch (diskSpeed) {
case fast : return "fast";
case slow : return "slow";
case any : return "any";
default: throw new IllegalStateException("Illegal disk-speed value '" + diskSpeed + "'");
}
}
/** Maps a serialized storage type; unlike disk speed, an absent field falls back to the default. */
private static NodeResources.StorageType storageTypeFromSlime(Inspector storageType) {
if ( ! storageType.valid()) return NodeResources.StorageType.getDefault();
switch (storageType.asString()) {
case "remote" : return NodeResources.StorageType.remote;
case "local" : return NodeResources.StorageType.local;
case "any" : return NodeResources.StorageType.any;
default: throw new IllegalStateException("Illegal storage-type value '" + storageType.asString() + "'");
}
}
/** Serialized name of a storage type; must round-trip with storageTypeFromSlime. */
private static String storageTypeToString(NodeResources.StorageType storageType) {
switch (storageType) {
case remote : return "remote";
case local : return "local";
case any : return "any";
default: throw new IllegalStateException("Illegal storage-type value '" + storageType + "'");
}
}
} | class NodeSerializer {
/** The configured node flavors, used to resolve serialized flavor names. */
private final NodeFlavors flavors;
// Serialization keys of the persisted JSON form of a node. These string values are part of
// the stored data format: renaming one is a data migration, not a simple refactoring.
private static final String hostnameKey = "hostname";
private static final String ipAddressesKey = "ipAddresses";
private static final String ipAddressPoolKey = "additionalIpAddresses";
// Legacy key name: this now stores the node id, not an OpenStack id.
private static final String idKey = "openStackId";
private static final String parentHostnameKey = "parentHostname";
private static final String historyKey = "history";
private static final String instanceKey = "instance";
private static final String rebootGenerationKey = "rebootGeneration";
private static final String currentRebootGenerationKey = "currentRebootGeneration";
private static final String vespaVersionKey = "vespaVersion";
private static final String currentDockerImageKey = "currentDockerImage";
private static final String failCountKey = "failCount";
// NOTE(review): serialized as "type" on the node object; historyEventTypeKey below uses the
// same string, but at the history-event level, so the two do not collide.
private static final String nodeTypeKey = "type";
private static final String wantToRetireKey = "wantToRetire";
private static final String wantToDeprovisionKey = "wantToDeprovision";
private static final String osVersionKey = "osVersion";
private static final String wantedOsVersionKey = "wantedOsVersion";
private static final String firmwareCheckKey = "firmwareCheck";
private static final String reportsKey = "reports";
private static final String modelNameKey = "modelName";
private static final String reservedToKey = "reservedTo";
// Flavor/resources keys
private static final String flavorKey = "flavor";
private static final String resourcesKey = "resources";
private static final String vcpuKey = "vcpu";
private static final String memoryKey = "memory";
private static final String diskKey = "disk";
private static final String bandwidthKey = "bandwidth";
private static final String diskSpeedKey = "diskSpeed";
private static final String storageTypeKey = "storageType";
// Allocation (owner/membership) keys
private static final String tenantIdKey = "tenantId";
private static final String applicationIdKey = "applicationId";
private static final String instanceIdKey = "instanceId";
private static final String serviceIdKey = "serviceId";
private static final String requestedResourcesKey = "requestedResources";
private static final String restartGenerationKey = "restartGeneration";
private static final String currentRestartGenerationKey = "currentRestartGeneration";
private static final String removableKey = "removable";
private static final String wantedVespaVersionKey = "wantedVespaVersion";
private static final String wantedDockerImageRepoKey = "wantedDockerImageRepo";
// History event keys
private static final String historyEventTypeKey = "type";
private static final String atKey = "at";
private static final String agentKey = "agent";
private static final String networkPortsKey = "networkPorts";
/** Creates a serializer which resolves named flavors through the given {@link NodeFlavors}. */
public NodeSerializer(NodeFlavors flavors) {
this.flavors = flavors;
}
/**
 * Serializes the given node to its persisted JSON form.
 *
 * @throws RuntimeException wrapping the {@link IOException} if JSON encoding fails
 */
public byte[] toJson(Node node) {
Slime root = new Slime();
toSlime(node, root.setObject());
// Only the JSON encoding step can throw IOException; building the slime tree cannot.
try {
return SlimeUtils.toJsonBytes(root);
}
catch (IOException e) {
throw new RuntimeException("Serialization of " + node + " to json failed", e);
}
}
/**
 * Serializes all persisted fields of the given node into the given object cursor.
 * The node's state is not serialized here; it is supplied separately on deserialization
 * (see fromJson) — presumably derived from where the node is stored; confirm.
 */
private void toSlime(Node node, Cursor object) {
object.setString(hostnameKey, node.hostname());
toSlime(node.ipConfig().primary(), object.setArray(ipAddressesKey), IP.Config::require);
toSlime(node.ipConfig().pool().asSet(), object.setArray(ipAddressPoolKey), UnaryOperator.identity() /* Pool already holds a validated address list */);
object.setString(idKey, node.id());
node.parentHostname().ifPresent(hostname -> object.setString(parentHostnameKey, hostname));
toSlime(node.flavor(), object);
object.setLong(rebootGenerationKey, node.status().reboot().wanted());
object.setLong(currentRebootGenerationKey, node.status().reboot().current());
node.status().vespaVersion().ifPresent(version -> object.setString(vespaVersionKey, version.toString()));
node.status().dockerImage().ifPresent(image -> object.setString(currentDockerImageKey, image.asString()));
object.setLong(failCountKey, node.status().failCount());
object.setBool(wantToRetireKey, node.status().wantToRetire());
object.setBool(wantToDeprovisionKey, node.status().wantToDeprovision());
node.allocation().ifPresent(allocation -> toSlime(allocation, object.setObject(instanceKey)));
toSlime(node.history(), object.setArray(historyKey));
object.setString(nodeTypeKey, toString(node.type()));
node.status().osVersion().current().ifPresent(version -> object.setString(osVersionKey, version.toString()));
// NOTE(review): wanted OS version is written with toFullString() while the current one uses
// toString() — confirm this asymmetry is intentional.
node.status().osVersion().wanted().ifPresent(version -> object.setString(wantedOsVersionKey, version.toFullString()));
node.status().firmwareVerifiedAt().ifPresent(instant -> object.setLong(firmwareCheckKey, instant.toEpochMilli()));
node.reports().toSlime(object, reportsKey);
node.modelName().ifPresent(modelName -> object.setString(modelNameKey, modelName));
node.reservedTo().ifPresent(tenant -> object.setString(reservedToKey, tenant.value()));
}
/** Serializes the flavor: configured flavors by name (plus any disk override), others as explicit resources. */
private void toSlime(Flavor flavor, Cursor object) {
if (flavor.isConfigured()) {
object.setString(flavorKey, flavor.name());
if (flavor.flavorOverrides().isPresent()) {
// Only a disk-size override is persisted for configured flavors.
Cursor resourcesObject = object.setObject(resourcesKey);
flavor.flavorOverrides().get().diskGb().ifPresent(diskGb -> resourcesObject.setDouble(diskKey, diskGb));
}
}
else {
toSlime(flavor.resources(), object.setObject(resourcesKey));
}
}
/** Serializes node resources; must round-trip with resourcesFromSlime. */
private void toSlime(NodeResources resources, Cursor resourcesObject) {
resourcesObject.setDouble(vcpuKey, resources.vcpu());
resourcesObject.setDouble(memoryKey, resources.memoryGb());
resourcesObject.setDouble(diskKey, resources.diskGb());
resourcesObject.setDouble(bandwidthKey, resources.bandwidthGbps());
resourcesObject.setString(diskSpeedKey, diskSpeedToString(resources.diskSpeed()));
resourcesObject.setString(storageTypeKey, storageTypeToString(resources.storageType()));
}
/** Serializes the history as an array of event objects. */
private void toSlime(History history, Cursor array) {
for (History.Event event : history.events())
toSlime(event, array.addObject());
}
/** Serializes one history event; must round-trip with eventFromSlime. */
private void toSlime(History.Event event, Cursor object) {
object.setString(historyEventTypeKey, toString(event.type()));
object.setLong(atKey, event.at().toEpochMilli());
object.setString(agentKey, toString(event.agent()));
}
/** Runs the addresses through the given validator, then writes them sorted in natural IP order. */
private void toSlime(Set<String> ipAddresses, Cursor array, UnaryOperator<Set<String>> validator) {
validator.apply(ipAddresses).stream().sorted(IP.NATURAL_ORDER).forEach(array::addString);
}
/**
 * Deserializes a node from its JSON form. The node's state is passed in by the caller:
 * it is kept outside the serialized data.
 */
public Node fromJson(Node.State state, byte[] data) {
return nodeFromSlime(state, SlimeUtils.jsonToSlime(data).get());
}
private Node nodeFromSlime(Node.State state, Inspector object) {
// Read the flavor first: the allocation's requested resources default to the flavor's resources.
Flavor flavor = flavorFromSlime(object);
return new Node(object.field(idKey).asString(),
new IP.Config(ipAddressesFromSlime(object, ipAddressesKey),
ipAddressesFromSlime(object, ipAddressPoolKey)),
object.field(hostnameKey).asString(),
parentHostnameFromSlime(object),
flavor,
statusFromSlime(object),
state,
allocationFromSlime(flavor.resources(), object.field(instanceKey)),
historyFromSlime(object.field(historyKey)),
nodeTypeFromString(object.field(nodeTypeKey).asString()),
Reports.fromSlime(object.field(reportsKey)),
modelNameFromSlime(object),
reservedToFromSlime(object.field(reservedToKey)));
}
/** Deserializes the node's status; optional fields absent in the data become empty Optionals. */
private Status statusFromSlime(Inspector object) {
return new Status(generationFromSlime(object, rebootGenerationKey, currentRebootGenerationKey),
versionFromSlime(object.field(vespaVersionKey)),
dockerImageFromSlime(object.field(currentDockerImageKey)),
// failCount is persisted as a long but modeled as an int
(int) object.field(failCountKey).asLong(),
object.field(wantToRetireKey).asBool(),
object.field(wantToDeprovisionKey).asBool(),
new OsVersion(versionFromSlime(object.field(osVersionKey)),
versionFromSlime(object.field(wantedOsVersionKey))),
instantFromSlime(object.field(firmwareCheckKey)));
}
/**
 * Deserializes the node's flavor: either a configured flavor referenced by name
 * (optionally with a disk-size override under "resources"), or explicit resources.
 *
 * @throws IllegalArgumentException if the data contains neither a flavor name nor resources
 */
private Flavor flavorFromSlime(Inspector object) {
Inspector resources = object.field(resourcesKey);
if (object.field(flavorKey).valid()) {
Flavor flavor = flavors.getFlavorOrThrow(object.field(flavorKey).asString());
if ( ! resources.valid()) return flavor;
return flavor.with(FlavorOverrides.ofDisk(resources.field(diskKey).asDouble()));
}
// Fix: fail with a descriptive message instead of an unchecked Optional.get() surfacing
// a bare NoSuchElementException when both "flavor" and "resources" are missing.
return new Flavor(resourcesFromSlime(resources)
.orElseThrow(() -> new IllegalArgumentException("Node data has neither a flavor nor resources")));
}
/** Returns the node resources, or empty if the resources object is absent. */
private Optional<NodeResources> resourcesFromSlime(Inspector resources) {
if ( ! resources.valid()) return Optional.empty();
return Optional.of(new NodeResources(resources.field(vcpuKey).asDouble(),
resources.field(memoryKey).asDouble(),
resources.field(diskKey).asDouble(),
resources.field(bandwidthKey).asDouble(),
diskSpeedFromSlime(resources.field(diskSpeedKey)),
storageTypeFromSlime(resources.field(storageTypeKey))));
}
/** Returns the allocation, or empty if this node is unallocated. */
private Optional<Allocation> allocationFromSlime(NodeResources assignedResources, Inspector object) {
if ( ! object.valid()) return Optional.empty();
return Optional.of(new Allocation(applicationIdFromSlime(object),
clusterMembershipFromSlime(object),
// Data without requestedResources falls back to the assigned (flavor) resources —
// presumably written before this field existed; confirm.
resourcesFromSlime(object.field(requestedResourcesKey)).orElse(assignedResources),
generationFromSlime(object, restartGenerationKey, currentRestartGenerationKey),
object.field(removableKey).asBool(),
NetworkPortsSerializer.fromSlime(object.field(networkPortsKey))));
}
/** Reads the owning application id from its three serialized parts. */
private ApplicationId applicationIdFromSlime(Inspector object) {
return ApplicationId.from(TenantName.from(object.field(tenantIdKey).asString()),
ApplicationName.from(object.field(applicationIdKey).asString()),
InstanceName.from(object.field(instanceIdKey).asString()));
}
/** Deserializes the history, dropping events for which eventFromSlime returns null. */
private History historyFromSlime(Inspector array) {
List<History.Event> events = new ArrayList<>();
array.traverse((ArrayTraverser) (int i, Inspector item) -> {
History.Event event = eventFromSlime(item);
if (event != null)
events.add(event);
});
return new History(events);
}
/**
 * Deserializes one history event, or returns null if its type should be ignored.
 * NOTE(review): with the current eventTypeFromString, which throws for unknown names
 * instead of returning null, the null path here is dead code.
 */
private History.Event eventFromSlime(Inspector object) {
History.Event.Type type = eventTypeFromString(object.field(historyEventTypeKey).asString());
if (type == null) return null;
Instant at = Instant.ofEpochMilli(object.field(atKey).asLong());
Agent agent = eventAgentFromSlime(object.field(agentKey));
return new History.Event(type, agent, at);
}
/** Reads a wanted/current generation pair from the two given field names. */
private Generation generationFromSlime(Inspector object, String wantedField, String currentField) {
Inspector current = object.field(currentField);
return new Generation(object.field(wantedField).asLong(), current.asLong());
}
/**
 * Deserializes the cluster membership of an allocation.
 *
 * @throws IllegalArgumentException if the mandatory wanted Vespa version is missing
 */
private ClusterMembership clusterMembershipFromSlime(Inspector object) {
// Fix: a missing wantedVespaVersion previously surfaced as a bare NoSuchElementException
// from an unchecked Optional.get(); throw with a message naming the missing field instead.
return ClusterMembership.from(object.field(serviceIdKey).asString(),
versionFromSlime(object.field(wantedVespaVersionKey))
.orElseThrow(() -> new IllegalArgumentException("Serialized allocation is missing " + wantedVespaVersionKey)),
dockerImageRepoFromSlime(object.field(wantedDockerImageRepoKey)));
}
/** Returns the version in the given field, or empty if the field is absent. */
private Optional<Version> versionFromSlime(Inspector object) {
if ( ! object.valid()) return Optional.empty();
return Optional.of(Version.fromString(object.asString()));
}
/** Returns the docker image repo, or empty if the field is absent or blank. */
private Optional<String> dockerImageRepoFromSlime(Inspector object) {
if ( ! object.valid() || object.asString().isEmpty()) return Optional.empty();
return Optional.of(object.asString());
}
/** Returns the docker image, or empty if the field is absent. */
private Optional<DockerImage> dockerImageFromSlime(Inspector object) {
if ( ! object.valid()) return Optional.empty();
return Optional.of(DockerImage.fromString(object.asString()));
}
/** Returns the instant stored as epoch millis, or empty if the field is absent. */
private Optional<Instant> instantFromSlime(Inspector object) {
if ( ! object.valid())
return Optional.empty();
return Optional.of(Instant.ofEpochMilli(object.asLong()));
}
/** Returns the parent hostname, or empty if this node has no parent. */
private Optional<String> parentHostnameFromSlime(Inspector object) {
if (object.field(parentHostnameKey).valid())
return Optional.of(object.field(parentHostnameKey).asString());
else
return Optional.empty();
}
/** Reads the string array under the given key into an immutable set. */
private Set<String> ipAddressesFromSlime(Inspector object, String key) {
ImmutableSet.Builder<String> ipAddresses = ImmutableSet.builder();
object.field(key).traverse((ArrayTraverser) (i, item) -> ipAddresses.add(item.asString()));
return ipAddresses.build();
}
/** Returns the model name, or empty if the field is absent. */
private Optional<String> modelNameFromSlime(Inspector object) {
if (object.field(modelNameKey).valid()) {
return Optional.of(object.field(modelNameKey).asString());
}
return Optional.empty();
}
/** Returns the tenant this host is reserved to, or empty; rejects non-string values explicitly. */
private Optional<TenantName> reservedToFromSlime(Inspector object) {
if (! object.valid()) return Optional.empty();
if (object.type() != Type.STRING)
throw new IllegalArgumentException("Expected 'reservedTo' to be a string but is " + object);
return Optional.of(TenantName.from(object.asString()));
}
/**
 * Returns the event type for the given serialized name.
 * Must stay in sync with toString(History.Event.Type).
 *
 * NOTE(review): this previously documented "null if this event type should be ignored",
 * but the method always throws for unknown names, so the null handling in eventFromSlime
 * is currently dead code.
 *
 * @throws IllegalArgumentException for an unknown event type name
 */
private History.Event.Type eventTypeFromString(String eventTypeString) {
switch (eventTypeString) {
case "provisioned" : return History.Event.Type.provisioned;
case "deprovisioned" : return History.Event.Type.deprovisioned;
case "readied" : return History.Event.Type.readied;
case "reserved" : return History.Event.Type.reserved;
case "activated" : return History.Event.Type.activated;
case "wantToRetire": return History.Event.Type.wantToRetire;
case "retired" : return History.Event.Type.retired;
case "deactivated" : return History.Event.Type.deactivated;
case "parked" : return History.Event.Type.parked;
case "failed" : return History.Event.Type.failed;
case "deallocated" : return History.Event.Type.deallocated;
case "down" : return History.Event.Type.down;
case "requested" : return History.Event.Type.requested;
case "rebooted" : return History.Event.Type.rebooted;
case "osUpgraded" : return History.Event.Type.osUpgraded;
case "firmwareVerified" : return History.Event.Type.firmwareVerified;
}
throw new IllegalArgumentException("Unknown node event type '" + eventTypeString + "'");
}
/** Serialized name of a history event type; must round-trip with eventTypeFromString. */
private String toString(History.Event.Type nodeEventType) {
switch (nodeEventType) {
case provisioned : return "provisioned";
case deprovisioned : return "deprovisioned";
case readied : return "readied";
case reserved : return "reserved";
case activated : return "activated";
case wantToRetire: return "wantToRetire";
case retired : return "retired";
case deactivated : return "deactivated";
case parked : return "parked";
case failed : return "failed";
case deallocated : return "deallocated";
case down : return "down";
case requested: return "requested";
case rebooted: return "rebooted";
case osUpgraded: return "osUpgraded";
case firmwareVerified: return "firmwareVerified";
}
// Unreachable for the cases above; guards against adding an enum value without a serialized form.
throw new IllegalArgumentException("Serialized form of '" + nodeEventType + "' not defined");
}
/** Maps a serialized agent name to the enum; throws for unknown names. */
private Agent eventAgentFromSlime(Inspector eventAgentField) {
switch (eventAgentField.asString()) {
case "operator" : return Agent.operator;
case "application" : return Agent.application;
case "system" : return Agent.system;
case "NodeFailer" : return Agent.NodeFailer;
case "Rebalancer" : return Agent.Rebalancer;
case "DirtyExpirer" : return Agent.DirtyExpirer;
case "FailedExpirer" : return Agent.FailedExpirer;
case "InactiveExpirer" : return Agent.InactiveExpirer;
case "ProvisionedExpirer" : return Agent.ProvisionedExpirer;
case "ReservationExpirer" : return Agent.ReservationExpirer;
case "DynamicProvisioningMaintainer" : return Agent.DynamicProvisioningMaintainer;
}
throw new IllegalArgumentException("Unknown node event agent '" + eventAgentField.asString() + "'");
}
/** Serialized name of an agent; must round-trip with eventAgentFromSlime. */
private String toString(Agent agent) {
switch (agent) {
case operator : return "operator";
case application : return "application";
case system : return "system";
case NodeFailer : return "NodeFailer";
case Rebalancer : return "Rebalancer";
case DirtyExpirer : return "DirtyExpirer";
case FailedExpirer : return "FailedExpirer";
case InactiveExpirer : return "InactiveExpirer";
case ProvisionedExpirer : return "ProvisionedExpirer";
case ReservationExpirer : return "ReservationExpirer";
case DynamicProvisioningMaintainer : return "DynamicProvisioningMaintainer";
}
throw new IllegalArgumentException("Serialized form of '" + agent + "' not defined");
}
/** Maps a serialized node type name to the enum; throws for unknown names. */
static NodeType nodeTypeFromString(String typeString) {
switch (typeString) {
case "tenant": return NodeType.tenant;
case "host": return NodeType.host;
case "proxy": return NodeType.proxy;
case "proxyhost": return NodeType.proxyhost;
case "config": return NodeType.config;
case "confighost": return NodeType.confighost;
case "controller": return NodeType.controller;
case "controllerhost": return NodeType.controllerhost;
case "devhost": return NodeType.devhost;
default : throw new IllegalArgumentException("Unknown node type '" + typeString + "'");
}
}
/** Serialized name of a node type; must round-trip with nodeTypeFromString. */
static String toString(NodeType type) {
switch (type) {
case tenant: return "tenant";
case host: return "host";
case proxy: return "proxy";
case proxyhost: return "proxyhost";
case config: return "config";
case confighost: return "confighost";
case controller: return "controller";
case controllerhost: return "controllerhost";
case devhost: return "devhost";
}
throw new IllegalArgumentException("Serialized form of '" + type + "' not defined");
}
/** Maps a serialized disk speed value; throws for unknown (or absent) values. */
private static NodeResources.DiskSpeed diskSpeedFromSlime(Inspector diskSpeed) {
switch (diskSpeed.asString()) {
case "fast" : return NodeResources.DiskSpeed.fast;
case "slow" : return NodeResources.DiskSpeed.slow;
case "any" : return NodeResources.DiskSpeed.any;
default: throw new IllegalStateException("Illegal disk-speed value '" + diskSpeed.asString() + "'");
}
}
/** Serialized name of a disk speed; must round-trip with diskSpeedFromSlime. */
private static String diskSpeedToString(NodeResources.DiskSpeed diskSpeed) {
switch (diskSpeed) {
case fast : return "fast";
case slow : return "slow";
case any : return "any";
default: throw new IllegalStateException("Illegal disk-speed value '" + diskSpeed + "'");
}
}
/** Maps a serialized storage type; unlike disk speed, an absent field falls back to the default. */
private static NodeResources.StorageType storageTypeFromSlime(Inspector storageType) {
if ( ! storageType.valid()) return NodeResources.StorageType.getDefault();
switch (storageType.asString()) {
case "remote" : return NodeResources.StorageType.remote;
case "local" : return NodeResources.StorageType.local;
case "any" : return NodeResources.StorageType.any;
default: throw new IllegalStateException("Illegal storage-type value '" + storageType.asString() + "'");
}
}
/** Serialized name of a storage type; must round-trip with storageTypeFromSlime. */
private static String storageTypeToString(NodeResources.StorageType storageType) {
switch (storageType) {
case remote : return "remote";
case local : return "local";
case any : return "any";
default: throw new IllegalStateException("Illegal storage-type value '" + storageType + "'");
}
}
} |
Consider having a method on `ClusterSpec` that returns the full docker image | private void toSlime(Node node, boolean allFields, Cursor object) {
        // Always expose the node url; all remaining fields are written only for full responses.
        object.setString("url", nodeParentUrl + node.hostname());
        if ( ! allFields) return;
        object.setString("id", node.hostname());
        object.setString("state", serializer.toString(node.state()));
        // NOTE(review): "type" is written twice — the serializer.toString(...) value two lines
        // below overwrites this name() value; confirm which serialized form clients expect.
        object.setString("type", node.type().name());
        object.setString("hostname", node.hostname());
        object.setString("type", serializer.toString(node.type()));
        if (node.parentHostname().isPresent()) {
            object.setString("parentHostname", node.parentHostname().get());
        }
        object.setString("openStackId", node.id());
        object.setString("flavor", node.flavor().name());
        node.reservedTo().ifPresent(reservedTo -> object.setString("reservedTo", reservedTo.value()));
        if (node.flavor().isConfigured())
            object.setDouble("cpuCores", node.flavor().getMinCpuCores());
        toSlime(node.flavor().resources(), object.setObject("resources"));
        if (node.flavor().cost() > 0)
            object.setLong("cost", node.flavor().cost());
        object.setString("environment", node.flavor().getType().name());
        // Fields present only when the node is allocated to an application.
        node.allocation().ifPresent(allocation -> {
            toSlime(allocation.owner(), object.setObject("owner"));
            toSlime(allocation.membership(), object.setObject("membership"));
            object.setLong("restartGeneration", allocation.restartGeneration().wanted());
            object.setLong("currentRestartGeneration", allocation.restartGeneration().current());
            String wantedVespaVersion = allocation.membership().cluster().vespaVersion().toFullString();
            Optional<String> dockerImageRepo = allocation.membership().cluster().dockerImageRepo();
            // Prefer the cluster-specified image repo; otherwise fall back to the repository default image.
            object.setString("wantedDockerImage", dockerImageRepo.map(s -> s + ":" + wantedVespaVersion)
                    .orElseGet(() -> nodeRepository.dockerImage(node).withTag(allocation.membership().cluster().vespaVersion()).asString()));
            object.setString("wantedVespaVersion", wantedVespaVersion);
            toSlime(allocation.requestedResources(), object.setObject("requestedResources"));
            allocation.networkPorts().ifPresent(ports -> NetworkPortsSerializer.toSlime(ports, object.setArray("networkPorts")));
            // Suspension status is looked up from the orchestrator per hostname.
            orchestrator.apply(new HostName(node.hostname()))
                        .ifPresent(info -> {
                            object.setBool("allowedToBeDown", info.status().isSuspended());
                            info.suspendedSince().ifPresent(since -> object.setLong("suspendedSinceMillis", since.toEpochMilli()));
                        });
        });
        // Status fields written regardless of allocation.
        object.setLong("rebootGeneration", node.status().reboot().wanted());
        object.setLong("currentRebootGeneration", node.status().reboot().current());
        node.status().osVersion().current().ifPresent(version -> object.setString("currentOsVersion", version.toFullString()));
        node.status().osVersion().wanted().ifPresent(version -> object.setString("wantedOsVersion", version.toFullString()));
        node.status().firmwareVerifiedAt().ifPresent(instant -> object.setLong("currentFirmwareCheck", instant.toEpochMilli()));
        if (node.type().isDockerHost())
            nodeRepository.firmwareChecks().requiredAfter().ifPresent(after -> object.setLong("wantedFirmwareCheck", after.toEpochMilli()));
        node.status().vespaVersion().ifPresent(version -> object.setString("vespaVersion", version.toFullString()));
        currentDockerImage(node).ifPresent(dockerImage -> object.setString("currentDockerImage", dockerImage.asString()));
        object.setLong("failCount", node.status().failCount());
        object.setBool("wantToRetire", node.status().wantToRetire());
        object.setBool("wantToDeprovision", node.status().wantToDeprovision());
        toSlime(node.history(), object.setArray("history"));
        ipAddressesToSlime(node.ipAddresses(), object.setArray("ipAddresses"));
        ipAddressesToSlime(node.ipAddressPool().asSet(), object.setArray("additionalIpAddresses"));
        node.reports().toSlime(object, "reports");
        node.modelName().ifPresent(modelName -> object.setString("modelName", modelName));
} | Optional<String> dockerImageRepo = allocation.membership().cluster().dockerImageRepo(); | private void toSlime(Node node, boolean allFields, Cursor object) {
        // Always expose the node url; all remaining fields are written only for full responses.
        object.setString("url", nodeParentUrl + node.hostname());
        if ( ! allFields) return;
        object.setString("id", node.hostname());
        object.setString("state", serializer.toString(node.state()));
        // NOTE(review): "type" is written twice — the serializer.toString(...) value two lines
        // below overwrites this name() value; confirm which serialized form clients expect.
        object.setString("type", node.type().name());
        object.setString("hostname", node.hostname());
        object.setString("type", serializer.toString(node.type()));
        if (node.parentHostname().isPresent()) {
            object.setString("parentHostname", node.parentHostname().get());
        }
        object.setString("openStackId", node.id());
        object.setString("flavor", node.flavor().name());
        node.reservedTo().ifPresent(reservedTo -> object.setString("reservedTo", reservedTo.value()));
        if (node.flavor().isConfigured())
            object.setDouble("cpuCores", node.flavor().getMinCpuCores());
        toSlime(node.flavor().resources(), object.setObject("resources"));
        if (node.flavor().cost() > 0)
            object.setLong("cost", node.flavor().cost());
        object.setString("environment", node.flavor().getType().name());
        // Fields present only when the node is allocated to an application.
        node.allocation().ifPresent(allocation -> {
            toSlime(allocation.owner(), object.setObject("owner"));
            toSlime(allocation.membership(), object.setObject("membership"));
            object.setLong("restartGeneration", allocation.restartGeneration().wanted());
            object.setLong("currentRestartGeneration", allocation.restartGeneration().current());
            // Prefer the cluster-specified image; otherwise fall back to the repository default image.
            object.setString("wantedDockerImage", allocation.membership().cluster().dockerImage()
                    .orElseGet(() -> nodeRepository.dockerImage(node).withTag(allocation.membership().cluster().vespaVersion()).asString()));
            object.setString("wantedVespaVersion", allocation.membership().cluster().vespaVersion().toFullString());
            toSlime(allocation.requestedResources(), object.setObject("requestedResources"));
            allocation.networkPorts().ifPresent(ports -> NetworkPortsSerializer.toSlime(ports, object.setArray("networkPorts")));
            // Suspension status is looked up from the orchestrator per hostname.
            orchestrator.apply(new HostName(node.hostname()))
                        .ifPresent(info -> {
                            object.setBool("allowedToBeDown", info.status().isSuspended());
                            info.suspendedSince().ifPresent(since -> object.setLong("suspendedSinceMillis", since.toEpochMilli()));
                        });
        });
        // Status fields written regardless of allocation.
        object.setLong("rebootGeneration", node.status().reboot().wanted());
        object.setLong("currentRebootGeneration", node.status().reboot().current());
        node.status().osVersion().current().ifPresent(version -> object.setString("currentOsVersion", version.toFullString()));
        node.status().osVersion().wanted().ifPresent(version -> object.setString("wantedOsVersion", version.toFullString()));
        node.status().firmwareVerifiedAt().ifPresent(instant -> object.setLong("currentFirmwareCheck", instant.toEpochMilli()));
        if (node.type().isDockerHost())
            nodeRepository.firmwareChecks().requiredAfter().ifPresent(after -> object.setLong("wantedFirmwareCheck", after.toEpochMilli()));
        node.status().vespaVersion().ifPresent(version -> object.setString("vespaVersion", version.toFullString()));
        currentDockerImage(node).ifPresent(dockerImage -> object.setString("currentDockerImage", dockerImage.asString()));
        object.setLong("failCount", node.status().failCount());
        object.setBool("wantToRetire", node.status().wantToRetire());
        object.setBool("wantToDeprovision", node.status().wantToDeprovision());
        toSlime(node.history(), object.setArray("history"));
        ipAddressesToSlime(node.ipAddresses(), object.setArray("ipAddresses"));
        ipAddressesToSlime(node.ipAddressPool().asSet(), object.setArray("additionalIpAddresses"));
        node.reports().toSlime(object, "reports");
        node.modelName().ifPresent(modelName -> object.setString("modelName", modelName));
} | class NodesResponse extends HttpResponse {
    /** The responses this can create */
    public enum ResponseType { nodeList, stateList, nodesInStateList, singleNode }
    /** The request url minus parameters, with a trailing slash added if missing */
    private final String parentUrl;
    /** The parent url of nodes */
    private final String nodeParentUrl;
    // Decides which nodes are included in list responses
    private final NodeFilter filter;
    // Whether list responses include full node details (the "recursive" request property)
    private final boolean recursive;
    // Resolves orchestrator host status (suspension info) per hostname
    private final Function<HostName, Optional<HostInfo>> orchestrator;
    private final NodeRepository nodeRepository;
    // The response body, built eagerly in the constructor and serialized by render()
    private final Slime slime;
    private final NodeSerializer serializer = new NodeSerializer();
    /**
     * Builds the complete response body for the given response type at construction time;
     * render(OutputStream) later only serializes the prepared slime tree.
     */
    public NodesResponse(ResponseType responseType, HttpRequest request,
                         Orchestrator orchestrator, NodeRepository nodeRepository) {
        super(200);
        this.parentUrl = toParentUrl(request);
        this.nodeParentUrl = toNodeParentUrl(request);
        filter = NodesApiHandler.toNodeFilter(request);
        this.recursive = request.getBooleanProperty("recursive");
        this.orchestrator = orchestrator.getHostResolver();
        this.nodeRepository = nodeRepository;
        slime = new Slime();
        Cursor root = slime.setObject();
        // For state- and node-specific responses the selector is the last path element of the url
        switch (responseType) {
            case nodeList: nodesToSlime(root); break;
            case stateList : statesToSlime(root); break;
            case nodesInStateList: nodesToSlime(serializer.stateFrom(lastElement(parentUrl)), root); break;
            case singleNode : nodeToSlime(lastElement(parentUrl), root); break;
            default: throw new IllegalArgumentException();
        }
    }
private String toParentUrl(HttpRequest request) {
URI uri = request.getUri();
String parentUrl = uri.getScheme() + ":
if ( ! parentUrl.endsWith("/"))
parentUrl = parentUrl + "/";
return parentUrl;
}
private String toNodeParentUrl(HttpRequest request) {
URI uri = request.getUri();
return uri.getScheme() + ":
}
    /** Writes the prepared slime tree as JSON to the given stream. */
    @Override
    public void render(OutputStream stream) throws IOException {
        new JsonFormat(true).encode(stream, slime);
    }
    /** This response is always JSON. */
    @Override
    public String getContentType() {
        return "application/json";
    }
    /** Lists every node state under "states", each with its url (and its nodes when recursive). */
    private void statesToSlime(Cursor root) {
        Cursor states = root.setObject("states");
        for (Node.State state : Node.State.values())
            toSlime(state, states.setObject(serializer.toString(state)));
    }
    /** Writes one state entry: its url, plus its nodes when the request is recursive. */
    private void toSlime(Node.State state, Cursor object) {
        object.setString("url", parentUrl + serializer.toString(state));
        if (recursive)
            nodesToSlime(state, object);
    }
    /** Outputs the nodes in the given state to a node array */
    private void nodesToSlime(Node.State state, Cursor parentObject) {
        Cursor nodeArray = parentObject.setArray("nodes");
        for (NodeType type : NodeType.values())
            toSlime(nodeRepository.getNodes(type, state), nodeArray);
    }
    /** Outputs all the nodes to a node array */
    private void nodesToSlime(Cursor parentObject) {
        Cursor nodeArray = parentObject.setArray("nodes");
        toSlime(nodeRepository.getNodes(), nodeArray);
    }
private void toSlime(List<Node> nodes, Cursor array) {
for (Node node : nodes) {
if ( ! filter.matches(node)) continue;
toSlime(node, recursive, array.addObject());
}
}
    /** Writes the single named node, or throws NotFoundException if it does not exist. */
    private void nodeToSlime(String hostname, Cursor object) {
        Node node = nodeRepository.getNode(hostname).orElseThrow(() ->
                new NotFoundException("No node with hostname '" + hostname + "'"));
        toSlime(node, true, object);
    }
    /** Writes the owning application id. */
    private void toSlime(ApplicationId id, Cursor object) {
        object.setString("tenant", id.tenant().value());
        object.setString("application", id.application().value());
        object.setString("instance", id.instance().value());
    }
    /** Writes cluster membership details of an allocated node. */
    private void toSlime(ClusterMembership membership, Cursor object) {
        object.setString("clustertype", membership.cluster().type().name());
        object.setString("clusterid", membership.cluster().id().value());
        // NOTE(review): unchecked Optional.get() — assumes a membership cluster always has a group; confirm
        object.setString("group", String.valueOf(membership.cluster().group().get().index()));
        object.setLong("index", membership.index());
        object.setBool("retired", membership.retired());
    }
    /** Writes each history event with its type, timestamp and acting agent. */
    private void toSlime(History history, Cursor array) {
        for (History.Event event : history.events()) {
            Cursor object = array.addObject();
            object.setString("event", event.type().name());
            object.setLong("at", event.at().toEpochMilli());
            object.setString("agent", event.agent().name());
        }
    }
    /** Writes the resource dimensions of a node. */
    private void toSlime(NodeResources resources, Cursor object) {
        object.setDouble("vcpu", resources.vcpu());
        object.setDouble("memoryGb", resources.memoryGb());
        object.setDouble("diskGb", resources.diskGb());
        object.setDouble("bandwidthGbps", resources.bandwidthGbps());
        object.setString("diskSpeed", serializer.toString(resources.diskSpeed()));
        object.setString("storageType", serializer.toString(resources.storageType()));
    }
    /**
     * Returns the docker image this node currently runs, if known: the explicitly recorded image,
     * or for non-container nodes, the repository default image tagged with the running Vespa version.
     */
    private Optional<DockerImage> currentDockerImage(Node node) {
        return node.status().dockerImage()
                .or(() -> Optional.of(node)
                        .filter(n -> n.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
                        .flatMap(n -> n.status().vespaVersion()
                                .map(version -> nodeRepository.dockerImage(n).withTag(version))));
    }
    /** Adds each ip address as an entry of the given string array. */
    private void ipAddressesToSlime(Set<String> ipAddresses, Cursor array) {
        ipAddresses.forEach(array::addString);
    }
private String lastElement(String path) {
if (path.endsWith("/"))
path = path.substring(0, path.length()-1);
int lastSlash = path.lastIndexOf("/");
if (lastSlash < 0) return path;
return path.substring(lastSlash+1);
}
} | class NodesResponse extends HttpResponse {
    /** The responses this can create */
    public enum ResponseType { nodeList, stateList, nodesInStateList, singleNode }
    /** The request url minus parameters, with a trailing slash added if missing */
    private final String parentUrl;
    /** The parent url of nodes */
    private final String nodeParentUrl;
    // Decides which nodes are included in list responses
    private final NodeFilter filter;
    // Whether list responses include full node details (the "recursive" request property)
    private final boolean recursive;
    // Resolves orchestrator host status (suspension info) per hostname
    private final Function<HostName, Optional<HostInfo>> orchestrator;
    private final NodeRepository nodeRepository;
    // The response body, built eagerly in the constructor and serialized by render()
    private final Slime slime;
    private final NodeSerializer serializer = new NodeSerializer();
    /**
     * Builds the complete response body for the given response type at construction time;
     * render(OutputStream) later only serializes the prepared slime tree.
     */
    public NodesResponse(ResponseType responseType, HttpRequest request,
                         Orchestrator orchestrator, NodeRepository nodeRepository) {
        super(200);
        this.parentUrl = toParentUrl(request);
        this.nodeParentUrl = toNodeParentUrl(request);
        filter = NodesApiHandler.toNodeFilter(request);
        this.recursive = request.getBooleanProperty("recursive");
        this.orchestrator = orchestrator.getHostResolver();
        this.nodeRepository = nodeRepository;
        slime = new Slime();
        Cursor root = slime.setObject();
        // For state- and node-specific responses the selector is the last path element of the url
        switch (responseType) {
            case nodeList: nodesToSlime(root); break;
            case stateList : statesToSlime(root); break;
            case nodesInStateList: nodesToSlime(serializer.stateFrom(lastElement(parentUrl)), root); break;
            case singleNode : nodeToSlime(lastElement(parentUrl), root); break;
            default: throw new IllegalArgumentException();
        }
    }
private String toParentUrl(HttpRequest request) {
URI uri = request.getUri();
String parentUrl = uri.getScheme() + ":
if ( ! parentUrl.endsWith("/"))
parentUrl = parentUrl + "/";
return parentUrl;
}
private String toNodeParentUrl(HttpRequest request) {
URI uri = request.getUri();
return uri.getScheme() + ":
}
    /** Writes the prepared slime tree as JSON to the given stream. */
    @Override
    public void render(OutputStream stream) throws IOException {
        new JsonFormat(true).encode(stream, slime);
    }
    /** This response is always JSON. */
    @Override
    public String getContentType() {
        return "application/json";
    }
    /** Lists every node state under "states", each with its url (and its nodes when recursive). */
    private void statesToSlime(Cursor root) {
        Cursor states = root.setObject("states");
        for (Node.State state : Node.State.values())
            toSlime(state, states.setObject(serializer.toString(state)));
    }
    /** Writes one state entry: its url, plus its nodes when the request is recursive. */
    private void toSlime(Node.State state, Cursor object) {
        object.setString("url", parentUrl + serializer.toString(state));
        if (recursive)
            nodesToSlime(state, object);
    }
    /** Outputs the nodes in the given state to a node array */
    private void nodesToSlime(Node.State state, Cursor parentObject) {
        Cursor nodeArray = parentObject.setArray("nodes");
        for (NodeType type : NodeType.values())
            toSlime(nodeRepository.getNodes(type, state), nodeArray);
    }
    /** Outputs all the nodes to a node array */
    private void nodesToSlime(Cursor parentObject) {
        Cursor nodeArray = parentObject.setArray("nodes");
        toSlime(nodeRepository.getNodes(), nodeArray);
    }
private void toSlime(List<Node> nodes, Cursor array) {
for (Node node : nodes) {
if ( ! filter.matches(node)) continue;
toSlime(node, recursive, array.addObject());
}
}
    /** Writes the single named node, or throws NotFoundException if it does not exist. */
    private void nodeToSlime(String hostname, Cursor object) {
        Node node = nodeRepository.getNode(hostname).orElseThrow(() ->
                new NotFoundException("No node with hostname '" + hostname + "'"));
        toSlime(node, true, object);
    }
    /** Writes the owning application id. */
    private void toSlime(ApplicationId id, Cursor object) {
        object.setString("tenant", id.tenant().value());
        object.setString("application", id.application().value());
        object.setString("instance", id.instance().value());
    }
    /** Writes cluster membership details of an allocated node. */
    private void toSlime(ClusterMembership membership, Cursor object) {
        object.setString("clustertype", membership.cluster().type().name());
        object.setString("clusterid", membership.cluster().id().value());
        // NOTE(review): unchecked Optional.get() — assumes a membership cluster always has a group; confirm
        object.setString("group", String.valueOf(membership.cluster().group().get().index()));
        object.setLong("index", membership.index());
        object.setBool("retired", membership.retired());
    }
    /** Writes each history event with its type, timestamp and acting agent. */
    private void toSlime(History history, Cursor array) {
        for (History.Event event : history.events()) {
            Cursor object = array.addObject();
            object.setString("event", event.type().name());
            object.setLong("at", event.at().toEpochMilli());
            object.setString("agent", event.agent().name());
        }
    }
    /** Writes the resource dimensions of a node. */
    private void toSlime(NodeResources resources, Cursor object) {
        object.setDouble("vcpu", resources.vcpu());
        object.setDouble("memoryGb", resources.memoryGb());
        object.setDouble("diskGb", resources.diskGb());
        object.setDouble("bandwidthGbps", resources.bandwidthGbps());
        object.setString("diskSpeed", serializer.toString(resources.diskSpeed()));
        object.setString("storageType", serializer.toString(resources.storageType()));
    }
    /**
     * Returns the docker image this node currently runs, if known: the explicitly recorded image,
     * or for non-container nodes, the repository default image tagged with the running Vespa version.
     */
    private Optional<DockerImage> currentDockerImage(Node node) {
        return node.status().dockerImage()
                .or(() -> Optional.of(node)
                        .filter(n -> n.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
                        .flatMap(n -> n.status().vespaVersion()
                                .map(version -> nodeRepository.dockerImage(n).withTag(version))));
    }
    /** Adds each ip address as an entry of the given string array. */
    private void ipAddressesToSlime(Set<String> ipAddresses, Cursor array) {
        ipAddresses.forEach(array::addString);
    }
private String lastElement(String path) {
if (path.endsWith("/"))
path = path.substring(0, path.length()-1);
int lastSlash = path.lastIndexOf("/");
if (lastSlash < 0) return path;
return path.substring(lastSlash+1);
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.